diff options
660 files changed, 9341 insertions, 5090 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index 7514dbf0a67..c36892c072d 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl @@ -227,7 +227,7 @@ X!Isound/sound_firmware.c    <chapter id="uart16x50">       <title>16x50 UART Driver</title>  !Edrivers/tty/serial/serial_core.c -!Edrivers/tty/serial/8250/8250.c +!Edrivers/tty/serial/8250/8250_core.c    </chapter>    <chapter id="fbdev"> diff --git a/Documentation/arm/cluster-pm-race-avoidance.txt b/Documentation/arm/cluster-pm-race-avoidance.txt new file mode 100644 index 00000000000..750b6fc24af --- /dev/null +++ b/Documentation/arm/cluster-pm-race-avoidance.txt @@ -0,0 +1,498 @@ +Cluster-wide Power-up/power-down race avoidance algorithm +========================================================= + +This file documents the algorithm which is used to coordinate CPU and +cluster setup and teardown operations and to manage hardware coherency +controls safely. + +The section "Rationale" explains what the algorithm is for and why it is +needed.  "Basic model" explains general concepts using a simplified view +of the system.  The other sections explain the actual details of the +algorithm in use. + + +Rationale +--------- + +In a system containing multiple CPUs, it is desirable to have the +ability to turn off individual CPUs when the system is idle, reducing +power consumption and thermal dissipation. + +In a system containing multiple clusters of CPUs, it is also desirable +to have the ability to turn off entire clusters. + +Turning entire clusters off and on is a risky business, because it +involves performing potentially destructive operations affecting a group +of independently running CPUs, while the OS continues to run.  This +means that we need some coordination in order to ensure that critical +cluster-level operations are only performed when it is truly safe to do +so. 
+ +Simple locking may not be sufficient to solve this problem, because +mechanisms like Linux spinlocks may rely on coherency mechanisms which +are not immediately enabled when a cluster powers up.  Since enabling or +disabling those mechanisms may itself be a non-atomic operation (such as +writing some hardware registers and invalidating large caches), other +methods of coordination are required in order to guarantee safe +power-down and power-up at the cluster level. + +The mechanism presented in this document describes a coherent memory +based protocol for performing the needed coordination.  It aims to be as +lightweight as possible, while providing the required safety properties. + + +Basic model +----------- + +Each cluster and CPU is assigned a state, as follows: + +	DOWN +	COMING_UP +	UP +	GOING_DOWN + +	    +---------> UP ----------+ +	    |                        v + +	COMING_UP                GOING_DOWN + +	    ^                        | +	    +--------- DOWN <--------+ + + +DOWN:	The CPU or cluster is not coherent, and is either powered off or +	suspended, or is ready to be powered off or suspended. + +COMING_UP: The CPU or cluster has committed to moving to the UP state. +	It may be part way through the process of initialisation and +	enabling coherency. + +UP:	The CPU or cluster is active and coherent at the hardware +	level.  A CPU in this state is not necessarily being used +	actively by the kernel. + +GOING_DOWN: The CPU or cluster has committed to moving to the DOWN +	state.  It may be part way through the process of teardown and +	coherency exit. + + +Each CPU has one of these states assigned to it at any point in time. +The CPU states are described in the "CPU state" section, below. 
+ +Each cluster is also assigned a state, but it is necessary to split the +state value into two parts (the "cluster" state and "inbound" state) and +to introduce additional states in order to avoid races between different +CPUs in the cluster simultaneously modifying the state.  The cluster- +level states are described in the "Cluster state" section. + +To help distinguish the CPU states from cluster states in this +discussion, the state names are given a CPU_ prefix for the CPU states, +and a CLUSTER_ or INBOUND_ prefix for the cluster states. + + +CPU state +--------- + +In this algorithm, each individual core in a multi-core processor is +referred to as a "CPU".  CPUs are assumed to be single-threaded: +therefore, a CPU can only be doing one thing at a single point in time. + +This means that CPUs fit the basic model closely. + +The algorithm defines the following states for each CPU in the system: + +	CPU_DOWN +	CPU_COMING_UP +	CPU_UP +	CPU_GOING_DOWN + +	 cluster setup and +	CPU setup complete          policy decision +	      +-----------> CPU_UP ------------+ +	      |                                v + +	CPU_COMING_UP                   CPU_GOING_DOWN + +	      ^                                | +	      +----------- CPU_DOWN <----------+ +	 policy decision           CPU teardown complete +	or hardware event + + +The definitions of the four states correspond closely to the states of +the basic model. + +Transitions between states occur as follows. + +A trigger event (spontaneous) means that the CPU can transition to the +next state as a result of making local progress only, with no +requirement for any external event to happen. + + +CPU_DOWN: + +	A CPU reaches the CPU_DOWN state when it is ready for +	power-down.  On reaching this state, the CPU will typically +	power itself down or suspend itself, via a WFI instruction or a +	firmware call. 
+ +	Next state:	CPU_COMING_UP +	Conditions:	none + +	Trigger events: + +		a) an explicit hardware power-up operation, resulting +		   from a policy decision on another CPU; + +		b) a hardware event, such as an interrupt. + + +CPU_COMING_UP: + +	A CPU cannot start participating in hardware coherency until the +	cluster is set up and coherent.  If the cluster is not ready, +	then the CPU will wait in the CPU_COMING_UP state until the +	cluster has been set up. + +	Next state:	CPU_UP +	Conditions:	The CPU's parent cluster must be in CLUSTER_UP. +	Trigger events:	Transition of the parent cluster to CLUSTER_UP. + +	Refer to the "Cluster state" section for a description of the +	CLUSTER_UP state. + + +CPU_UP: +	When a CPU reaches the CPU_UP state, it is safe for the CPU to +	start participating in local coherency. + +	This is done by jumping to the kernel's CPU resume code. + +	Note that the definition of this state is slightly different +	from the basic model definition: CPU_UP does not mean that the +	CPU is coherent yet, but it does mean that it is safe to resume +	the kernel.  The kernel handles the rest of the resume +	procedure, so the remaining steps are not visible as part of the +	race avoidance algorithm. + +	The CPU remains in this state until an explicit policy decision +	is made to shut down or suspend the CPU. + +	Next state:	CPU_GOING_DOWN +	Conditions:	none +	Trigger events:	explicit policy decision + + +CPU_GOING_DOWN: + +	While in this state, the CPU exits coherency, including any +	operations required to achieve this (such as cleaning data +	caches). + +	Next state:	CPU_DOWN +	Conditions:	local CPU teardown complete +	Trigger events:	(spontaneous) + + +Cluster state +------------- + +A cluster is a group of connected CPUs with some common resources. +Because a cluster contains multiple CPUs, it can be doing multiple +things at the same time.  This has some implications.  In particular, a +CPU can start up while another CPU is tearing the cluster down. 
+
+In this discussion, the "outbound side" is the view of the cluster state
+as seen by a CPU tearing the cluster down.  The "inbound side" is the
+view of the cluster state as seen by a CPU setting the cluster up.
+
+In order to enable safe coordination in such situations, it is important
+that a CPU which is setting up the cluster can advertise its state
+independently of the CPU which is tearing down the cluster.  For this
+reason, the cluster state is split into two parts:
+
+	"cluster" state: The global state of the cluster; or the state
+		on the outbound side:
+
+		CLUSTER_DOWN
+		CLUSTER_UP
+		CLUSTER_GOING_DOWN
+
+	"inbound" state: The state of the cluster on the inbound side.
+
+		INBOUND_NOT_COMING_UP
+		INBOUND_COMING_UP
+
+
+	The different pairings of these states result in six possible
+	states for the cluster as a whole:
+
+	                            CLUSTER_UP
+	          +==========> INBOUND_NOT_COMING_UP -------------+
+	          #                                               |
+	                                                          |
+	     CLUSTER_UP     <----+                                |
+	  INBOUND_COMING_UP      |                                v
+
+	          ^             CLUSTER_GOING_DOWN       CLUSTER_GOING_DOWN
+	          #              INBOUND_COMING_UP <=== INBOUND_NOT_COMING_UP
+
+	    CLUSTER_DOWN         |                                |
+	  INBOUND_COMING_UP <----+                                |
+	                                                          |
+	          ^                                               |
+	          +===========     CLUSTER_DOWN      <------------+
+	                       INBOUND_NOT_COMING_UP
+
+	Transitions -----> can only be made by the outbound CPU, and
+	only involve changes to the "cluster" state.
+ +	Transitions ===##> can only be made by the inbound CPU, and only +	involve changes to the "inbound" state, except where there is no +	further transition possible on the outbound side (i.e., the +	outbound CPU has put the cluster into the CLUSTER_DOWN state). + +	The race avoidance algorithm does not provide a way to determine +	which exact CPUs within the cluster play these roles.  This must +	be decided in advance by some other means.  Refer to the section +	"Last man and first man selection" for more explanation. + + +	CLUSTER_DOWN/INBOUND_NOT_COMING_UP is the only state where the +	cluster can actually be powered down. + +	The parallelism of the inbound and outbound CPUs is observed by +	the existence of two different paths from CLUSTER_GOING_DOWN/ +	INBOUND_NOT_COMING_UP (corresponding to GOING_DOWN in the basic +	model) to CLUSTER_DOWN/INBOUND_COMING_UP (corresponding to +	COMING_UP in the basic model).  The second path avoids cluster +	teardown completely. + +	CLUSTER_UP/INBOUND_COMING_UP is equivalent to UP in the basic +	model.  The final transition to CLUSTER_UP/INBOUND_NOT_COMING_UP +	is trivial and merely resets the state machine ready for the +	next cycle. + +	Details of the allowable transitions follow. + +	The next state in each case is notated + +		<cluster state>/<inbound state> (<transitioner>) + +	where the <transitioner> is the side on which the transition +	can occur; either the inbound or the outbound side. + + +CLUSTER_DOWN/INBOUND_NOT_COMING_UP: + +	Next state:	CLUSTER_DOWN/INBOUND_COMING_UP (inbound) +	Conditions:	none +	Trigger events: + +		a) an explicit hardware power-up operation, resulting +		   from a policy decision on another CPU; + +		b) a hardware event, such as an interrupt. 
+
+
+CLUSTER_DOWN/INBOUND_COMING_UP:
+
+	In this state, an inbound CPU sets up the cluster, including
+	enabling of hardware coherency at the cluster level and any
+	other operations (such as cache invalidation) which are required
+	in order to achieve this.
+
+	The purpose of this state is to do sufficient cluster-level
+	setup to enable other CPUs in the cluster to enter coherency
+	safely.
+
+	Next state:	CLUSTER_UP/INBOUND_COMING_UP (inbound)
+	Conditions:	cluster-level setup and hardware coherency complete
+	Trigger events:	(spontaneous)
+
+
+CLUSTER_UP/INBOUND_COMING_UP:
+
+	Cluster-level setup is complete and hardware coherency is
+	enabled for the cluster.  Other CPUs in the cluster can safely
+	enter coherency.
+
+	This is a transient state, leading immediately to
+	CLUSTER_UP/INBOUND_NOT_COMING_UP.  All other CPUs on the cluster
+	should treat these two states as equivalent.
+
+	Next state:	CLUSTER_UP/INBOUND_NOT_COMING_UP (inbound)
+	Conditions:	none
+	Trigger events:	(spontaneous)
+
+
+CLUSTER_UP/INBOUND_NOT_COMING_UP:
+
+	Cluster-level setup is complete and hardware coherency is
+	enabled for the cluster.  Other CPUs in the cluster can safely
+	enter coherency.
+
+	The cluster will remain in this state until a policy decision is
+	made to power the cluster down.
+
+	Next state:	CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP (outbound)
+	Conditions:	none
+	Trigger events:	policy decision to power down the cluster
+
+
+CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP:
+
+	An outbound CPU is tearing the cluster down.  The selected CPU
+	must wait in this state until all CPUs in the cluster are in the
+	CPU_DOWN state.
+
+	When all CPUs are in the CPU_DOWN state, the cluster can be torn
+	down, for example by cleaning data caches and exiting
+	cluster-level coherency.
+
+	To avoid unnecessary teardown operations, the outbound CPU
+	should check the inbound cluster state for asynchronous
+	transitions to INBOUND_COMING_UP.
Alternatively, individual +	CPUs can be checked for entry into CPU_COMING_UP or CPU_UP. + + +	Next states: + +	CLUSTER_DOWN/INBOUND_NOT_COMING_UP (outbound) +		Conditions:	cluster torn down and ready to power off +		Trigger events:	(spontaneous) + +	CLUSTER_GOING_DOWN/INBOUND_COMING_UP (inbound) +		Conditions:	none +		Trigger events: + +			a) an explicit hardware power-up operation, +			   resulting from a policy decision on another +			   CPU; + +			b) a hardware event, such as an interrupt. + + +CLUSTER_GOING_DOWN/INBOUND_COMING_UP: + +	The cluster is (or was) being torn down, but another CPU has +	come online in the meantime and is trying to set up the cluster +	again. + +	If the outbound CPU observes this state, it has two choices: + +		a) back out of teardown, restoring the cluster to the +		   CLUSTER_UP state; + +		b) finish tearing the cluster down and put the cluster +		   in the CLUSTER_DOWN state; the inbound CPU will +		   set up the cluster again from there. + +	Choice (a) permits the removal of some latency by avoiding +	unnecessary teardown and setup operations in situations where +	the cluster is not really going to be powered down. + + +	Next states: + +	CLUSTER_UP/INBOUND_COMING_UP (outbound) +		Conditions:	cluster-level setup and hardware +				coherency complete +		Trigger events:	(spontaneous) + +	CLUSTER_DOWN/INBOUND_COMING_UP (outbound) +		Conditions:	cluster torn down and ready to power off +		Trigger events:	(spontaneous) + + +Last man and First man selection +-------------------------------- + +The CPU which performs cluster tear-down operations on the outbound side +is commonly referred to as the "last man". + +The CPU which performs cluster setup on the inbound side is commonly +referred to as the "first man". + +The race avoidance algorithm documented above does not provide a +mechanism to choose which CPUs should play these roles. 
+ + +Last man: + +When shutting down the cluster, all the CPUs involved are initially +executing Linux and hence coherent.  Therefore, ordinary spinlocks can +be used to select a last man safely, before the CPUs become +non-coherent. + + +First man: + +Because CPUs may power up asynchronously in response to external wake-up +events, a dynamic mechanism is needed to make sure that only one CPU +attempts to play the first man role and do the cluster-level +initialisation: any other CPUs must wait for this to complete before +proceeding. + +Cluster-level initialisation may involve actions such as configuring +coherency controls in the bus fabric. + +The current implementation in mcpm_head.S uses a separate mutual exclusion +mechanism to do this arbitration.  This mechanism is documented in +detail in vlocks.txt. + + +Features and Limitations +------------------------ + +Implementation: + +	The current ARM-based implementation is split between +	arch/arm/common/mcpm_head.S (low-level inbound CPU operations) and +	arch/arm/common/mcpm_entry.c (everything else): + +	__mcpm_cpu_going_down() signals the transition of a CPU to the +		CPU_GOING_DOWN state. + +	__mcpm_cpu_down() signals the transition of a CPU to the CPU_DOWN +		state. + +	A CPU transitions to CPU_COMING_UP and then to CPU_UP via the +		low-level power-up code in mcpm_head.S.  This could +		involve CPU-specific setup code, but in the current +		implementation it does not. + +	__mcpm_outbound_enter_critical() and __mcpm_outbound_leave_critical() +		handle transitions from CLUSTER_UP to CLUSTER_GOING_DOWN +		and from there to CLUSTER_DOWN or back to CLUSTER_UP (in +		the case of an aborted cluster power-down). + +		These functions are more complex than the __mcpm_cpu_*() +		functions due to the extra inter-CPU coordination which +		is needed for safe transitions at the cluster level. + +	A cluster transitions from CLUSTER_DOWN back to CLUSTER_UP via +		the low-level power-up code in mcpm_head.S.  
This +		typically involves platform-specific setup code, +		provided by the platform-specific power_up_setup +		function registered via mcpm_sync_init. + +Deep topologies: + +	As currently described and implemented, the algorithm does not +	support CPU topologies involving more than two levels (i.e., +	clusters of clusters are not supported).  The algorithm could be +	extended by replicating the cluster-level states for the +	additional topological levels, and modifying the transition +	rules for the intermediate (non-outermost) cluster levels. + + +Colophon +-------- + +Originally created and documented by Dave Martin for Linaro Limited, in +collaboration with Nicolas Pitre and Achin Gupta. + +Copyright (C) 2012-2013  Linaro Limited +Distributed under the terms of Version 2 of the GNU General Public +License, as defined in linux/COPYING. diff --git a/Documentation/arm/vlocks.txt b/Documentation/arm/vlocks.txt new file mode 100644 index 00000000000..415960a9bab --- /dev/null +++ b/Documentation/arm/vlocks.txt @@ -0,0 +1,211 @@ +vlocks for Bare-Metal Mutual Exclusion +====================================== + +Voting Locks, or "vlocks" provide a simple low-level mutual exclusion +mechanism, with reasonable but minimal requirements on the memory +system. + +These are intended to be used to coordinate critical activity among CPUs +which are otherwise non-coherent, in situations where the hardware +provides no other mechanism to support this and ordinary spinlocks +cannot be used. + + +vlocks make use of the atomicity provided by the memory system for +writes to a single memory location.  To arbitrate, every CPU "votes for +itself", by storing a unique number to a common memory location.  The +final value seen in that memory location when all the votes have been +cast identifies the winner. 
+
+In order to make sure that the election produces an unambiguous result
+in finite time, a CPU will only enter the election in the first place if
+no winner has been chosen and the election does not appear to have
+started yet.
+
+
+Algorithm
+---------
+
+The easiest way to explain the vlocks algorithm is with some pseudo-code:
+
+
+	int currently_voting[NR_CPUS] = { 0, };
+	int last_vote = -1; /* no votes yet */
+
+	bool vlock_trylock(int this_cpu)
+	{
+		/* signal our desire to vote */
+		currently_voting[this_cpu] = 1;
+		if (last_vote != -1) {
+			/* someone already volunteered himself */
+			currently_voting[this_cpu] = 0;
+			return false; /* not ourself */
+		}
+
+		/* let's suggest ourself */
+		last_vote = this_cpu;
+		currently_voting[this_cpu] = 0;
+
+		/* then wait until everyone else is done voting */
+		for_each_cpu(i) {
+			while (currently_voting[i] != 0)
+				/* wait */;
+		}
+
+		/* result */
+		if (last_vote == this_cpu)
+			return true; /* we won */
+		return false;
+	}
+
+	void vlock_unlock(void)
+	{
+		last_vote = -1;
+	}
+
+
+The currently_voting[] array provides a way for the CPUs to determine
+whether an election is in progress, and plays a role analogous to the
+"entering" array in Lamport's bakery algorithm [1].
+
+However, once the election has started, the underlying memory system
+atomicity is used to pick the winner.  This avoids the need for a static
+priority rule to act as a tie-breaker, or any counters which could
+overflow.
+
+As long as the last_vote variable is globally visible to all CPUs, it
+will contain only one value that won't change once every CPU has cleared
+its currently_voting flag.
+
+
+Features and limitations
+------------------------
+
+ * vlocks are not intended to be fair.  In the contended case, it is the
+   _last_ CPU which attempts to get the lock which will be most likely
+   to win.
+
+   vlocks are therefore best suited to situations where it is necessary
+   to pick a unique winner, but it does not matter which CPU actually
+   wins.
+
+ * Like other similar mechanisms, vlocks will not scale well to a large
+   number of CPUs.
+
+   vlocks can be cascaded in a voting hierarchy to permit better scaling
+   if necessary, as in the following hypothetical example for 4096 CPUs:
+
+	/* first level: local election */
+	my_town = towns[(this_cpu >> 4) & 0xf];
+	I_won = vlock_trylock(my_town, this_cpu & 0xf);
+	if (I_won) {
+		/* we won the town election, let's go for the state */
+		my_state = states[(this_cpu >> 8) & 0xf];
+		I_won = vlock_trylock(my_state, this_cpu & 0xf);
+		if (I_won) {
+			/* and so on */
+			I_won = vlock_trylock(the_whole_country, this_cpu & 0xf);
+			if (I_won) {
+				/* ... */
+			}
+			vlock_unlock(the_whole_country);
+		}
+		vlock_unlock(my_state);
+	}
+	vlock_unlock(my_town);
+
+
+ARM implementation
+------------------
+
+The current ARM implementation [2] contains some optimisations beyond
+the basic algorithm:
+
+ * By packing the members of the currently_voting array close together,
+   we can read the whole array in one transaction (providing the number
+   of CPUs potentially contending the lock is small enough).  This
+   reduces the number of round-trips required to external memory.
+
+   In the ARM implementation, this means that we can use a single load
+   and comparison:
+
+	LDR	Rt, [Rn]
+	CMP	Rt, #0
+
+   ...in place of code equivalent to:
+
+	LDRB	Rt, [Rn]
+	CMP	Rt, #0
+	LDRBEQ	Rt, [Rn, #1]
+	CMPEQ	Rt, #0
+	LDRBEQ	Rt, [Rn, #2]
+	CMPEQ	Rt, #0
+	LDRBEQ	Rt, [Rn, #3]
+	CMPEQ	Rt, #0
+
+   This cuts down on the fast-path latency, as well as potentially
+   reducing bus contention in contended cases.
+
+   The optimisation relies on the fact that the ARM memory system
+   guarantees coherency between overlapping memory accesses of
+   different sizes, similarly to many other architectures.
Note that
+   we do not care which element of currently_voting appears in which
+   bits of Rt, so there is no need to worry about endianness in this
+   optimisation.
+
+   If there are too many CPUs to read the currently_voting array in
+   one transaction then multiple transactions are still required.  The
+   implementation uses a simple loop of word-sized loads for this
+   case.  The number of transactions is still fewer than would be
+   required if bytes were loaded individually.
+
+
+   In principle, we could aggregate further by using LDRD or LDM, but
+   to keep the code simple this was not attempted in the initial
+   implementation.
+
+
+ * vlocks are currently only used to coordinate between CPUs which are
+   unable to enable their caches yet.  This means that the
+   implementation removes many of the barriers which would be required
+   when executing the algorithm in cached memory.
+
+   Packing of the currently_voting array does not work with cached
+   memory unless all CPUs contending the lock are cache-coherent, due
+   to cache writebacks from one CPU clobbering values written by other
+   CPUs.  (Though if all the CPUs are cache-coherent, you should
+   probably be using proper spinlocks instead anyway).
+
+
+ * The "no votes yet" value used for the last_vote variable is 0 (not
+   -1 as in the pseudocode).  This allows statically-allocated vlocks
+   to be implicitly initialised to an unlocked state simply by putting
+   them in .bss.
+
+   An offset is added to each CPU's ID for the purpose of setting this
+   variable, so that no CPU uses the value 0 for its ID.
+
+
+Colophon
+--------
+
+Originally created and documented by Dave Martin for Linaro Limited, for
+use in ARM-based big.LITTLE platforms, with review and input gratefully
+received from Nicolas Pitre and Achin Gupta.  Thanks to Nicolas for
+grabbing most of this text out of the relevant mail thread and writing
+up the pseudocode.
+ +Copyright (C) 2012-2013  Linaro Limited +Distributed under the terms of Version 2 of the GNU General Public +License, as defined in linux/COPYING. + + +References +---------- + +[1] Lamport, L. "A New Solution of Dijkstra's Concurrent Programming +    Problem", Communications of the ACM 17, 8 (August 1974), 453-455. + +    http://en.wikipedia.org/wiki/Lamport%27s_bakery_algorithm + +[2] linux/arch/arm/common/vlock.S, www.kernel.org. diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 4609e81dbc3..8ccbf27aead 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -596,9 +596,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.  			is selected automatically. Check  			Documentation/kdump/kdump.txt for further details. -	crashkernel_low=size[KMG] -			[KNL, x86] parts under 4G. -  	crashkernel=range1:size1[,range2:size2,...][@offset]  			[KNL] Same as above, but depends on the memory  			in the running system. The syntax of range is @@ -606,6 +603,26 @@ bytes respectively. Such letter suffixes can also be entirely omitted.  			a memory unit (amount[KMG]). See also  			Documentation/kdump/kdump.txt for an example. +	crashkernel=size[KMG],high +			[KNL, x86_64] range could be above 4G. Allow kernel +			to allocate physical memory region from top, so could +			be above 4G if system have more than 4G ram installed. +			Otherwise memory region will be allocated below 4G, if +			available. +			It will be ignored if crashkernel=X is specified. +	crashkernel=size[KMG],low +			[KNL, x86_64] range under 4G. When crashkernel=X,high +			is passed, kernel could allocate physical memory region +			above 4G, that cause second kernel crash on system +			that require some amount of low memory, e.g. swiotlb +			requires at least 64M+32K low memory.  Kernel would +			try to allocate 72M below 4G automatically. 
+			This one let user to specify own low range under 4G +			for second kernel instead. +			0: to disable low allocation. +			It will be ignored when crashkernel=X,high is not used +			or memory reserved is below 4G. +  	cs89x0_dma=	[HW,NET]  			Format: <dma> @@ -788,6 +805,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.  	edd=		[EDD]  			Format: {"off" | "on" | "skip[mbr]"} +	efi_no_storage_paranoia [EFI; X86] +			Using this parameter you can use more than 50% of +			your efi variable storage. Use this parameter only if +			you are really sure that your UEFI does sane gc and +			fulfills the spec otherwise your board may brick. +  	eisa_irq_edge=	[PARISC,HW]  			See header of drivers/parisc/eisa.c. diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx index 27a91cf43d6..5020b7b5a24 100644 --- a/Documentation/scsi/LICENSE.qla2xxx +++ b/Documentation/scsi/LICENSE.qla2xxx @@ -1,4 +1,4 @@ -Copyright (c) 2003-2012 QLogic Corporation +Copyright (c) 2003-2013 QLogic Corporation  QLogic Linux FC-FCoE Driver  This program includes a device driver for Linux 3.x. diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt index 4499bd94886..95731a08f25 100644 --- a/Documentation/sound/alsa/ALSA-Configuration.txt +++ b/Documentation/sound/alsa/ALSA-Configuration.txt @@ -890,9 +890,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.      
enable_msi	- Enable Message Signaled Interrupt (MSI) (default = off)      power_save	- Automatic power-saving timeout (in second, 0 =  		disable) -    power_save_controller - Support runtime D3 of HD-audio controller -		(-1 = on for supported chip (default), false = off, -		 true = force to on even for unsupported hardware) +    power_save_controller - Reset HD-audio controller in power-saving mode +		(default = on)      align_buffer_size - Force rounding of buffer/period sizes to multiples      		      of 128 bytes. This is more efficient in terms of memory  		      access but isn't required by the HDA spec and prevents diff --git a/MAINTAINERS b/MAINTAINERS index 74e58a4d035..8bdd7a7ef2f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4941,6 +4941,12 @@ W:	logfs.org  S:	Maintained  F:	fs/logfs/ +LPC32XX MACHINE SUPPORT +M:	Roland Stigge <stigge@antcom.de> +L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S:	Maintained +F:	arch/arm/mach-lpc32xx/ +  LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)  M:	Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>  M:	Sreekanth Reddy <Sreekanth.Reddy@lsi.com> @@ -5065,9 +5071,8 @@ S:	Maintained  F:	drivers/net/ethernet/marvell/sk*  MARVELL LIBERTAS WIRELESS DRIVER -M:	Dan Williams <dcbw@redhat.com>  L:	libertas-dev@lists.infradead.org -S:	Maintained +S:	Orphan  F:	drivers/net/wireless/libertas/  MARVELL MV643XX ETHERNET DRIVER @@ -5569,6 +5574,7 @@ F:	include/uapi/linux/if_*  F:	include/uapi/linux/netdevice.h  NETXEN (1/10) GbE SUPPORT +M:	Manish Chopra <manish.chopra@qlogic.com>  M:	Sony Chacko <sony.chacko@qlogic.com>  M:	Rajesh Borundia <rajesh.borundia@qlogic.com>  L:	netdev@vger.kernel.org @@ -6625,7 +6631,7 @@ S:	Supported  F:	fs/reiserfs/  REGISTER MAP ABSTRACTION -M:	Mark Brown <broonie@opensource.wolfsonmicro.com> +M:	Mark Brown <broonie@kernel.org>  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git  S:	Supported  F:	drivers/base/regmap/ @@ -6951,7 +6957,6 @@ F:	drivers/scsi/st*  
SCTP PROTOCOL  M:	Vlad Yasevich <vyasevich@gmail.com> -M:	Sridhar Samudrala <sri@us.ibm.com>  M:	Neil Horman <nhorman@tuxdriver.com>  L:	linux-sctp@vger.kernel.org  W:	http://lksctp.sourceforge.net @@ -7374,7 +7379,7 @@ F:	sound/  SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)  M:	Liam Girdwood <lgirdwood@gmail.com> -M:	Mark Brown <broonie@opensource.wolfsonmicro.com> +M:	Mark Brown <broonie@kernel.org>  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git  L:	alsa-devel@alsa-project.org (moderated for non-subscribers)  W:	http://alsa-project.org/main/index.php/ASoC @@ -7463,7 +7468,7 @@ F:	drivers/clk/spear/  SPI SUBSYSTEM  M:	Grant Likely <grant.likely@secretlab.ca> -M:	Mark Brown <broonie@opensource.wolfsonmicro.com> +M:	Mark Brown <broonie@kernel.org>  L:	spi-devel-general@lists.sourceforge.net  Q:	http://patchwork.kernel.org/project/spi-devel-general/list/  T:	git git://git.secretlab.ca/git/linux-2.6.git @@ -8708,7 +8713,7 @@ F:	drivers/scsi/vmw_pvscsi.h  VOLTAGE AND CURRENT REGULATOR FRAMEWORK  M:	Liam Girdwood <lrg@ti.com> -M:	Mark Brown <broonie@opensource.wolfsonmicro.com> +M:	Mark Brown <broonie@kernel.org>  W:	http://opensource.wolfsonmicro.com/node/15  W:	http://www.slimlogic.co.uk/?p=48  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git @@ -1,7 +1,7 @@  VERSION = 3  PATCHLEVEL = 9  SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc8  NAME = Unicycling Gorilla  # *DOCUMENTATION* @@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),)  # Carefully list dependencies so we do not try to build scripts twice  # in parallel  PHONY += scripts -scripts: scripts_basic include/config/auto.conf include/config/tristate.conf +scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \ +	 asm-generic  	$(Q)$(MAKE) $(build)=$(@)  # Objects we will link into vmlinux / subdirs we need to visit diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 4759fe751aa..2cc3cc519c5 100644 --- a/arch/alpha/Makefile 
+++ b/arch/alpha/Makefile @@ -12,7 +12,7 @@ NM := $(NM) -B  LDFLAGS_vmlinux	:= -static -N #-relax  CHECKFLAGS	+= -D__alpha__ -m64 -cflags-y	:= -pipe -mno-fp-regs -ffixed-8 -msmall-data +cflags-y	:= -pipe -mno-fp-regs -ffixed-8  cflags-y	+= $(call cc-option, -fno-jump-tables)  cpuflags-$(CONFIG_ALPHA_EV4)		:= -mcpu=ev4 diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h index 46cefbd50e7..bae97eb19d2 100644 --- a/arch/alpha/include/asm/floppy.h +++ b/arch/alpha/include/asm/floppy.h @@ -26,7 +26,7 @@  #define fd_disable_irq()        disable_irq(FLOPPY_IRQ)  #define fd_cacheflush(addr,size) /* nothing */  #define fd_request_irq()        request_irq(FLOPPY_IRQ, floppy_interrupt,\ -					    IRQF_DISABLED, "floppy", NULL) +					    0, "floppy", NULL)  #define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL)  #ifdef CONFIG_PCI diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2872accd221..7b2be251c30 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -117,13 +117,6 @@ handle_irq(int irq)  		return;  	} -	/* -	 * From here we must proceed with IPL_MAX. Note that we do not -	 * explicitly enable interrupts afterwards - some MILO PALcode -	 * (namely LX164 one) seems to have severe problems with RTI -	 * at IPL 0. -	 */ -	local_irq_disable();  	irq_enter();  	generic_handle_irq_desc(irq, desc);  	irq_exit(); diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 772ddfdb71a..f433fc11877 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector,  	  unsigned long la_ptr, struct pt_regs *regs)  {  	struct pt_regs *old_regs; + +	/* +	 * Disable interrupts during IRQ handling. +	 * Note that there is no matching local_irq_enable() due to +	 * severe problems with RTI at IPL0 and some MILO PALcode +	 * (namely LX164). 
+	 */ +	local_irq_disable();  	switch (type) {  	case 0:  #ifdef CONFIG_SMP @@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector,  	  {  		long cpu; -		local_irq_disable();  		smp_percpu_timer_interrupt(regs);  		cpu = smp_processor_id();  		if (cpu != boot_cpuid) { @@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,  struct irqaction timer_irqaction = {  	.handler	= timer_interrupt, -	.flags		= IRQF_DISABLED,  	.name		= "timer",  }; diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 4d4c046f708..1383f8601a9 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)  extern void free_reserved_mem(void *, void *);  extern void pcibios_claim_one_bus(struct pci_bus *); +static struct resource irongate_io = { +	.name	= "Irongate PCI IO", +	.flags	= IORESOURCE_IO, +};  static struct resource irongate_mem = {  	.name	= "Irongate PCI MEM",  	.flags	= IORESOURCE_MEM, @@ -209,6 +213,7 @@ nautilus_init_pci(void)  	irongate = pci_get_bus_and_slot(0, 0);  	bus->self = irongate; +	bus->resource[0] = &irongate_io;  	bus->resource[1] = &irongate_mem;  	pci_bus_size_bridges(bus); diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 5cf4a481b8c..a53cf03f49d 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -280,15 +280,15 @@ titan_late_init(void)  	 * all reported to the kernel as machine checks, so the handler  	 * is a nop so it can be called to count the individual events.  	 
*/ -	titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED, +	titan_request_irq(63+16, titan_intr_nop, 0,  		    "CChip Error", NULL); -	titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED, +	titan_request_irq(62+16, titan_intr_nop, 0,  		    "PChip 0 H_Error", NULL); -	titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED, +	titan_request_irq(61+16, titan_intr_nop, 0,  		    "PChip 1 H_Error", NULL); -	titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED, +	titan_request_irq(60+16, titan_intr_nop, 0,  		    "PChip 0 C_Error", NULL); -	titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED, +	titan_request_irq(59+16, titan_intr_nop, 0,  		    "PChip 1 C_Error", NULL);  	/*  @@ -348,9 +348,9 @@ privateer_init_pci(void)  	 * Hook a couple of extra err interrupts that the  	 * common titan code won't.  	 */ -	titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED, +	titan_request_irq(53+16, titan_intr_nop, 0,  		    "NMI", NULL); -	titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED, +	titan_request_irq(50+16, titan_intr_nop, 0,  		    "Temperature Warning", NULL);  	/* diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index ccd84806b62..eac07166820 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h @@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)  	"	flag.nz %0		\n"  	: "=r"(temp), "=r"(flags)  	: "n"((STATUS_E1_MASK | STATUS_E2_MASK)) -	: "cc"); +	: "memory", "cc");  	return flags;  } @@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags)  	__asm__ __volatile__(  	"	flag %0			\n"  	: -	: "r"(flags)); +	: "r"(flags) +	: "memory");  }  /* @@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void)  	"	and %0, %0, %1		\n"  	"	flag %0			\n"  	: "=&r"(temp) -	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))); +	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)) +	: "memory");  }  /* @@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void)  	__asm__ __volatile__(  	
"	lr  %0, [status32]	\n" -	: "=&r"(temp)); +	: "=&r"(temp) +	: +	: "memory");  	return temp;  } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 13b739469c5..00bdfdbdd4a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -58,6 +58,7 @@ config ARM  	select CLONE_BACKWARDS  	select OLD_SIGSUSPEND3  	select OLD_SIGACTION +	select HAVE_CONTEXT_TRACKING  	help  	  The ARM series is a line of low-power-consumption RISC chip designs  	  licensed by ARM Ltd and targeted at embedded applications and @@ -1183,9 +1184,9 @@ config ARM_NR_BANKS  	default 8  config IWMMXT -	bool "Enable iWMMXt support" +	bool "Enable iWMMXt support" if !CPU_PJ4  	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 -	default y if PXA27x || PXA3xx || ARCH_MMP +	default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4  	help  	  Enable support for iWMMXt context switching at run time if  	  running on a CPU that supports it. @@ -1439,6 +1440,16 @@ config ARM_ERRATA_775420  	 to deadlock. This workaround puts DSB before executing ISB if  	 an abort may occur on cache maintenance. +config ARM_ERRATA_798181 +	bool "ARM errata: TLBI/DSB failure on Cortex-A15" +	depends on CPU_V7 && SMP +	help +	  On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not +	  adequately shooting down all use of the old entries. This +	  option enables the Linux kernel workaround for this erratum +	  which sends an IPI to the CPUs that are running the same ASID +	  as the one being invalidated. +  endmenu  source "arch/arm/common/Kconfig" @@ -1596,6 +1607,14 @@ config HAVE_ARM_TWD  	help  	  This options enables support for the ARM timer and watchdog unit +config MCPM +	bool "Multi-Cluster Power Management" +	depends on CPU_V7 && SMP +	help +	  This option provides the common power management infrastructure +	  for (multi-)cluster based systems, such as big.LITTLE based +	  systems. 
+  choice  	prompt "Memory split"  	default VMSPLIT_3G @@ -1683,8 +1702,9 @@ config SCHED_HRTICK  	def_bool HIGH_RES_TIMERS  config THUMB2_KERNEL -	bool "Compile the kernel in Thumb-2 mode" +	bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY  	depends on CPU_V7 && !CPU_V6 && !CPU_V6K +	default y if CPU_THUMBONLY  	select AEABI  	select ARM_ASM_UNIFIED  	select ARM_UNWIND diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 9b31f4311ea..791fbeba40c 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -602,6 +602,17 @@ config DEBUG_LL_INCLUDE  	default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1  	default "mach/debug-macro.S" +config DEBUG_UNCOMPRESS +	bool +	default y if ARCH_MULTIPLATFORM && DEBUG_LL && \ +		     !DEBUG_OMAP2PLUS_UART && \ +		     !DEBUG_TEGRA_UART + +config UNCOMPRESS_INCLUDE +	string +	default "debug/uncompress.h" if ARCH_MULTIPLATFORM +	default "mach/uncompress.h" +  config EARLY_PRINTK  	bool "Early printk"  	depends on DEBUG_LL diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index afed28e37ea..3580d57ea21 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -24,6 +24,9 @@ endif  AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)  HEAD	= head.o  OBJS	+= misc.o decompress.o +ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y) +OBJS	+= debug.o +endif  FONTC	= $(srctree)/drivers/video/console/font_acorn_8x8.c  # string library code (-Os is enforced to keep it much smaller) diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S new file mode 100644 index 00000000000..6e8382d5b7a --- /dev/null +++ b/arch/arm/boot/compressed/debug.S @@ -0,0 +1,12 @@ +#include <linux/linkage.h> +#include <asm/assembler.h> + +#include CONFIG_DEBUG_LL_INCLUDE + +ENTRY(putc) +	addruart r1, r2, r3 +	waituart r3, r1 +	senduart r0, r1 +	busyuart r3, r1 +	mov	 pc, lr +ENDPROC(putc) diff --git a/arch/arm/boot/compressed/misc.c 
b/arch/arm/boot/compressed/misc.c index df899834d84..31bd43b8209 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -25,13 +25,7 @@ unsigned int __machine_arch_type;  static void putstr(const char *ptr);  extern void error(char *x); -#ifdef CONFIG_ARCH_MULTIPLATFORM -static inline void putc(int c) {} -static inline void flush(void) {} -static inline void arch_decomp_setup(void) {} -#else -#include <mach/uncompress.h> -#endif +#include CONFIG_UNCOMPRESS_INCLUDE  #ifdef CONFIG_DEBUG_ICEDCC diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts index dd0c57dd9f3..3234875824d 100644 --- a/arch/arm/boot/dts/armada-370-mirabox.dts +++ b/arch/arm/boot/dts/armada-370-mirabox.dts @@ -54,7 +54,7 @@  		};  		mvsdio@d00d4000 { -			pinctrl-0 = <&sdio_pins2>; +			pinctrl-0 = <&sdio_pins3>;  			pinctrl-names = "default";  			status = "okay";  			/* diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 8188d138020..a195debb67d 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -59,6 +59,12 @@  					     "mpp50", "mpp51", "mpp52";  			      marvell,function = "sd0";  			}; + +			sdio_pins3: sdio-pins3 { +			      marvell,pins = "mpp48", "mpp49", "mpp50", +					     "mpp51", "mpp52", "mpp53"; +			      marvell,function = "sd0"; +			};  	        };  		gpio0: gpio@d0018100 { diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi index 9de93096601..aaa63d0a809 100644 --- a/arch/arm/boot/dts/dbx5x0.dtsi +++ b/arch/arm/boot/dts/dbx5x0.dtsi @@ -191,8 +191,8 @@  		prcmu: prcmu@80157000 {  			compatible = "stericsson,db8500-prcmu"; -			reg = <0x80157000 0x1000>; -			reg-names = "prcmu"; +			reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>; +			reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";  			interrupts = <0 47 0x4>;  			#address-cells = <1>;  			#size-cells = <1>; diff --git 
a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts index 6ce3d17c3a2..fd36e1cca10 100644 --- a/arch/arm/boot/dts/imx28-m28evk.dts +++ b/arch/arm/boot/dts/imx28-m28evk.dts @@ -152,7 +152,6 @@  			i2c0: i2c@80058000 {  				pinctrl-names = "default";  				pinctrl-0 = <&i2c0_pins_a>; -				clock-frequency = <400000>;  				status = "okay";  				sgtl5000: codec@0a { diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts index e6cde8aa7ff..6c6a5442800 100644 --- a/arch/arm/boot/dts/imx28-sps1.dts +++ b/arch/arm/boot/dts/imx28-sps1.dts @@ -70,7 +70,6 @@  			i2c0: i2c@80058000 {  				pinctrl-names = "default";  				pinctrl-0 = <&i2c0_pins_a>; -				clock-frequency = <400000>;  				status = "okay";  				rtc: rtc@51 { diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 06ec460b458..281a223591f 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -91,6 +91,7 @@  			compatible = "arm,cortex-a9-twd-timer";  			reg = <0x00a00600 0x20>;  			interrupts = <1 13 0xf01>; +			clocks = <&clks 15>;  		};  		L2: l2-cache@00a02000 { diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts index bd83b8fc7c8..c3573be7b92 100644 --- a/arch/arm/boot/dts/kirkwood-goflexnet.dts +++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts @@ -77,6 +77,7 @@  		};  		nand@3000000 { +			chip-delay = <40>;  			status = "okay";  			partition@0 { diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts index 93c3afbef9e..3694e94f6e9 100644 --- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts +++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts @@ -96,11 +96,11 @@  				marvell,function = "gpio";  			};  			pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { -				marvell,pins = "mpp44"; +				marvell,pins = "mpp46";  				marvell,function = "gpio";  			};  			pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { -				
marvell,pins = "mpp45"; +				marvell,pins = "mpp47";  				marvell,function = "gpio";  			}; @@ -157,14 +157,14 @@  			gpios = <&gpio0 16 0>;  			linux,default-trigger = "default-on";  		}; -		health_led1 { +		rebuild_led { +			label = "status:white:rebuild_led"; +			gpios = <&gpio1 4 0>; +		}; +		health_led {  			label = "status:red:health_led";  			gpios = <&gpio1 5 0>;  		}; -		health_led2 { -			label = "status:white:health_led"; -			gpios = <&gpio1 4 0>; -		};  		backup_led {  			label = "status:blue:backup_led";  			gpios = <&gpio0 15 0>; diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi index 8aad00f81ed..f7bec3b1ba3 100644 --- a/arch/arm/boot/dts/orion5x.dtsi +++ b/arch/arm/boot/dts/orion5x.dtsi @@ -13,6 +13,9 @@  	compatible = "marvell,orion5x";  	interrupt-parent = <&intc>; +	aliases { +		gpio0 = &gpio0; +	};  	intc: interrupt-controller {  		compatible = "marvell,orion-intc", "marvell,intc";  		interrupt-controller; @@ -32,7 +35,9 @@  			#gpio-cells = <2>;  			gpio-controller;  			reg = <0x10100 0x40>; -			ngpio = <32>; +			ngpios = <32>; +			interrupt-controller; +			#interrupt-cells = <2>;  			interrupts = <6>, <7>, <8>, <9>;  		}; @@ -91,7 +96,7 @@  			reg = <0x90000 0x10000>,  			      <0xf2200000 0x800>;  			reg-names = "regs", "sram"; -			interrupts = <22>; +			interrupts = <28>;  			status = "okay";  		};  	}; diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index dc8dd0de5c0..53e68b16319 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -11,3 +11,6 @@ obj-$(CONFIG_SHARP_PARAM)	+= sharpsl_param.o  obj-$(CONFIG_SHARP_SCOOP)	+= scoop.o  obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o  obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o +obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o +AFLAGS_mcpm_head.o		:= -march=armv7-a +AFLAGS_vlock.o			:= -march=armv7-a diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c new file mode 100644 index 
00000000000..370236dd1a0 --- /dev/null +++ b/arch/arm/common/mcpm_entry.c @@ -0,0 +1,263 @@ +/* + * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM + * + * Created by:  Nicolas Pitre, March 2012 + * Copyright:   (C) 2012-2013  Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/irqflags.h> + +#include <asm/mcpm.h> +#include <asm/cacheflush.h> +#include <asm/idmap.h> +#include <asm/cputype.h> + +extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; + +void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) +{ +	unsigned long val = ptr ? virt_to_phys(ptr) : 0; +	mcpm_entry_vectors[cluster][cpu] = val; +	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); +} + +static const struct mcpm_platform_ops *platform_ops; + +int __init mcpm_platform_register(const struct mcpm_platform_ops *ops) +{ +	if (platform_ops) +		return -EBUSY; +	platform_ops = ops; +	return 0; +} + +int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster) +{ +	if (!platform_ops) +		return -EUNATCH; /* try not to shadow power_up errors */ +	might_sleep(); +	return platform_ops->power_up(cpu, cluster); +} + +typedef void (*phys_reset_t)(unsigned long); + +void mcpm_cpu_power_down(void) +{ +	phys_reset_t phys_reset; + +	BUG_ON(!platform_ops); +	BUG_ON(!irqs_disabled()); + +	/* +	 * Do this before calling into the power_down method, +	 * as it might not always be safe to do afterwards. +	 */ +	setup_mm_for_reboot(); + +	platform_ops->power_down(); + +	/* +	 * It is possible for a power_up request to happen concurrently +	 * with a power_down request for the same CPU. 
In this case the +	 * power_down method might not be able to actually enter a +	 * powered down state with the WFI instruction if the power_up +	 * method has removed the required reset condition.  The +	 * power_down method is then allowed to return. We must perform +	 * a re-entry in the kernel as if the power_up method just had +	 * deasserted reset on the CPU. +	 * +	 * To simplify race issues, the platform specific implementation +	 * must accommodate for the possibility of unordered calls to +	 * power_down and power_up with a usage count. Therefore, if a +	 * call to power_up is issued for a CPU that is not down, then +	 * the next call to power_down must not attempt a full shutdown +	 * but only do the minimum (normally disabling L1 cache and CPU +	 * coherency) and return just as if a concurrent power_up request +	 * had happened as described above. +	 */ + +	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); +	phys_reset(virt_to_phys(mcpm_entry_point)); + +	/* should never get here */ +	BUG(); +} + +void mcpm_cpu_suspend(u64 expected_residency) +{ +	phys_reset_t phys_reset; + +	BUG_ON(!platform_ops); +	BUG_ON(!irqs_disabled()); + +	/* Very similar to mcpm_cpu_power_down() */ +	setup_mm_for_reboot(); +	platform_ops->suspend(expected_residency); +	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); +	phys_reset(virt_to_phys(mcpm_entry_point)); +	BUG(); +} + +int mcpm_cpu_powered_up(void) +{ +	if (!platform_ops) +		return -EUNATCH; +	if (platform_ops->powered_up) +		platform_ops->powered_up(); +	return 0; +} + +struct sync_struct mcpm_sync; + +/* + * __mcpm_cpu_going_down: Indicates that the cpu is being torn down. + *    This must be called at the point of committing to teardown of a CPU. + *    The CPU cache (SCTRL.C bit) is expected to still be active. 
+ */ +void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) +{ +	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; +	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); +} + +/* + * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the + *    cluster can be torn down without disrupting this CPU. + *    To avoid deadlocks, this must be called before a CPU is powered down. + *    The CPU cache (SCTRL.C bit) is expected to be off. + *    However L2 cache might or might not be active. + */ +void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) +{ +	dmb(); +	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; +	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); +	dsb_sev(); +} + +/* + * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section. + * @state: the final state of the cluster: + *     CLUSTER_UP: no destructive teardown was done and the cluster has been + *         restored to the previous state (CPU cache still active); or + *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off + *         (CPU cache disabled, L2 cache either enabled or disabled). + */ +void __mcpm_outbound_leave_critical(unsigned int cluster, int state) +{ +	dmb(); +	mcpm_sync.clusters[cluster].cluster = state; +	sync_cache_w(&mcpm_sync.clusters[cluster].cluster); +	dsb_sev(); +} + +/* + * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section. + * This function should be called by the last man, after local CPU teardown + * is complete.  CPU cache expected to be active. + * + * Returns: + *     false: the critical section was not entered because an inbound CPU was + *         observed, or the cluster is already being set up; + *     true: the critical section was entered: it is now safe to tear down the + *         cluster. 
+ */ +bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) +{ +	unsigned int i; +	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; + +	/* Warn inbound CPUs that the cluster is being torn down: */ +	c->cluster = CLUSTER_GOING_DOWN; +	sync_cache_w(&c->cluster); + +	/* Back out if the inbound cluster is already in the critical region: */ +	sync_cache_r(&c->inbound); +	if (c->inbound == INBOUND_COMING_UP) +		goto abort; + +	/* +	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local +	 * teardown is complete on each CPU before tearing down the cluster. +	 * +	 * If any CPU has been woken up again from the DOWN state, then we +	 * shouldn't be taking the cluster down at all: abort in that case. +	 */ +	sync_cache_r(&c->cpus); +	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) { +		int cpustate; + +		if (i == cpu) +			continue; + +		while (1) { +			cpustate = c->cpus[i].cpu; +			if (cpustate != CPU_GOING_DOWN) +				break; + +			wfe(); +			sync_cache_r(&c->cpus[i].cpu); +		} + +		switch (cpustate) { +		case CPU_DOWN: +			continue; + +		default: +			goto abort; +		} +	} + +	return true; + +abort: +	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP); +	return false; +} + +int __mcpm_cluster_state(unsigned int cluster) +{ +	sync_cache_r(&mcpm_sync.clusters[cluster].cluster); +	return mcpm_sync.clusters[cluster].cluster; +} + +extern unsigned long mcpm_power_up_setup_phys; + +int __init mcpm_sync_init( +	void (*power_up_setup)(unsigned int affinity_level)) +{ +	unsigned int i, j, mpidr, this_cluster; + +	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync); +	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1)); + +	/* +	 * Set initial CPU and cluster states. +	 * Only one cluster is assumed to be active at this point. 
+	 */ +	for (i = 0; i < MAX_NR_CLUSTERS; i++) { +		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN; +		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP; +		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++) +			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN; +	} +	mpidr = read_cpuid_mpidr(); +	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); +	for_each_online_cpu(i) +		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP; +	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP; +	sync_cache_w(&mcpm_sync); + +	if (power_up_setup) { +		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup); +		sync_cache_w(&mcpm_power_up_setup_phys); +	} + +	return 0; +} diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S new file mode 100644 index 00000000000..8178705c4b2 --- /dev/null +++ b/arch/arm/common/mcpm_head.S @@ -0,0 +1,219 @@ +/* + * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM + * + * Created by:  Nicolas Pitre, March 2012 + * Copyright:   (C) 2012-2013  Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * + * Refer to Documentation/arm/cluster-pm-race-avoidance.txt + * for details of the synchronisation algorithms used here. 
+ */ + +#include <linux/linkage.h> +#include <asm/mcpm.h> + +#include "vlock.h" + +.if MCPM_SYNC_CLUSTER_CPUS +.error "cpus must be the first member of struct mcpm_sync_struct" +.endif + +	.macro	pr_dbg	string +#if defined(CONFIG_DEBUG_LL) && defined(DEBUG) +	b	1901f +1902:	.asciz	"CPU" +1903:	.asciz	" cluster" +1904:	.asciz	": \string" +	.align +1901:	adr	r0, 1902b +	bl	printascii +	mov	r0, r9 +	bl	printhex8 +	adr	r0, 1903b +	bl	printascii +	mov	r0, r10 +	bl	printhex8 +	adr	r0, 1904b +	bl	printascii +#endif +	.endm + +	.arm +	.align + +ENTRY(mcpm_entry_point) + + THUMB(	adr	r12, BSYM(1f)	) + THUMB(	bx	r12		) + THUMB(	.thumb			) +1: +	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR +	ubfx	r9, r0, #0, #8			@ r9 = cpu +	ubfx	r10, r0, #8, #8			@ r10 = cluster +	mov	r3, #MAX_CPUS_PER_CLUSTER +	mla	r4, r3, r10, r9			@ r4 = canonical CPU index +	cmp	r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS) +	blo	2f + +	/* We didn't expect this CPU.  Try to cheaply make it quiet. */ +1:	wfi +	wfe +	b	1b + +2:	pr_dbg	"kernel mcpm_entry_point\n" + +	/* +	 * MMU is off so we need to get to various variables in a +	 * position independent way. +	 */ +	adr	r5, 3f +	ldmia	r5, {r6, r7, r8, r11} +	add	r6, r5, r6			@ r6 = mcpm_entry_vectors +	ldr	r7, [r5, r7]			@ r7 = mcpm_power_up_setup_phys +	add	r8, r5, r8			@ r8 = mcpm_sync +	add	r11, r5, r11			@ r11 = first_man_locks + +	mov	r0, #MCPM_SYNC_CLUSTER_SIZE +	mla	r8, r0, r10, r8			@ r8 = sync cluster base + +	@ Signal that this CPU is coming UP: +	mov	r0, #CPU_COMING_UP +	mov	r5, #MCPM_SYNC_CPU_SIZE +	mla	r5, r9, r5, r8			@ r5 = sync cpu address +	strb	r0, [r5] + +	@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN +	@ state, because there is at least one active CPU (this CPU). + +	mov	r0, #VLOCK_SIZE +	mla	r11, r0, r10, r11		@ r11 = cluster first man lock +	mov	r0, r11 +	mov	r1, r9				@ cpu +	bl	vlock_trylock			@ implies DMB + +	cmp	r0, #0				@ failed to get the lock? 
+	bne	mcpm_setup_wait		@ wait for cluster setup if so + +	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER] +	cmp	r0, #CLUSTER_UP			@ cluster already up? +	bne	mcpm_setup			@ if not, set up the cluster + +	@ Otherwise, release the first man lock and skip setup: +	mov	r0, r11 +	bl	vlock_unlock +	b	mcpm_setup_complete + +mcpm_setup: +	@ Control dependency implies strb not observable before previous ldrb. + +	@ Signal that the cluster is being brought up: +	mov	r0, #INBOUND_COMING_UP +	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND] +	dmb + +	@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this +	@ point onwards will observe INBOUND_COMING_UP and abort. + +	@ Wait for any previously-pending cluster teardown operations to abort +	@ or complete: +mcpm_teardown_wait: +	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER] +	cmp	r0, #CLUSTER_GOING_DOWN +	bne	first_man_setup +	wfe +	b	mcpm_teardown_wait + +first_man_setup: +	dmb + +	@ If the outbound gave up before teardown started, skip cluster setup: + +	cmp	r0, #CLUSTER_UP +	beq	mcpm_setup_leave + +	@ power_up_setup is now responsible for setting up the cluster: + +	cmp	r7, #0 +	mov	r0, #1		@ second (cluster) affinity level +	blxne	r7		@ Call power_up_setup if defined +	dmb + +	mov	r0, #CLUSTER_UP +	strb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER] +	dmb + +mcpm_setup_leave: +	@ Leave the cluster setup critical section: + +	mov	r0, #INBOUND_NOT_COMING_UP +	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND] +	dsb +	sev + +	mov	r0, r11 +	bl	vlock_unlock	@ implies DMB +	b	mcpm_setup_complete + +	@ In the contended case, non-first men wait here for cluster setup +	@ to complete: +mcpm_setup_wait: +	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER] +	cmp	r0, #CLUSTER_UP +	wfene +	bne	mcpm_setup_wait +	dmb + +mcpm_setup_complete: +	@ If a platform-specific CPU setup hook is needed, it is +	@ called from here. 
+ +	cmp	r7, #0 +	mov	r0, #0		@ first (CPU) affinity level +	blxne	r7		@ Call power_up_setup if defined +	dmb + +	@ Mark the CPU as up: + +	mov	r0, #CPU_UP +	strb	r0, [r5] + +	@ Observability order of CPU_UP and opening of the gate does not matter. + +mcpm_entry_gated: +	ldr	r5, [r6, r4, lsl #2]		@ r5 = CPU entry vector +	cmp	r5, #0 +	wfeeq +	beq	mcpm_entry_gated +	dmb + +	pr_dbg	"released\n" +	bx	r5 + +	.align	2 + +3:	.word	mcpm_entry_vectors - . +	.word	mcpm_power_up_setup_phys - 3b +	.word	mcpm_sync - 3b +	.word	first_man_locks - 3b + +ENDPROC(mcpm_entry_point) + +	.bss + +	.align	CACHE_WRITEBACK_ORDER +	.type	first_man_locks, #object +first_man_locks: +	.space	VLOCK_SIZE * MAX_NR_CLUSTERS +	.align	CACHE_WRITEBACK_ORDER + +	.type	mcpm_entry_vectors, #object +ENTRY(mcpm_entry_vectors) +	.space	4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER + +	.type	mcpm_power_up_setup_phys, #object +ENTRY(mcpm_power_up_setup_phys) +	.space  4		@ set by mcpm_sync_init() diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c new file mode 100644 index 00000000000..52b88d81b7b --- /dev/null +++ b/arch/arm/common/mcpm_platsmp.c @@ -0,0 +1,92 @@ +/* + * linux/arch/arm/mach-vexpress/mcpm_platsmp.c + * + * Created by:  Nicolas Pitre, November 2012 + * Copyright:   (C) 2012-2013  Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Code to handle secondary CPU bringup and hotplug for the cluster power API. 
+ */ + +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/spinlock.h> + +#include <linux/irqchip/arm-gic.h> + +#include <asm/mcpm.h> +#include <asm/smp.h> +#include <asm/smp_plat.h> + +static void __init simple_smp_init_cpus(void) +{ +} + +static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) +{ +	unsigned int mpidr, pcpu, pcluster, ret; +	extern void secondary_startup(void); + +	mpidr = cpu_logical_map(cpu); +	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); +	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); +	pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n", +		 __func__, cpu, pcpu, pcluster); + +	mcpm_set_entry_vector(pcpu, pcluster, NULL); +	ret = mcpm_cpu_power_up(pcpu, pcluster); +	if (ret) +		return ret; +	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup); +	arch_send_wakeup_ipi_mask(cpumask_of(cpu)); +	dsb_sev(); +	return 0; +} + +static void __cpuinit mcpm_secondary_init(unsigned int cpu) +{ +	mcpm_cpu_powered_up(); +	gic_secondary_init(0); +} + +#ifdef CONFIG_HOTPLUG_CPU + +static int mcpm_cpu_disable(unsigned int cpu) +{ +	/* +	 * We assume all CPUs may be shut down. +	 * This would be the hook to use for eventual Secure +	 * OS migration requests as described in the PSCI spec. 
+	 */ +	return 0; +} + +static void mcpm_cpu_die(unsigned int cpu) +{ +	unsigned int mpidr, pcpu, pcluster; +	mpidr = read_cpuid_mpidr(); +	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); +	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); +	mcpm_set_entry_vector(pcpu, pcluster, NULL); +	mcpm_cpu_power_down(); +} + +#endif + +static struct smp_operations __initdata mcpm_smp_ops = { +	.smp_init_cpus		= simple_smp_init_cpus, +	.smp_boot_secondary	= mcpm_boot_secondary, +	.smp_secondary_init	= mcpm_secondary_init, +#ifdef CONFIG_HOTPLUG_CPU +	.cpu_disable		= mcpm_cpu_disable, +	.cpu_die		= mcpm_cpu_die, +#endif +}; + +void __init mcpm_smp_set_ops(void) +{ +	smp_set_ops(&mcpm_smp_ops); +} diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S new file mode 100644 index 00000000000..ff198583f68 --- /dev/null +++ b/arch/arm/common/vlock.S @@ -0,0 +1,108 @@ +/* + * vlock.S - simple voting lock implementation for ARM + * + * Created by:	Dave Martin, 2012-08-16 + * Copyright:	(C) 2012-2013  Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * + * This algorithm is described in more detail in + * Documentation/arm/vlocks.txt. + */ + +#include <linux/linkage.h> +#include "vlock.h" + +/* Select different code if voting flags  can fit in a single word. */ +#if VLOCK_VOTING_SIZE > 4 +#define FEW(x...) +#define MANY(x...) x +#else +#define FEW(x...) x +#define MANY(x...) 
+#endif + +@ voting lock for first-man coordination + +.macro voting_begin rbase:req, rcpu:req, rscratch:req +	mov	\rscratch, #1 +	strb	\rscratch, [\rbase, \rcpu] +	dmb +.endm + +.macro voting_end rbase:req, rcpu:req, rscratch:req +	dmb +	mov	\rscratch, #0 +	strb	\rscratch, [\rbase, \rcpu] +	dsb +	sev +.endm + +/* + * The vlock structure must reside in Strongly-Ordered or Device memory. + * This implementation deliberately eliminates most of the barriers which + * would be required for other memory types, and assumes that independent + * writes to neighbouring locations within a cacheline do not interfere + * with one another. + */ + +@ r0: lock structure base +@ r1: CPU ID (0-based index within cluster) +ENTRY(vlock_trylock) +	add	r1, r1, #VLOCK_VOTING_OFFSET + +	voting_begin	r0, r1, r2 + +	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET]	@ check whether lock is held +	cmp	r2, #VLOCK_OWNER_NONE +	bne	trylock_fail			@ fail if so + +	@ Control dependency implies strb not observable before previous ldrb. 
+ +	strb	r1, [r0, #VLOCK_OWNER_OFFSET]	@ submit my vote + +	voting_end	r0, r1, r2		@ implies DMB + +	@ Wait for the current round of voting to finish: + + MANY(	mov	r3, #VLOCK_VOTING_OFFSET			) +0: + MANY(	ldr	r2, [r0, r3]					) + FEW(	ldr	r2, [r0, #VLOCK_VOTING_OFFSET]			) +	cmp	r2, #0 +	wfene +	bne	0b + MANY(	add	r3, r3, #4					) + MANY(	cmp	r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE	) + MANY(	bne	0b						) + +	@ Check who won: + +	dmb +	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET] +	eor	r0, r1, r2			@ zero if I won, else nonzero +	bx	lr + +trylock_fail: +	voting_end	r0, r1, r2 +	mov	r0, #1				@ nonzero indicates that I lost +	bx	lr +ENDPROC(vlock_trylock) + +@ r0: lock structure base +ENTRY(vlock_unlock) +	dmb +	mov	r1, #VLOCK_OWNER_NONE +	strb	r1, [r0, #VLOCK_OWNER_OFFSET] +	dsb +	sev +	bx	lr +ENDPROC(vlock_unlock) diff --git a/arch/arm/common/vlock.h b/arch/arm/common/vlock.h new file mode 100644 index 00000000000..3b441475a59 --- /dev/null +++ b/arch/arm/common/vlock.h @@ -0,0 +1,29 @@ +/* + * vlock.h - simple voting lock implementation + * + * Created by:	Dave Martin, 2012-08-16 + * Copyright:	(C) 2012-2013  Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + */ + +#ifndef __VLOCK_H +#define __VLOCK_H + +#include <asm/mcpm.h> + +/* Offsets and sizes are rounded to a word (4 bytes) */ +#define VLOCK_OWNER_OFFSET	0 +#define VLOCK_VOTING_OFFSET	4 +#define VLOCK_VOTING_SIZE	((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4) +#define VLOCK_SIZE		(VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE) +#define VLOCK_OWNER_NONE	0 + +#endif /* ! 
__VLOCK_H */ diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index c79f61faa3a..da1c77d3932 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -243,6 +243,29 @@ typedef struct {  #define ATOMIC64_INIT(i) { (i) } +#ifdef CONFIG_ARM_LPAE +static inline u64 atomic64_read(const atomic64_t *v) +{ +	u64 result; + +	__asm__ __volatile__("@ atomic64_read\n" +"	ldrd	%0, %H0, [%1]" +	: "=&r" (result) +	: "r" (&v->counter), "Qo" (v->counter) +	); + +	return result; +} + +static inline void atomic64_set(atomic64_t *v, u64 i) +{ +	__asm__ __volatile__("@ atomic64_set\n" +"	strd	%2, %H2, [%1]" +	: "=Qo" (v->counter) +	: "r" (&v->counter), "r" (i) +	); +} +#else  static inline u64 atomic64_read(const atomic64_t *v)  {  	u64 result; @@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)  	: "r" (&v->counter), "r" (i)  	: "cc");  } +#endif  static inline void atomic64_add(u64 i, atomic64_t *v)  { diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index e1489c54cd1..bff71388e72 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)  		flush_cache_all();  } +/* + * Memory synchronization helpers for mixed cached vs non cached accesses. + * + * Some synchronization algorithms have to set states in memory with the + * cache enabled or disabled depending on the code path.  It is crucial + * to always ensure proper cache maintenance to update main memory right + * away in that case. + * + * Any cached write must be followed by a cache clean operation. + * Any cached read must be preceded by a cache invalidate operation. + * Yet, in the read case, a cache flush i.e. atomic clean+invalidate + * operation is needed to avoid discarding possible concurrent writes to the + * accessed memory. 
+ * + * Also, in order to prevent a cached writer from interfering with an + * adjacent non-cached writer, each state variable must be located to + * a separate cache line. + */ + +/* + * This needs to be >= the max cache writeback size of all + * supported platforms included in the current kernel configuration. + * This is used to align state variables to their own cache lines. + */ +#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */ +#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER) + +/* + * There is no __cpuc_clean_dcache_area but we use it anyway for + * code intent clarity, and alias it to __cpuc_flush_dcache_area. + */ +#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area + +/* + * Ensure preceding writes to *p by this CPU are visible to + * subsequent reads by other CPUs: + */ +static inline void __sync_cache_range_w(volatile void *p, size_t size) +{ +	char *_p = (char *)p; + +	__cpuc_clean_dcache_area(_p, size); +	outer_clean_range(__pa(_p), __pa(_p + size)); +} + +/* + * Ensure preceding writes to *p by other CPUs are visible to + * subsequent reads by this CPU.  We must be careful not to + * discard data simultaneously written by another CPU, hence the + * usage of flush rather than invalidate operations. + */ +static inline void __sync_cache_range_r(volatile void *p, size_t size) +{ +	char *_p = (char *)p; + +#ifdef CONFIG_OUTER_CACHE +	if (outer_cache.flush_range) { +		/* +		 * Ensure dirty data migrated from other CPUs into our cache +		 * are cleaned out safely before the outer cache is cleaned: +		 */ +		__cpuc_clean_dcache_area(_p, size); + +		/* Clean and invalidate stale data for *p from outer ... */ +		outer_flush_range(__pa(_p), __pa(_p + size)); +	} +#endif + +	/* ... 
and inner cache: */ +	__cpuc_flush_dcache_area(_p, size); +} + +#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr)) +#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr)) +  #endif diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 5ef4d8015a6..1f3262e99d8 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -42,6 +42,8 @@  #define vectors_high()	(0)  #endif +#ifdef CONFIG_CPU_CP15 +  extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */  extern unsigned long cr_alignment;	/* defined in entry-armv.S */ @@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val)  	isb();  } -#endif +#else /* ifdef CONFIG_CPU_CP15 */ + +/* + * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the + * minds of the developers). Yielding 0 for machines without a cp15 (and making + * it read-only) is fine for most cases and saves quite some #ifdeffery. + */ +#define cr_no_alignment	UL(0) +#define cr_alignment	UL(0) + +#endif /* ifdef CONFIG_CPU_CP15 / else */ + +#endif /* ifndef __ASSEMBLY__ */  #endif diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index ad41ec2471e..7652712d1d1 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -38,6 +38,24 @@  #define MPIDR_AFFINITY_LEVEL(mpidr, level) \  	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK) +#define ARM_CPU_IMP_ARM			0x41 +#define ARM_CPU_IMP_INTEL		0x69 + +#define ARM_CPU_PART_ARM1136		0xB360 +#define ARM_CPU_PART_ARM1156		0xB560 +#define ARM_CPU_PART_ARM1176		0xB760 +#define ARM_CPU_PART_ARM11MPCORE	0xB020 +#define ARM_CPU_PART_CORTEX_A8		0xC080 +#define ARM_CPU_PART_CORTEX_A9		0xC090 +#define ARM_CPU_PART_CORTEX_A5		0xC050 +#define ARM_CPU_PART_CORTEX_A15		0xC0F0 +#define ARM_CPU_PART_CORTEX_A7		0xC070 + +#define ARM_CPU_XSCALE_ARCH_MASK	0xe000 +#define ARM_CPU_XSCALE_ARCH_V1		0x2000 +#define ARM_CPU_XSCALE_ARCH_V2		0x4000 +#define 
ARM_CPU_XSCALE_ARCH_V3		0x6000 +  extern unsigned int processor_id;  #ifdef CONFIG_CPU_CP15 @@ -50,6 +68,7 @@ extern unsigned int processor_id;  		    : "cc");						\  		__val;							\  	}) +  #define read_cpuid_ext(ext_reg)						\  	({								\  		unsigned int __val;					\ @@ -59,29 +78,24 @@ extern unsigned int processor_id;  		    : "cc");						\  		__val;							\  	}) -#else -#define read_cpuid(reg) (processor_id) -#define read_cpuid_ext(reg) 0 -#endif -#define ARM_CPU_IMP_ARM			0x41 -#define ARM_CPU_IMP_INTEL		0x69 +#else /* ifdef CONFIG_CPU_CP15 */ -#define ARM_CPU_PART_ARM1136		0xB360 -#define ARM_CPU_PART_ARM1156		0xB560 -#define ARM_CPU_PART_ARM1176		0xB760 -#define ARM_CPU_PART_ARM11MPCORE	0xB020 -#define ARM_CPU_PART_CORTEX_A8		0xC080 -#define ARM_CPU_PART_CORTEX_A9		0xC090 -#define ARM_CPU_PART_CORTEX_A5		0xC050 -#define ARM_CPU_PART_CORTEX_A15		0xC0F0 -#define ARM_CPU_PART_CORTEX_A7		0xC070 +/* + * read_cpuid and read_cpuid_ext should only ever be called on machines that + * have cp15 so warn on other usages. + */ +#define read_cpuid(reg)							\ +	({								\ +		WARN_ON_ONCE(1);					\ +		0;							\ +	}) -#define ARM_CPU_XSCALE_ARCH_MASK	0xe000 -#define ARM_CPU_XSCALE_ARCH_V1		0x2000 -#define ARM_CPU_XSCALE_ARCH_V2		0x4000 -#define ARM_CPU_XSCALE_ARCH_V3		0x6000 +#define read_cpuid_ext(reg) read_cpuid(reg) + +#endif /* ifdef CONFIG_CPU_CP15 / else */ +#ifdef CONFIG_CPU_CP15  /*   * The CPU ID never changes at run time, so we might as well tell the   * compiler that it's constant.  
Use this function to read the CPU ID @@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)  	return read_cpuid(CPUID_ID);  } +#else /* ifdef CONFIG_CPU_CP15 */ + +static inline unsigned int __attribute_const__ read_cpuid_id(void) +{ +	return processor_id; +} + +#endif /* ifdef CONFIG_CPU_CP15 / else */ +  static inline unsigned int __attribute_const__ read_cpuid_implementor(void)  {  	return (read_cpuid_id() & 0xFF000000) >> 24; diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index 720799fd3a8..dff714d886d 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,7 +24,7 @@ extern struct arm_delay_ops {  	void (*delay)(unsigned long);  	void (*const_udelay)(unsigned long);  	void (*udelay)(unsigned long); -	bool const_clock; +	unsigned long ticks_per_jiffy;  } arm_delay_ops;  #define __delay(n)		arm_delay_ops.delay(n) diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index cca9f15704e..ea289e1435e 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -19,14 +19,6 @@  #undef _CACHE  #undef MULTI_CACHE -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE v3 -# endif -#endif -  #if defined(CONFIG_CPU_CACHE_V4)  # ifdef _CACHE  #  define MULTI_CACHE 1 diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h index 8cacbcda76d..b6e9f2c108b 100644 --- a/arch/arm/include/asm/glue-df.h +++ b/arch/arm/include/asm/glue-df.h @@ -18,12 +18,12 @@   *	================   *   *	We have the following to choose from: - *	  arm6          - ARM6 style   *	  arm7		- ARM7 style   *	  v4_early	- ARMv4 without Thumb early abort handler   *	  v4t_late	- ARMv4 with Thumb late abort handler   *	  v4t_early	- ARMv4 with Thumb early abort handler - *	  v5tej_early	- ARMv5 with Thumb and Java early abort handler + *	  v5t_early	- ARMv5 with Thumb early abort handler + *	  
v5tj_early	- ARMv5 with Thumb and Java early abort handler   *	  xscale	- ARMv5 with Thumb with Xscale extensions   *	  v6_early	- ARMv6 generic early abort handler   *	  v7_early	- ARMv7 generic early abort handler @@ -39,19 +39,19 @@  # endif  #endif -#ifdef CONFIG_CPU_ABRT_LV4T +#ifdef CONFIG_CPU_ABRT_EV4  # ifdef CPU_DABORT_HANDLER  #  define MULTI_DABORT 1  # else -#  define CPU_DABORT_HANDLER v4t_late_abort +#  define CPU_DABORT_HANDLER v4_early_abort  # endif  #endif -#ifdef CONFIG_CPU_ABRT_EV4 +#ifdef CONFIG_CPU_ABRT_LV4T  # ifdef CPU_DABORT_HANDLER  #  define MULTI_DABORT 1  # else -#  define CPU_DABORT_HANDLER v4_early_abort +#  define CPU_DABORT_HANDLER v4t_late_abort  # endif  #endif @@ -63,19 +63,19 @@  # endif  #endif -#ifdef CONFIG_CPU_ABRT_EV5TJ +#ifdef CONFIG_CPU_ABRT_EV5T  # ifdef CPU_DABORT_HANDLER  #  define MULTI_DABORT 1  # else -#  define CPU_DABORT_HANDLER v5tj_early_abort +#  define CPU_DABORT_HANDLER v5t_early_abort  # endif  #endif -#ifdef CONFIG_CPU_ABRT_EV5T +#ifdef CONFIG_CPU_ABRT_EV5TJ  # ifdef CPU_DABORT_HANDLER  #  define MULTI_DABORT 1  # else -#  define CPU_DABORT_HANDLER v5t_early_abort +#  define CPU_DABORT_HANDLER v5tj_early_abort  # endif  #endif diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 02fe2fbe247..ed94b1a366a 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);   * IOP3XX processor registers   */  #define IOP3XX_PERIPHERAL_PHYS_BASE	0xffffe000 -#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfeffe000 +#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfedfe000  #define IOP3XX_PERIPHERAL_SIZE		0x00002000  #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\  					IOP3XX_PERIPHERAL_SIZE - 1) diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 8c5e828f484..91b99abe7a9 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ 
-41,6 +41,13 @@ extern void kunmap_high(struct page *page);  #endif  #endif +/* + * Needed to be able to broadcast the TLB invalidation for kmap. + */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif +  #ifdef ARCH_NEEDS_KMAP_HIGH_GET  extern void *kmap_high_get(struct page *page);  #else diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 7c3d813e15d..124623e5ef1 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -211,4 +211,8 @@  #define HSR_HVC_IMM_MASK	((1UL << 16) - 1) +#define HSR_DABT_S1PTW		(1U << 7) +#define HSR_DABT_CM		(1U << 8) +#define HSR_DABT_EA		(1U << 9) +  #endif /* __ARM_KVM_ARM_H__ */ diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index e4956f4e23e..18d50322a9e 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];  extern void __kvm_tlb_flush_vmid(struct kvm *kvm);  extern void __kvm_flush_vm_context(void); -extern void __kvm_tlb_flush_vmid(struct kvm *kvm); +extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);  extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);  #endif diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index fd611996bfb..82b4babead2 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -22,11 +22,12 @@  #include <linux/kvm_host.h>  #include <asm/kvm_asm.h>  #include <asm/kvm_mmio.h> +#include <asm/kvm_arm.h> -u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); -u32 *vcpu_spsr(struct kvm_vcpu *vcpu); +unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); +unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); -int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); +bool kvm_condition_valid(struct kvm_vcpu *vcpu);  void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);  void kvm_inject_undefined(struct kvm_vcpu *vcpu);  void 
kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); @@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)  	return 1;  } -static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) +static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)  { -	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; +	return &vcpu->arch.regs.usr_regs.ARM_pc;  } -static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) +static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)  { -	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; +	return &vcpu->arch.regs.usr_regs.ARM_cpsr;  }  static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) @@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)  	return reg == 15;  } +static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) +{ +	return vcpu->arch.fault.hsr; +} + +static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu) +{ +	return vcpu->arch.fault.hxfar; +} + +static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu) +{ +	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8; +} + +static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu) +{ +	return vcpu->arch.fault.hyp_pc; +} + +static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; +} + +static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR; +} + +static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; +} + +static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) +{ +	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; +} + +static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA; +} + +static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; +} + +/* Get Access Size from a data abort */ 
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) +{ +	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) { +	case 0: +		return 1; +	case 1: +		return 2; +	case 2: +		return 4; +	default: +		kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); +		return -EFAULT; +	} +} + +/* This one is not specific to Data Abort */ +static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_get_hsr(vcpu) & HSR_IL; +} + +static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; +} + +static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; +} + +static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; +} + +static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; +} +  #endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index d1736a53b12..0c4e643d939 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {  	void *objects[KVM_NR_MEM_OBJS];  }; +struct kvm_vcpu_fault_info { +	u32 hsr;		/* Hyp Syndrome Register */ +	u32 hxfar;		/* Hyp Data/Inst. 
Fault Address Register */ +	u32 hpfar;		/* Hyp IPA Fault Address Register */ +	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */ +}; + +typedef struct vfp_hard_struct kvm_kernel_vfp_t; +  struct kvm_vcpu_arch {  	struct kvm_regs regs; @@ -93,13 +102,11 @@ struct kvm_vcpu_arch {  	u32 midr;  	/* Exception Information */ -	u32 hsr;		/* Hyp Syndrome Register */ -	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */ -	u32 hpfar;		/* Hyp IPA Fault Address Register */ +	struct kvm_vcpu_fault_info fault;  	/* Floating point registers (VFP and Advanced SIMD/NEON) */ -	struct vfp_hard_struct vfp_guest; -	struct vfp_hard_struct *vfp_host; +	kvm_kernel_vfp_t vfp_guest; +	kvm_kernel_vfp_t *vfp_host;  	/* VGIC state */  	struct vgic_cpu vgic_cpu; @@ -122,9 +129,6 @@ struct kvm_vcpu_arch {  	/* Interrupt related fields */  	u32 irq_lines;		/* IRQ and FIQ levels */ -	/* Hyp exception information */ -	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */ -  	/* Cache some mmu pages needed inside spinlock regions */  	struct kvm_mmu_memory_cache mmu_page_cache; @@ -181,4 +185,26 @@ struct kvm_one_reg;  int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);  int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, +		int exception_index); + +static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr, +				       unsigned long hyp_stack_ptr, +				       unsigned long vector_ptr) +{ +	unsigned long pgd_low, pgd_high; + +	pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); +	pgd_high = (pgd_ptr >> 32ULL); + +	/* +	 * Call initialization code, and switch to the full blown +	 * HYP code. The init code doesn't need to preserve these registers as +	 * r1-r3 and r12 are already callee save according to the AAPCS. +	 * Note that we slightly misuse the prototype by casing the pgd_low to +	 * a void *. 
+	 */ +	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); +} +  #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 421a20b3487..970f3b5fa10 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -19,6 +19,18 @@  #ifndef __ARM_KVM_MMU_H__  #define __ARM_KVM_MMU_H__ +#include <asm/cacheflush.h> +#include <asm/pgalloc.h> +#include <asm/idmap.h> + +/* + * We directly use the kernel VA for the HYP, as we can directly share + * the mapping (HTTBR "covers" TTBR1). + */ +#define HYP_PAGE_OFFSET_MASK	(~0UL) +#define HYP_PAGE_OFFSET		PAGE_OFFSET +#define KERN_TO_HYP(kva)	(kva) +  int create_hyp_mappings(void *from, void *to);  int create_hyp_io_mappings(void *from, void *to, phys_addr_t);  void free_hyp_pmds(void); @@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);  int kvm_mmu_init(void);  void kvm_clear_hyp_idmap(void); +static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) +{ +	pte_val(*pte) = new_pte; +	/* +	 * flush_pmd_entry just takes a void pointer and cleans the necessary +	 * cache entries, so we can reuse the function for ptes. 
+	 */ +	flush_pmd_entry(pte); +} +  static inline bool kvm_is_write_fault(unsigned long hsr)  {  	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; @@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)  		return true;  } +static inline void kvm_clean_pgd(pgd_t *pgd) +{ +	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); +} + +static inline void kvm_clean_pmd_entry(pmd_t *pmd) +{ +	clean_pmd_entry(pmd); +} + +static inline void kvm_clean_pte(pte_t *pte) +{ +	clean_pte_table(pte); +} + +static inline void kvm_set_s2pte_writable(pte_t *pte) +{ +	pte_val(*pte) |= L_PTE_S2_RDWR; +} + +struct kvm; + +static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) +{ +	/* +	 * If we are going to insert an instruction page and the icache is +	 * either VIPT or PIPT, there is a potential problem where the host +	 * (or another VM) may have used the same page as this guest, and we +	 * read incorrect data from the icache.  If we're using a PIPT cache, +	 * we can invalidate just that page, but if we are using a VIPT cache +	 * we need to invalidate the entire icache - damn shame - as written +	 * in the ARM ARM (DDI 0406C.b - Page B3-1393). +	 * +	 * VIVT caches are tagged using both the ASID and the VMID and doesn't +	 * need any kind of flushing (DDI 0406C.b - Page B3-1392). 
+	 */ +	if (icache_is_pipt()) { +		unsigned long hva = gfn_to_hva(kvm, gfn); +		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE); +	} else if (!icache_is_vivt_asid_tagged()) { +		/* any kind of VIPT cache */ +		__flush_icache_all(); +	} +} +  #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h index ab97207d9cd..343744e4809 100644 --- a/arch/arm/include/asm/kvm_vgic.h +++ b/arch/arm/include/asm/kvm_vgic.h @@ -21,7 +21,6 @@  #include <linux/kernel.h>  #include <linux/kvm.h> -#include <linux/kvm_host.h>  #include <linux/irqreturn.h>  #include <linux/spinlock.h>  #include <linux/types.h> diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 5cf2e979b4b..7d2c3c84380 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -30,6 +30,11 @@ struct hw_pci {  	void		(*postinit)(void);  	u8		(*swizzle)(struct pci_dev *dev, u8 *pin);  	int		(*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); +	resource_size_t (*align_resource)(struct pci_dev *dev, +					  const struct resource *res, +					  resource_size_t start, +					  resource_size_t size, +					  resource_size_t align);  };  /* @@ -51,6 +56,12 @@ struct pci_sys_data {  	u8		(*swizzle)(struct pci_dev *, u8 *);  					/* IRQ mapping				*/  	int		(*map_irq)(const struct pci_dev *, u8, u8); +					/* Resource alignement requirements	*/ +	resource_size_t (*align_resource)(struct pci_dev *dev, +					  const struct resource *res, +					  resource_size_t start, +					  resource_size_t size, +					  resource_size_t align);  	void		*private_data;	/* platform controller private data	*/  }; diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h new file mode 100644 index 00000000000..0f7b7620e9a --- /dev/null +++ b/arch/arm/include/asm/mcpm.h @@ -0,0 +1,209 @@ +/* + * arch/arm/include/asm/mcpm.h + * + * Created by:  Nicolas Pitre, April 2012 + * Copyright:   (C) 2012-2013  Linaro Limited + * + * This 
program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MCPM_H +#define MCPM_H + +/* + * Maximum number of possible clusters / CPUs per cluster. + * + * This should be sufficient for quite a while, while keeping the + * (assembly) code simpler.  When this starts to grow then we'll have + * to consider dynamic allocation. + */ +#define MAX_CPUS_PER_CLUSTER	4 +#define MAX_NR_CLUSTERS		2 + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <asm/cacheflush.h> + +/* + * Platform specific code should use this symbol to set up secondary + * entry location for processors to use when released from reset. + */ +extern void mcpm_entry_point(void); + +/* + * This is used to indicate where the given CPU from given cluster should + * branch once it is ready to re-enter the kernel using ptr, or NULL if it + * should be gated.  A gated CPU is held in a WFE loop until its vector + * becomes non NULL. + */ +void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr); + +/* + * CPU/cluster power operations API for higher subsystems to use. + */ + +/** + * mcpm_cpu_power_up - make given CPU in given cluster runable + * + * @cpu: CPU number within given cluster + * @cluster: cluster number for the CPU + * + * The identified CPU is brought out of reset.  If the cluster was powered + * down then it is brought up as well, taking care not to let the other CPUs + * in the cluster run, and ensuring appropriate cluster setup. + * + * Caller must ensure the appropriate entry vector is initialized with + * mcpm_set_entry_vector() prior to calling this. + * + * This must be called in a sleepable context.  However, the implementation + * is strongly encouraged to return early and let the operation happen + * asynchronously, especially when significant delays are expected. 
+ * + * If the operation cannot be performed then an error code is returned. + */ +int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster); + +/** + * mcpm_cpu_power_down - power the calling CPU down + * + * The calling CPU is powered down. + * + * If this CPU is found to be the "last man standing" in the cluster + * then the cluster is prepared for power-down too. + * + * This must be called with interrupts disabled. + * + * This does not return.  Re-entry in the kernel is expected via + * mcpm_entry_point. + */ +void mcpm_cpu_power_down(void); + +/** + * mcpm_cpu_suspend - bring the calling CPU in a suspended state + * + * @expected_residency: duration in microseconds the CPU is expected + *			to remain suspended, or 0 if unknown/infinity. + * + * The calling CPU is suspended.  The expected residency argument is used + * as a hint by the platform specific backend to implement the appropriate + * sleep state level according to the knowledge it has on wake-up latency + * for the given hardware. + * + * If this CPU is found to be the "last man standing" in the cluster + * then the cluster may be prepared for power-down too, if the expected + * residency makes it worthwhile. + * + * This must be called with interrupts disabled. + * + * This does not return.  Re-entry in the kernel is expected via + * mcpm_entry_point. + */ +void mcpm_cpu_suspend(u64 expected_residency); + +/** + * mcpm_cpu_powered_up - housekeeping workafter a CPU has been powered up + * + * This lets the platform specific backend code perform needed housekeeping + * work.  This must be called by the newly activated CPU as soon as it is + * fully operational in kernel space, before it enables interrupts. + * + * If the operation cannot be performed then an error code is returned. + */ +int mcpm_cpu_powered_up(void); + +/* + * Platform specific methods used in the implementation of the above API. 
+ */ +struct mcpm_platform_ops { +	int (*power_up)(unsigned int cpu, unsigned int cluster); +	void (*power_down)(void); +	void (*suspend)(u64); +	void (*powered_up)(void); +}; + +/** + * mcpm_platform_register - register platform specific power methods + * + * @ops: mcpm_platform_ops structure to register + * + * An error is returned if the registration has been done previously. + */ +int __init mcpm_platform_register(const struct mcpm_platform_ops *ops); + +/* Synchronisation structures for coordinating safe cluster setup/teardown: */ + +/* + * When modifying this structure, make sure you update the MCPM_SYNC_ defines + * to match. + */ +struct mcpm_sync_struct { +	/* individual CPU states */ +	struct { +		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); +	} cpus[MAX_CPUS_PER_CLUSTER]; + +	/* cluster state */ +	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); + +	/* inbound-side state */ +	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); +}; + +struct sync_struct { +	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; +}; + +extern unsigned long sync_phys;	/* physical address of *mcpm_sync */ + +void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster); +void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster); +void __mcpm_outbound_leave_critical(unsigned int cluster, int state); +bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); +int __mcpm_cluster_state(unsigned int cluster); + +int __init mcpm_sync_init( +	void (*power_up_setup)(unsigned int affinity_level)); + +void __init mcpm_smp_set_ops(void); + +#else + +/*  + * asm-offsets.h causes trouble when included in .c files, and cacheflush.h + * cannot be included in asm files.  Let's work around the conflict like this. + */ +#include <asm/asm-offsets.h> +#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE + +#endif /* ! 
__ASSEMBLY__ */ + +/* Definitions for mcpm_sync_struct */ +#define CPU_DOWN		0x11 +#define CPU_COMING_UP		0x12 +#define CPU_UP			0x13 +#define CPU_GOING_DOWN		0x14 + +#define CLUSTER_DOWN		0x21 +#define CLUSTER_UP		0x22 +#define CLUSTER_GOING_DOWN	0x23 + +#define INBOUND_NOT_COMING_UP	0x31 +#define INBOUND_COMING_UP	0x32 + +/* + * Offsets for the mcpm_sync_struct members, for use in asm. + * We don't want to make them global to the kernel via asm-offsets.c. + */ +#define MCPM_SYNC_CLUSTER_CPUS	0 +#define MCPM_SYNC_CPU_SIZE	__CACHE_WRITEBACK_GRANULE +#define MCPM_SYNC_CLUSTER_CLUSTER \ +	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER) +#define MCPM_SYNC_CLUSTER_INBOUND \ +	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE) +#define MCPM_SYNC_CLUSTER_SIZE \ +	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE) + +#endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 863a6611323..a7b85e0d0cc 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);  void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);  #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; }) +DECLARE_PER_CPU(atomic64_t, active_asids); +  #else	/* !CONFIG_CPU_HAS_ASID */  #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 6ef8afd1b64..86b8fe398b9 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -111,7 +111,7 @@  #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */  #define L_PTE_S2_MT_WRITEBACK	 (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */  #define L_PTE_S2_RDONLY		 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */ -#define L_PTE_S2_RDWR		 (_AT(pteval_t, 2) << 6)   /* HAP[2:1] */ +#define L_PTE_S2_RDWR		 (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */  /*   * Hyp-mode PL2 
PTE definitions for LPAE. diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 80d6fc4dbe4..9bcd262a900 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t);  #define FIRST_USER_ADDRESS	PAGE_SIZE  /* + * Use TASK_SIZE as the ceiling argument for free_pgtables() and + * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd + * page shared between user and kernel). + */ +#ifdef CONFIG_ARM_LPAE +#define USER_PGTABLES_CEILING	TASK_SIZE +#endif + +/*   * The pgprot_* and protection_map entries will be fixed up in runtime   * to include the cachable and bufferable bits based on memory policy,   * as well as any architecture dependent bits like global/ASID and SMP diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index cddda1f41f0..1995d1a8406 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,  #define TIF_SYSCALL_AUDIT	9  #define TIF_SYSCALL_TRACEPOINT	10  #define TIF_SECCOMP		11	/* seccomp syscall filtering active */ +#define TIF_NOHZ		12	/* in adaptive nohz mode */  #define TIF_USING_IWMMXT	17  #define TIF_MEMDIE		18	/* is terminating due to OOM killer */  #define TIF_RESTORE_SIGMASK	20 diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c8820f0..a3625d141c1 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -14,7 +14,6 @@  #include <asm/glue.h> -#define TLB_V3_PAGE	(1 << 0)  #define TLB_V4_U_PAGE	(1 << 1)  #define TLB_V4_D_PAGE	(1 << 2)  #define TLB_V4_I_PAGE	(1 << 3) @@ -22,7 +21,6 @@  #define TLB_V6_D_PAGE	(1 << 5)  #define TLB_V6_I_PAGE	(1 << 6) -#define TLB_V3_FULL	(1 << 8)  #define TLB_V4_U_FULL	(1 << 9)  #define TLB_V4_D_FULL	(1 << 10)  #define TLB_V4_I_FULL	(1 << 11) @@ -52,7 +50,6 
@@   *	=============   *   *	We have the following to choose from: - *	  v3    - ARMv3   *	  v4    - ARMv4 without write buffer   *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction   *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction @@ -169,7 +166,7 @@  # define v6wbi_always_flags	(-1UL)  #endif -#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ +#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_BARRIER | \  				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \  				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)  #define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ @@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)  	if (tlb_flag(TLB_WB))  		dsb(); -	tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);  	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);  	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);  	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); @@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)  	if (tlb_flag(TLB_WB))  		dsb(); -	if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { +	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {  		if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { -			tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);  			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);  			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);  			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); @@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)  	if (tlb_flag(TLB_WB))  		dsb(); -	if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && +	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&  	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { -		tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);  		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);  		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);  		tlb_op(TLB_V4_I_PAGE, "c8, c5, 
1", uaddr); @@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)  	if (tlb_flag(TLB_WB))  		dsb(); -	tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);  	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);  	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);  	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); @@ -450,6 +443,21 @@ static inline void local_flush_bp_all(void)  		isb();  } +#ifdef CONFIG_ARM_ERRATA_798181 +static inline void dummy_flush_tlb_a15_erratum(void) +{ +	/* +	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. +	 */ +	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); +	dsb(); +} +#else +static inline void dummy_flush_tlb_a15_erratum(void) +{ +} +#endif +  /*   *	flush_pmd_entry   * diff --git a/arch/arm/include/debug/uncompress.h b/arch/arm/include/debug/uncompress.h new file mode 100644 index 00000000000..0e2949b0fae --- /dev/null +++ b/arch/arm/include/debug/uncompress.h @@ -0,0 +1,7 @@ +#ifdef CONFIG_DEBUG_UNCOMPRESS +extern void putc(int c); +#else +static inline void putc(int c) {} +#endif +static inline void flush(void) {} +static inline void arch_decomp_setup(void) {} diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 023bfeb367b..c1ee007523d 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -53,12 +53,12 @@  #define KVM_ARM_FIQ_spsr	fiq_regs[7]  struct kvm_regs { -	struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */ -	__u32 svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */ -	__u32 abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */ -	__u32 und_regs[3];	/* SP_und, LR_und, SPSR_und */ -	__u32 irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */ -	__u32 fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */ +	struct pt_regs usr_regs;	/* R0_usr - R14_usr, PC, CPSR */ +	unsigned long svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */ +	unsigned long abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */ +	unsigned long und_regs[3];	/* SP_und, LR_und, SPSR_und */ +	unsigned long irq_regs[3];	/* SP_irq, LR_irq, 
SPSR_irq */ +	unsigned long fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */  };  /* Supported Processor Types */ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 923eec7105c..a53efa99369 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -149,6 +149,10 @@ int main(void)    DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);    DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);    DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE); +  BLANK(); +  DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER); +  DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE); +  BLANK();  #ifdef CONFIG_KVM_ARM_HOST    DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm));    DEFINE(VCPU_MIDR,		offsetof(struct kvm_vcpu, arch.midr)); @@ -165,10 +169,10 @@ int main(void)    DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));    DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));    DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines)); -  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.hsr)); -  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.hxfar)); -  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar)); -  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc)); +  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr)); +  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar)); +  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar)); +  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));  #ifdef CONFIG_KVM_ARM_VGIC    DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));    DEFINE(VGIC_CPU_HCR,		offsetof(struct vgic_cpu, vgic_hcr)); diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index a1f73b502ef..b2ed73c4548 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -462,6 +462,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)  		sys->busnr   = busnr;  		sys->swizzle 
= hw->swizzle;  		sys->map_irq = hw->map_irq; +		sys->align_resource = hw->align_resource;  		INIT_LIST_HEAD(&sys->resources);  		if (hw->private_data) @@ -574,6 +575,8 @@ char * __init pcibios_setup(char *str)  resource_size_t pcibios_align_resource(void *data, const struct resource *res,  				resource_size_t size, resource_size_t align)  { +	struct pci_dev *dev = data; +	struct pci_sys_data *sys = dev->sysdata;  	resource_size_t start = res->start;  	if (res->flags & IORESOURCE_IO && start & 0x300) @@ -581,6 +584,9 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,  	start = (start + align - 1) & ~(align - 1); +	if (sys->align_resource) +		return sys->align_resource(dev, res, start, size, align); +  	return start;  } diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 9f6a30798b6..c66ca7e4ee9 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -353,6 +353,7 @@ ENDPROC(__pabt_svc)  #ifdef CONFIG_IRQSOFF_TRACER  	bl	trace_hardirqs_off  #endif +	ct_user_exit save = 0  	.endm  	.macro	kuser_cmpxchg_check diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 6f37ddfadbf..bc5bc0a9713 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -39,6 +39,7 @@ ret_fast_syscall:  	/* perform architecture specific actions before user return */  	arch_ret_to_user r1, lr +	ct_user_enter  	restore_user_regs fast = 1, offset = S_OFF   UNWIND(.fnend		) @@ -73,6 +74,7 @@ no_work_pending:  	/* perform architecture specific actions before user return */  	arch_ret_to_user r1, lr +	ct_user_enter save = 0  	restore_user_regs fast = 0, offset = 0  ENDPROC(ret_to_user_from_irq) @@ -273,7 +275,13 @@ ENDPROC(ftrace_graph_caller_old)   */  .macro mcount_enter +/* + * This pad compensates for the push {lr} at the call site.  Note that we are + * unable to unwind through a function which does not otherwise save its lr. 
+ */ + UNWIND(.pad	#4)  	stmdb	sp!, {r0-r3, lr} + UNWIND(.save	{r0-r3, lr})  .endm  .macro mcount_get_lr reg @@ -286,6 +294,7 @@ ENDPROC(ftrace_graph_caller_old)  .endm  ENTRY(__gnu_mcount_nc) +UNWIND(.fnstart)  #ifdef CONFIG_DYNAMIC_FTRACE  	mov	ip, lr  	ldmia	sp!, {lr} @@ -293,17 +302,22 @@ ENTRY(__gnu_mcount_nc)  #else  	__mcount  #endif +UNWIND(.fnend)  ENDPROC(__gnu_mcount_nc)  #ifdef CONFIG_DYNAMIC_FTRACE  ENTRY(ftrace_caller) +UNWIND(.fnstart)  	__ftrace_caller +UNWIND(.fnend)  ENDPROC(ftrace_caller)  #endif  #ifdef CONFIG_FUNCTION_GRAPH_TRACER  ENTRY(ftrace_graph_caller) +UNWIND(.fnstart)  	__ftrace_graph_caller +UNWIND(.fnend)  ENDPROC(ftrace_graph_caller)  #endif @@ -391,6 +405,7 @@ ENTRY(vector_swi)  	mcr	p15, 0, ip, c1, c0		@ update control register  #endif  	enable_irq +	ct_user_exit  	get_thread_info tsk  	adr	tbl, sys_call_table		@ load syscall table pointer diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 57a1631f065..160f3376ba6 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -198,6 +198,34 @@  #endif	/* !CONFIG_THUMB2_KERNEL */  /* + * Context tracking subsystem.  Used to instrument transitions + * between user and kernel mode. + */ +	.macro ct_user_exit, save = 1 +#ifdef CONFIG_CONTEXT_TRACKING +	.if	\save +	stmdb   sp!, {r0-r3, ip, lr} +	bl	user_exit +	ldmia	sp!, {r0-r3, ip, lr} +	.else +	bl	user_exit +	.endif +#endif +	.endm + +	.macro ct_user_enter, save = 1 +#ifdef CONFIG_CONTEXT_TRACKING +	.if	\save +	stmdb   sp!, {r0-r3, ip, lr} +	bl	user_enter +	ldmia	sp!, {r0-r3, ip, lr} +	.else +	bl	user_enter +	.endif +#endif +	.endm + +/*   * These are the registers used in the syscall handler, and allow us to   * have in theory up to 7 arguments to a function - r0 to r6.   
* diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 854bd22380d..5b391a689b4 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -98,8 +98,9 @@ __mmap_switched:  	str	r9, [r4]			@ Save processor ID  	str	r1, [r5]			@ Save machine type  	str	r2, [r6]			@ Save atags pointer -	bic	r4, r0, #CR_A			@ Clear 'A' bit -	stmia	r7, {r0, r4}			@ Save control register values +	cmp	r7, #0 +	bicne	r4, r0, #CR_A			@ Clear 'A' bit +	stmneia	r7, {r0, r4}			@ Save control register values  	b	start_kernel  ENDPROC(__mmap_switched) @@ -113,7 +114,11 @@ __mmap_switched_data:  	.long	processor_id			@ r4  	.long	__machine_arch_type		@ r5  	.long	__atags_pointer			@ r6 +#ifdef CONFIG_CPU_CP15  	.long	cr_alignment			@ r7 +#else +	.long	0				@ r7 +#endif  	.long	init_thread_union + THREAD_START_SP @ sp  	.size	__mmap_switched_data, . - __mmap_switched_data diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 2c228a07e58..6a2e09c952c 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -32,15 +32,21 @@   * numbers for r1.   *   */ -	.arm  	__HEAD + +#ifdef CONFIG_CPU_THUMBONLY +	.thumb +ENTRY(stext) +#else +	.arm  ENTRY(stext)   THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.   THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,   THUMB(	.thumb			)	@ switch to Thumb now.   
THUMB(1:			) +#endif  	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode  						@ and irqs disabled diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index e0eb9a1cae7..8bac553fe21 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -267,7 +267,7 @@ __create_page_tables:  	addne	r6, r6, #1 << SECTION_SHIFT  	strne	r6, [r3] -#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) +#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)  	sub	r4, r4, #4			@ Fixup page table pointer  						@ for 64-bit descriptors  #endif diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 96093b75ab9..1fd749ee4a1 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)  	}  	if (err) { -		pr_warning("CPU %d debug is powered down!\n", cpu); +		pr_warn_once("CPU %d debug is powered down!\n", cpu);  		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));  		return;  	} @@ -987,7 +987,7 @@ clear_vcr:  	isb();  	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { -		pr_warning("CPU %d failed to disable vector catch\n", cpu); +		pr_warn_once("CPU %d failed to disable vector catch\n", cpu);  		return;  	} @@ -1007,7 +1007,7 @@ clear_vcr:  	}  	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { -		pr_warning("CPU %d failed to clear debug register pairs\n", cpu); +		pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);  		return;  	} @@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,  	return NOTIFY_OK;  } -static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { +static struct notifier_block dbg_cpu_pm_nb = {  	.notifier_call = dbg_cpu_pm_notify,  }; diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 146157dfe27..8c3094d0f7b 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ 
-253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,  	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);  	struct pmu *leader_pmu = event->group_leader->pmu; -	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) +	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) +		return 1; + +	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)  		return 1;  	return armpmu->get_event_idx(hw_events, event) >= 0; diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index 8085417555d..fafedd86885 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -26,7 +26,7 @@ static int save_return_addr(struct stackframe *frame, void *d)  	struct return_address_data *data = d;  	if (!data->level) { -		data->addr = (void *)frame->lr; +		data->addr = (void *)frame->pc;  		return 1;  	} else { @@ -41,7 +41,8 @@ void *return_address(unsigned int level)  	struct stackframe frame;  	register unsigned long current_sp asm ("sp"); -	data.level = level + 1; +	data.level = level + 2; +	data.addr = NULL;  	frame.fp = (unsigned long)__builtin_frame_address(0);  	frame.sp = current_sp; diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index bd6f56b9ec2..59d2adb764a 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)  static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; -static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) +static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)  {  	return (cyc * mult) >> shift;  } -static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) +static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)  {  	u64 epoch_ns;  	u32 epoch_cyc; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 3f6cbb2e3ed..728007c4a2b 100644 --- a/arch/arm/kernel/setup.c +++ 
b/arch/arm/kernel/setup.c @@ -56,7 +56,6 @@  #include <asm/virt.h>  #include "atags.h" -#include "tcm.h"  #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE) @@ -291,10 +290,10 @@ static int cpu_has_aliasing_icache(unsigned int arch)  static void __init cacheid_init(void)  { -	unsigned int cachetype = read_cpuid_cachetype();  	unsigned int arch = cpu_architecture();  	if (arch >= CPU_ARCH_ARMv6) { +		unsigned int cachetype = read_cpuid_cachetype();  		if ((cachetype & (7 << 29)) == 4 << 29) {  			/* ARMv7 register format */  			arch = CPU_ARCH_ARMv7; @@ -353,6 +352,23 @@ void __init early_print(const char *str, ...)  	printk("%s", buf);  } +static void __init cpuid_init_hwcaps(void) +{ +	unsigned int divide_instrs; + +	if (cpu_architecture() < CPU_ARCH_ARMv7) +		return; + +	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; + +	switch (divide_instrs) { +	case 2: +		elf_hwcap |= HWCAP_IDIVA; +	case 1: +		elf_hwcap |= HWCAP_IDIVT; +	} +} +  static void __init feat_v6_fixup(void)  {  	int id = read_cpuid_id(); @@ -373,7 +389,7 @@ static void __init feat_v6_fixup(void)   *   * cpu_init sets up the per-CPU stacks.   
*/ -void cpu_init(void) +void notrace cpu_init(void)  {  	unsigned int cpu = smp_processor_id();  	struct stack *stk = &stacks[cpu]; @@ -483,8 +499,11 @@ static void __init setup_processor(void)  	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",  		 list->elf_name, ENDIANNESS);  	elf_hwcap = list->elf_hwcap; + +	cpuid_init_hwcaps(); +  #ifndef CONFIG_ARM_THUMB -	elf_hwcap &= ~HWCAP_THUMB; +	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);  #endif  	feat_v6_fixup(); @@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)  	size -= start & ~PAGE_MASK;  	bank->start = PAGE_ALIGN(start); -#ifndef CONFIG_LPAE +#ifndef CONFIG_ARM_LPAE  	if (bank->start + size < bank->start) {  		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "  			"32-bit physical address space\n", (long long)start); @@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p)  	reserve_crashkernel(); -	tcm_init(); -  #ifdef CONFIG_MULTI_IRQ_HANDLER  	handle_arch_irq = mdesc->handle_irq;  #endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 79078edbb9b..4231034b812 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -211,6 +211,13 @@ void __cpuinit __cpu_die(unsigned int cpu)  	}  	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); +	/* +	 * platform_cpu_kill() is generally expected to do the powering off +	 * and/or cutting of clocks to the dying CPU.  Optionally, this may +	 * be done by the CPU which is dying in preference to supporting +	 * this call, but that means there is _no_ synchronisation between +	 * the requesting CPU and the dying CPU actually losing power. +	 */  	if (!platform_cpu_kill(cpu))  		printk("CPU%u: unable to kill\n", cpu);  } @@ -230,14 +237,41 @@ void __ref cpu_die(void)  	idle_task_exit();  	local_irq_disable(); -	mb(); -	/* Tell __cpu_die() that this CPU is now safe to dispose of */ +	/* +	 * Flush the data out of the L1 cache for this CPU.  
This must be +	 * before the completion to ensure that data is safely written out +	 * before platform_cpu_kill() gets called - which may disable +	 * *this* CPU and power down its cache. +	 */ +	flush_cache_louis(); + +	/* +	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once +	 * this returns, power and/or clocks can be removed at any point +	 * from this CPU and its cache by platform_cpu_kill(). +	 */  	RCU_NONIDLE(complete(&cpu_died));  	/* -	 * actual CPU shutdown procedure is at least platform (if not -	 * CPU) specific. +	 * Ensure that the cache lines associated with that completion are +	 * written out.  This covers the case where _this_ CPU is doing the +	 * powering down, to ensure that the completion is visible to the +	 * CPU waiting for this one. +	 */ +	flush_cache_louis(); + +	/* +	 * The actual CPU shutdown procedure is at least platform (if not +	 * CPU) specific.  This may remove power, or it may simply spin. +	 * +	 * Platforms are generally expected *NOT* to return from this call, +	 * although there are some which do because they have no way to +	 * power down the CPU.  These platforms are the _only_ reason we +	 * have a return path which uses the fragment of assembly below. +	 * +	 * The return path should not be used for platforms which can +	 * power off the CPU.  	 
*/  	if (smp_ops.cpu_die)  		smp_ops.cpu_die(cpu); @@ -673,9 +707,6 @@ static int cpufreq_callback(struct notifier_block *nb,  	if (freq->flags & CPUFREQ_CONST_LOOPS)  		return NOTIFY_OK; -	if (arm_delay_ops.const_clock) -		return NOTIFY_OK; -  	if (!per_cpu(l_p_j_ref, cpu)) {  		per_cpu(l_p_j_ref, cpu) =  			per_cpu(cpu_data, cpu).loops_per_jiffy; diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index 45eac87ed66..5bc1a63284e 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -41,7 +41,7 @@ void scu_enable(void __iomem *scu_base)  #ifdef CONFIG_ARM_ERRATA_764369  	/* Cortex-A9 only */ -	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) { +	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {  		scu_ctrl = __raw_readl(scu_base + 0x30);  		if (!(scu_ctrl & 1))  			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30); diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index bd030053139..9a52a07aa40 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -12,6 +12,7 @@  #include <asm/smp_plat.h>  #include <asm/tlbflush.h> +#include <asm/mmu_context.h>  /**********************************************************************/ @@ -69,12 +70,73 @@ static inline void ipi_flush_bp_all(void *ignored)  	local_flush_bp_all();  } +#ifdef CONFIG_ARM_ERRATA_798181 +static int erratum_a15_798181(void) +{ +	unsigned int midr = read_cpuid_id(); + +	/* Cortex-A15 r0p0..r3p2 affected */ +	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) +		return 0; +	return 1; +} +#else +static int erratum_a15_798181(void) +{ +	return 0; +} +#endif + +static void ipi_flush_tlb_a15_erratum(void *arg) +{ +	dmb(); +} + +static void broadcast_tlb_a15_erratum(void) +{ +	if (!erratum_a15_798181()) +		return; + +	dummy_flush_tlb_a15_erratum(); +	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1); +} + +static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) +{ +	int cpu, this_cpu; +	cpumask_t mask = { CPU_BITS_NONE 
}; + +	if (!erratum_a15_798181()) +		return; + +	dummy_flush_tlb_a15_erratum(); +	this_cpu = get_cpu(); +	for_each_online_cpu(cpu) { +		if (cpu == this_cpu) +			continue; +		/* +		 * We only need to send an IPI if the other CPUs are running +		 * the same ASID as the one being invalidated. There is no +		 * need for locking around the active_asids check since the +		 * switch_mm() function has at least one dmb() (as required by +		 * this workaround) in case a context switch happens on +		 * another CPU after the condition below. +		 */ +		if (atomic64_read(&mm->context.id) == +		    atomic64_read(&per_cpu(active_asids, cpu))) +			cpumask_set_cpu(cpu, &mask); +	} +	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); +	put_cpu(); +} +  void flush_tlb_all(void)  {  	if (tlb_ops_need_broadcast())  		on_each_cpu(ipi_flush_tlb_all, NULL, 1);  	else  		local_flush_tlb_all(); +	broadcast_tlb_a15_erratum();  }  void flush_tlb_mm(struct mm_struct *mm) @@ -83,6 +145,7 @@ void flush_tlb_mm(struct mm_struct *mm)  		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);  	else  		local_flush_tlb_mm(mm); +	broadcast_tlb_mm_a15_erratum(mm);  }  void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) @@ -95,6 +158,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)  					&ta, 1);  	} else  		local_flush_tlb_page(vma, uaddr); +	broadcast_tlb_mm_a15_erratum(vma->vm_mm);  }  void flush_tlb_kernel_page(unsigned long kaddr) @@ -105,6 +169,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)  		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);  	} else  		local_flush_tlb_kernel_page(kaddr); +	broadcast_tlb_a15_erratum();  }  void flush_tlb_range(struct vm_area_struct *vma, @@ -119,6 +184,7 @@ void flush_tlb_range(struct vm_area_struct *vma,  					&ta, 1);  	} else  		local_flush_tlb_range(vma, start, end); +	broadcast_tlb_mm_a15_erratum(vma->vm_mm);  }  void flush_tlb_kernel_range(unsigned long start, unsigned long end) @@ -130,6 
+196,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)  		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);  	} else  		local_flush_tlb_kernel_range(start, end); +	broadcast_tlb_a15_erratum();  }  void flush_bp_all(void) diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 30ae6bb4a31..f50f19e5c13 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -17,7 +17,6 @@  #include <asm/mach/map.h>  #include <asm/memory.h>  #include <asm/system_info.h> -#include "tcm.h"  static struct gen_pool *tcm_pool;  static bool dtcm_present; diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile index fc96ce6f235..8dc5e76cb78 100644 --- a/arch/arm/kvm/Makefile +++ b/arch/arm/kvm/Makefile @@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)  kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)  obj-y += kvm-arm.o init.o interrupts.o -obj-y += arm.o guest.o mmu.o emulate.o reset.o +obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o  obj-y += coproc.o coproc_a15.o mmio.o psci.o  obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o  obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 5a936988eb2..a0dfc2a53f9 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -30,11 +30,9 @@  #define CREATE_TRACE_POINTS  #include "trace.h" -#include <asm/unified.h>  #include <asm/uaccess.h>  #include <asm/ptrace.h>  #include <asm/mman.h> -#include <asm/cputype.h>  #include <asm/tlbflush.h>  #include <asm/cacheflush.h>  #include <asm/virt.h> @@ -44,14 +42,13 @@  #include <asm/kvm_emulate.h>  #include <asm/kvm_coproc.h>  #include <asm/kvm_psci.h> -#include <asm/opcodes.h>  #ifdef REQUIRES_VIRT  __asm__(".arch_extension	virt");  #endif  static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); -static struct vfp_hard_struct __percpu *kvm_host_vfp_state; +static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;  static unsigned long hyp_default_vectors;  /* Per-CPU 
variable containing the currently running vcpu. */ @@ -201,6 +198,7 @@ int kvm_dev_ioctl_check_extension(long ext)  		break;  	case KVM_CAP_ARM_SET_DEVICE_ADDR:  		r = 1; +		break;  	case KVM_CAP_NR_VCPUS:  		r = num_online_cpus();  		break; @@ -303,22 +301,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)  	return 0;  } -int __attribute_const__ kvm_target_cpu(void) -{ -	unsigned long implementor = read_cpuid_implementor(); -	unsigned long part_number = read_cpuid_part_number(); - -	if (implementor != ARM_CPU_IMP_ARM) -		return -EINVAL; - -	switch (part_number) { -	case ARM_CPU_PART_CORTEX_A15: -		return KVM_ARM_TARGET_CORTEX_A15; -	default: -		return -EINVAL; -	} -} -  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)  {  	int ret; @@ -481,163 +463,6 @@ static void update_vttbr(struct kvm *kvm)  	spin_unlock(&kvm_vmid_lock);  } -static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ -	/* SVC called from Hyp mode should never get here */ -	kvm_debug("SVC called from Hyp mode shouldn't go here\n"); -	BUG(); -	return -EINVAL; /* Squash warning */ -} - -static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ -	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), -		      vcpu->arch.hsr & HSR_HVC_IMM_MASK); - -	if (kvm_psci_call(vcpu)) -		return 1; - -	kvm_inject_undefined(vcpu); -	return 1; -} - -static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ -	if (kvm_psci_call(vcpu)) -		return 1; - -	kvm_inject_undefined(vcpu); -	return 1; -} - -static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ -	/* The hypervisor should never cause aborts */ -	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", -		vcpu->arch.hxfar, vcpu->arch.hsr); -	return -EFAULT; -} - -static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ -	/* This is either an error in the ws. 
code or an external abort */ -	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", -		vcpu->arch.hxfar, vcpu->arch.hsr); -	return -EFAULT; -} - -typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); -static exit_handle_fn arm_exit_handlers[] = { -	[HSR_EC_WFI]		= kvm_handle_wfi, -	[HSR_EC_CP15_32]	= kvm_handle_cp15_32, -	[HSR_EC_CP15_64]	= kvm_handle_cp15_64, -	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access, -	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store, -	[HSR_EC_CP14_64]	= kvm_handle_cp14_access, -	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access, -	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id, -	[HSR_EC_SVC_HYP]	= handle_svc_hyp, -	[HSR_EC_HVC]		= handle_hvc, -	[HSR_EC_SMC]		= handle_smc, -	[HSR_EC_IABT]		= kvm_handle_guest_abort, -	[HSR_EC_IABT_HYP]	= handle_pabt_hyp, -	[HSR_EC_DABT]		= kvm_handle_guest_abort, -	[HSR_EC_DABT_HYP]	= handle_dabt_hyp, -}; - -/* - * A conditional instruction is allowed to trap, even though it - * wouldn't be executed.  So let's re-implement the hardware, in - * software! - */ -static bool kvm_condition_valid(struct kvm_vcpu *vcpu) -{ -	unsigned long cpsr, cond, insn; - -	/* -	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to -	 * catch undefined instructions, and then we won't get past -	 * the arm_exit_handlers test anyway. -	 */ -	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0); - -	/* Top two bits non-zero?  Unconditional. */ -	if (vcpu->arch.hsr >> 30) -		return true; - -	cpsr = *vcpu_cpsr(vcpu); - -	/* Is condition field valid? */ -	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT) -		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT; -	else { -		/* This can happen in Thumb mode: examine IT state. */ -		unsigned long it; - -		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); - -		/* it == 0 => unconditional. */ -		if (it == 0) -			return true; - -		/* The cond for this insn works out as the top 4 bits. 
*/ -		cond = (it >> 4); -	} - -	/* Shift makes it look like an ARM-mode instruction */ -	insn = cond << 28; -	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL; -} - -/* - * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on - * proper exit to QEMU. - */ -static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, -		       int exception_index) -{ -	unsigned long hsr_ec; - -	switch (exception_index) { -	case ARM_EXCEPTION_IRQ: -		return 1; -	case ARM_EXCEPTION_UNDEFINED: -		kvm_err("Undefined exception in Hyp mode at: %#08x\n", -			vcpu->arch.hyp_pc); -		BUG(); -		panic("KVM: Hypervisor undefined exception!\n"); -	case ARM_EXCEPTION_DATA_ABORT: -	case ARM_EXCEPTION_PREF_ABORT: -	case ARM_EXCEPTION_HVC: -		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; - -		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) -		    || !arm_exit_handlers[hsr_ec]) { -			kvm_err("Unkown exception class: %#08lx, " -				"hsr: %#08x\n", hsr_ec, -				(unsigned int)vcpu->arch.hsr); -			BUG(); -		} - -		/* -		 * See ARM ARM B1.14.1: "Hyp traps on instructions -		 * that fail their condition code check" -		 */ -		if (!kvm_condition_valid(vcpu)) { -			bool is_wide = vcpu->arch.hsr & HSR_IL; -			kvm_skip_instr(vcpu, is_wide); -			return 1; -		} - -		return arm_exit_handlers[hsr_ec](vcpu, run); -	default: -		kvm_pr_unimpl("Unsupported exception type: %d", -			      exception_index); -		run->exit_reason = KVM_EXIT_INTERNAL_ERROR; -		return 0; -	} -} -  static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)  {  	if (likely(vcpu->arch.has_run_once)) @@ -972,7 +797,6 @@ long kvm_arch_vm_ioctl(struct file *filp,  static void cpu_init_hyp_mode(void *vector)  {  	unsigned long long pgd_ptr; -	unsigned long pgd_low, pgd_high;  	unsigned long hyp_stack_ptr;  	unsigned long stack_page;  	unsigned long vector_ptr; @@ -981,20 +805,11 @@ static void cpu_init_hyp_mode(void *vector)  	__hyp_set_vectors((unsigned long)vector);  	pgd_ptr = (unsigned long 
long)kvm_mmu_get_httbr(); -	pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); -	pgd_high = (pgd_ptr >> 32ULL);  	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);  	hyp_stack_ptr = stack_page + PAGE_SIZE;  	vector_ptr = (unsigned long)__kvm_hyp_vector; -	/* -	 * Call initialization code, and switch to the full blown -	 * HYP code. The init code doesn't need to preserve these registers as -	 * r1-r3 and r12 are already callee save according to the AAPCS. -	 * Note that we slightly misuse the prototype by casing the pgd_low to -	 * a void *. -	 */ -	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); +	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);  }  /** @@ -1077,7 +892,7 @@ static int init_hyp_mode(void)  	/*  	 * Map the host VFP structures  	 */ -	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct); +	kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);  	if (!kvm_host_vfp_state) {  		err = -ENOMEM;  		kvm_err("Cannot allocate host VFP state\n"); @@ -1085,7 +900,7 @@ static int init_hyp_mode(void)  	}  	for_each_possible_cpu(cpu) { -		struct vfp_hard_struct *vfp; +		kvm_kernel_vfp_t *vfp;  		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);  		err = create_hyp_mappings(vfp, vfp + 1); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4ea9a982269..8eea97be1ed 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -76,14 +76,14 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,  			const struct coproc_params *p,  			const struct coproc_reg *r)  { -	u32 val; +	unsigned long val;  	int cpu; -	cpu = get_cpu(); -  	if (!p->is_write)  		return read_from_write_only(vcpu, p); +	cpu = get_cpu(); +  	cpumask_setall(&vcpu->arch.require_dcache_flush);  	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); @@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,  		if (likely(r->access(vcpu, params, r))) {  			/* Skip instruction, since it was emulated */ -			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); +			
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));  			return 1;  		}  		/* If access function fails, it should complain. */  	} else { -		kvm_err("Unsupported guest CP15 access at: %08x\n", +		kvm_err("Unsupported guest CP15 access at: %08lx\n",  			*vcpu_pc(vcpu));  		print_cp_instr(params);  	} @@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)  {  	struct coproc_params params; -	params.CRm = (vcpu->arch.hsr >> 1) & 0xf; -	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; -	params.is_write = ((vcpu->arch.hsr & 1) == 0); +	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; +	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; +	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);  	params.is_64bit = true; -	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf; +	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;  	params.Op2 = 0; -	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf; +	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;  	params.CRn = 0;  	return emulate_cp15(vcpu, ¶ms); @@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)  {  	struct coproc_params params; -	params.CRm = (vcpu->arch.hsr >> 1) & 0xf; -	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; -	params.is_write = ((vcpu->arch.hsr & 1) == 0); +	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; +	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; +	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);  	params.is_64bit = false; -	params.CRn = (vcpu->arch.hsr >> 10) & 0xf; -	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7; -	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7; +	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; +	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7; +	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;  	params.Rt2 = 0;  	return emulate_cp15(vcpu, ¶ms); diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 992adfafa2f..b7301d3e479 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -84,7 +84,7 @@ static inline bool 
read_zero(struct kvm_vcpu *vcpu,  static inline bool write_to_read_only(struct kvm_vcpu *vcpu,  				      const struct coproc_params *params)  { -	kvm_debug("CP15 write to read-only register at: %08x\n", +	kvm_debug("CP15 write to read-only register at: %08lx\n",  		  *vcpu_pc(vcpu));  	print_cp_instr(params);  	return false; @@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu,  static inline bool read_from_write_only(struct kvm_vcpu *vcpu,  					const struct coproc_params *params)  { -	kvm_debug("CP15 read to write-only register at: %08x\n", +	kvm_debug("CP15 read to write-only register at: %08lx\n",  		  *vcpu_pc(vcpu));  	print_cp_instr(params);  	return false; diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index d61450ac666..bdede9e7da5 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -20,6 +20,7 @@  #include <linux/kvm_host.h>  #include <asm/kvm_arm.h>  #include <asm/kvm_emulate.h> +#include <asm/opcodes.h>  #include <trace/events/kvm.h>  #include "trace.h" @@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {   * Return a pointer to the register number valid in the current mode of   * the virtual CPU.   */ -u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) +unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)  { -	u32 *reg_array = (u32 *)&vcpu->arch.regs; -	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; +	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs; +	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;  	switch (mode) {  	case USR_MODE...SVC_MODE: @@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)  /*   * Return the SPSR for the current mode of the virtual CPU.   
*/ -u32 *vcpu_spsr(struct kvm_vcpu *vcpu) +unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)  { -	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; +	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;  	switch (mode) {  	case SVC_MODE:  		return &vcpu->arch.regs.KVM_ARM_SVC_spsr; @@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)  	}  } -/** - * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest - * @vcpu:	the vcpu pointer - * @run:	the kvm_run structure pointer - * - * Simply sets the wait_for_interrupts flag on the vcpu structure, which will - * halt execution of world-switches and schedule other host processes until - * there is an incoming IRQ or FIQ to the VM. +/* + * A conditional instruction is allowed to trap, even though it + * wouldn't be executed.  So let's re-implement the hardware, in + * software!   */ -int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) +bool kvm_condition_valid(struct kvm_vcpu *vcpu)  { -	trace_kvm_wfi(*vcpu_pc(vcpu)); -	kvm_vcpu_block(vcpu); -	return 1; +	unsigned long cpsr, cond, insn; + +	/* +	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to +	 * catch undefined instructions, and then we won't get past +	 * the arm_exit_handlers test anyway. +	 */ +	BUG_ON(!kvm_vcpu_trap_get_class(vcpu)); + +	/* Top two bits non-zero?  Unconditional. */ +	if (kvm_vcpu_get_hsr(vcpu) >> 30) +		return true; + +	cpsr = *vcpu_cpsr(vcpu); + +	/* Is condition field valid? */ +	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT) +		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT; +	else { +		/* This can happen in Thumb mode: examine IT state. */ +		unsigned long it; + +		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); + +		/* it == 0 => unconditional. */ +		if (it == 0) +			return true; + +		/* The cond for this insn works out as the top 4 bits. 
*/ +		cond = (it >> 4); +	} + +	/* Shift makes it look like an ARM-mode instruction */ +	insn = cond << 28; +	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;  }  /** @@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)   */  void kvm_inject_undefined(struct kvm_vcpu *vcpu)  { -	u32 new_lr_value; -	u32 new_spsr_value; -	u32 cpsr = *vcpu_cpsr(vcpu); +	unsigned long new_lr_value; +	unsigned long new_spsr_value; +	unsigned long cpsr = *vcpu_cpsr(vcpu);  	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];  	bool is_thumb = (cpsr & PSR_T_BIT);  	u32 vect_offset = 4; @@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)   */  static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)  { -	u32 new_lr_value; -	u32 new_spsr_value; -	u32 cpsr = *vcpu_cpsr(vcpu); +	unsigned long new_lr_value; +	unsigned long new_spsr_value; +	unsigned long cpsr = *vcpu_cpsr(vcpu);  	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];  	bool is_thumb = (cpsr & PSR_T_BIT);  	u32 vect_offset; diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 2339d9609d3..152d0361218 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -22,6 +22,7 @@  #include <linux/module.h>  #include <linux/vmalloc.h>  #include <linux/fs.h> +#include <asm/cputype.h>  #include <asm/uaccess.h>  #include <asm/kvm.h>  #include <asm/kvm_asm.h> @@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,  	return -EINVAL;  } +int __attribute_const__ kvm_target_cpu(void) +{ +	unsigned long implementor = read_cpuid_implementor(); +	unsigned long part_number = read_cpuid_part_number(); + +	if (implementor != ARM_CPU_IMP_ARM) +		return -EINVAL; + +	switch (part_number) { +	case ARM_CPU_PART_CORTEX_A15: +		return KVM_ARM_TARGET_CORTEX_A15; +	default: +		return -EINVAL; +	} +} +  int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,  			const struct kvm_vcpu_init *init)  { diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c new file mode 
100644 index 00000000000..26ad17310a1 --- /dev/null +++ b/arch/arm/kvm/handle_exit.c @@ -0,0 +1,164 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#include <linux/kvm.h> +#include <linux/kvm_host.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_coproc.h> +#include <asm/kvm_mmu.h> +#include <asm/kvm_psci.h> +#include <trace/events/kvm.h> + +#include "trace.h" + +#include "trace.h" + +typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); + +static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	/* SVC called from Hyp mode should never get here */ +	kvm_debug("SVC called from Hyp mode shouldn't go here\n"); +	BUG(); +	return -EINVAL; /* Squash warning */ +} + +static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), +		      kvm_vcpu_hvc_get_imm(vcpu)); + +	if (kvm_psci_call(vcpu)) +		return 1; + +	kvm_inject_undefined(vcpu); +	return 1; +} + +static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	if (kvm_psci_call(vcpu)) +		return 1; + +	kvm_inject_undefined(vcpu); +	return 1; +} + +static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	/* The hypervisor should never cause aborts 
*/ +	kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n", +		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu)); +	return -EFAULT; +} + +static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	/* This is either an error in the ws. code or an external abort */ +	kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n", +		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu)); +	return -EFAULT; +} + +/** + * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest + * @vcpu:	the vcpu pointer + * @run:	the kvm_run structure pointer + * + * Simply sets the wait_for_interrupts flag on the vcpu structure, which will + * halt execution of world-switches and schedule other host processes until + * there is an incoming IRQ or FIQ to the VM. + */ +static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	trace_kvm_wfi(*vcpu_pc(vcpu)); +	kvm_vcpu_block(vcpu); +	return 1; +} + +static exit_handle_fn arm_exit_handlers[] = { +	[HSR_EC_WFI]		= kvm_handle_wfi, +	[HSR_EC_CP15_32]	= kvm_handle_cp15_32, +	[HSR_EC_CP15_64]	= kvm_handle_cp15_64, +	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access, +	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store, +	[HSR_EC_CP14_64]	= kvm_handle_cp14_access, +	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access, +	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id, +	[HSR_EC_SVC_HYP]	= handle_svc_hyp, +	[HSR_EC_HVC]		= handle_hvc, +	[HSR_EC_SMC]		= handle_smc, +	[HSR_EC_IABT]		= kvm_handle_guest_abort, +	[HSR_EC_IABT_HYP]	= handle_pabt_hyp, +	[HSR_EC_DABT]		= kvm_handle_guest_abort, +	[HSR_EC_DABT_HYP]	= handle_dabt_hyp, +}; + +static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) +{ +	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); + +	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || +	    !arm_exit_handlers[hsr_ec]) { +		kvm_err("Unkown exception class: hsr: %#08x\n", +			(unsigned int)kvm_vcpu_get_hsr(vcpu)); +		BUG(); +	} + +	return arm_exit_handlers[hsr_ec]; +} + +/* + * Return > 0 to return to 
guest, < 0 on error, 0 (and set exit_reason) on + * proper exit to userspace. + */ +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, +		       int exception_index) +{ +	exit_handle_fn exit_handler; + +	switch (exception_index) { +	case ARM_EXCEPTION_IRQ: +		return 1; +	case ARM_EXCEPTION_UNDEFINED: +		kvm_err("Undefined exception in Hyp mode at: %#08lx\n", +			kvm_vcpu_get_hyp_pc(vcpu)); +		BUG(); +		panic("KVM: Hypervisor undefined exception!\n"); +	case ARM_EXCEPTION_DATA_ABORT: +	case ARM_EXCEPTION_PREF_ABORT: +	case ARM_EXCEPTION_HVC: +		/* +		 * See ARM ARM B1.14.1: "Hyp traps on instructions +		 * that fail their condition code check" +		 */ +		if (!kvm_condition_valid(vcpu)) { +			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); +			return 1; +		} + +		exit_handler = kvm_get_exit_handler(vcpu); + +		return exit_handler(vcpu, run); +	default: +		kvm_pr_unimpl("Unsupported exception type: %d", +			      exception_index); +		run->exit_reason = KVM_EXIT_INTERNAL_ERROR; +		return 0; +	} +} diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 8ca87ab0919..f7793df62f5 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -35,15 +35,18 @@ __kvm_hyp_code_start:  /********************************************************************   * Flush per-VMID TLBs   * - * void __kvm_tlb_flush_vmid(struct kvm *kvm); + * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);   *   * We rely on the hardware to broadcast the TLB invalidation to all CPUs   * inside the inner-shareable domain (which is the case for all v7   * implementations).  If we come across a non-IS SMP implementation, we'll   * have to use an IPI based mechanism. Until then, we stick to the simple   * hardware assisted version. + * + * As v7 does not support flushing per IPA, just nuke the whole TLB + * instead, ignoring the ipa value.   
*/ -ENTRY(__kvm_tlb_flush_vmid) +ENTRY(__kvm_tlb_flush_vmid_ipa)  	push	{r2, r3}  	add	r0, r0, #KVM_VTTBR @@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid)  	pop	{r2, r3}  	bx	lr -ENDPROC(__kvm_tlb_flush_vmid) +ENDPROC(__kvm_tlb_flush_vmid_ipa)  /********************************************************************   * Flush TLBs and instruction caches of all CPUs inside the inner-shareable @@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp)   * instruction is issued since all traps are disabled when running the host   * kernel as per the Hyp-mode initialization at boot time.   * - * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc + * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc   * below) when the HVC instruction is called from SVC mode (i.e. a guest or the - * host kernel) and they cause a trap to the vector page + offset 0xc when HVC + * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC   * instructions are called from within Hyp-mode.   
*   * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 98a870ff1a5..72a12f2171b 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c @@ -33,16 +33,16 @@   */  int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)  { -	__u32 *dest; +	unsigned long *dest;  	unsigned int len;  	int mask;  	if (!run->mmio.is_write) {  		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); -		memset(dest, 0, sizeof(int)); +		*dest = 0;  		len = run->mmio.len; -		if (len > 4) +		if (len > sizeof(unsigned long))  			return -EINVAL;  		memcpy(dest, run->mmio.data, len); @@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)  		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,  				*((u64 *)run->mmio.data)); -		if (vcpu->arch.mmio_decode.sign_extend && len < 4) { +		if (vcpu->arch.mmio_decode.sign_extend && +		    len < sizeof(unsigned long)) {  			mask = 1U << ((len * 8) - 1);  			*dest = (*dest ^ mask) - mask;  		} @@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,  	unsigned long rt, len;  	bool is_write, sign_extend; -	if ((vcpu->arch.hsr >> 8) & 1) { +	if (kvm_vcpu_dabt_isextabt(vcpu)) {  		/* cache operation on I/O addr, tell guest unsupported */ -		kvm_inject_dabt(vcpu, vcpu->arch.hxfar); +		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));  		return 1;  	} -	if ((vcpu->arch.hsr >> 7) & 1) { +	if (kvm_vcpu_dabt_iss1tw(vcpu)) {  		/* page table accesses IO mem: tell guest to fix its TTBR */ -		kvm_inject_dabt(vcpu, vcpu->arch.hxfar); +		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));  		return 1;  	} -	switch ((vcpu->arch.hsr >> 22) & 0x3) { -	case 0: -		len = 1; -		break; -	case 1: -		len = 2; -		break; -	case 2: -		len = 4; -		break; -	default: -		kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); -		return -EFAULT; -	} +	len = kvm_vcpu_dabt_get_as(vcpu); +	if (unlikely(len < 0)) +		return len; -	is_write = 
vcpu->arch.hsr & HSR_WNR; -	sign_extend = vcpu->arch.hsr & HSR_SSE; -	rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT; +	is_write = kvm_vcpu_dabt_iswrite(vcpu); +	sign_extend = kvm_vcpu_dabt_issext(vcpu); +	rt = kvm_vcpu_dabt_get_rd(vcpu);  	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {  		/* IO memory trying to read/write pc */ -		kvm_inject_pabt(vcpu, vcpu->arch.hxfar); +		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));  		return 1;  	} @@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,  	 * The MMIO instruction is emulated and should not be re-executed  	 * in the guest.  	 */ -	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); +	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));  	return 0;  } @@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,  	 * space do its magic.  	 */ -	if (vcpu->arch.hsr & HSR_ISV) { +	if (kvm_vcpu_dabt_isvalid(vcpu)) {  		ret = decode_hsr(vcpu, fault_ipa, &mmio);  		if (ret)  			return ret; diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 99e07c7dd74..2f12e405640 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -20,7 +20,6 @@  #include <linux/kvm_host.h>  #include <linux/io.h>  #include <trace/events/kvm.h> -#include <asm/idmap.h>  #include <asm/pgalloc.h>  #include <asm/cacheflush.h>  #include <asm/kvm_arm.h> @@ -28,8 +27,6 @@  #include <asm/kvm_mmio.h>  #include <asm/kvm_asm.h>  #include <asm/kvm_emulate.h> -#include <asm/mach/map.h> -#include <trace/events/kvm.h>  #include "trace.h" @@ -37,19 +34,9 @@ extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];  static DEFINE_MUTEX(kvm_hyp_pgd_mutex); -static void kvm_tlb_flush_vmid(struct kvm *kvm) +static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)  { -	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); -} - -static void kvm_set_pte(pte_t *pte, pte_t new_pte) -{ -	pte_val(*pte) = new_pte; -	/* -	 * flush_pmd_entry just takes a void pointer and cleans the necessary -	 * cache entries, so 
we can reuse the function for ptes. -	 */ -	flush_pmd_entry(pte); +	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);  }  static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, @@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr)  	}  } +static void free_hyp_pgd_entry(unsigned long addr) +{ +	pgd_t *pgd; +	pud_t *pud; +	pmd_t *pmd; +	unsigned long hyp_addr = KERN_TO_HYP(addr); + +	pgd = hyp_pgd + pgd_index(hyp_addr); +	pud = pud_offset(pgd, hyp_addr); + +	if (pud_none(*pud)) +		return; +	BUG_ON(pud_bad(*pud)); + +	pmd = pmd_offset(pud, hyp_addr); +	free_ptes(pmd, addr); +	pmd_free(NULL, pmd); +	pud_clear(pud); +} +  /**   * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables   *   * Assumes this is a page table used strictly in Hyp-mode and therefore contains - * only mappings in the kernel memory area, which is above PAGE_OFFSET. + * either mappings in the kernel memory area (above PAGE_OFFSET), or + * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).   
*/  void free_hyp_pmds(void)  { -	pgd_t *pgd; -	pud_t *pud; -	pmd_t *pmd;  	unsigned long addr;  	mutex_lock(&kvm_hyp_pgd_mutex); -	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) { -		pgd = hyp_pgd + pgd_index(addr); -		pud = pud_offset(pgd, addr); - -		if (pud_none(*pud)) -			continue; -		BUG_ON(pud_bad(*pud)); - -		pmd = pmd_offset(pud, addr); -		free_ptes(pmd, addr); -		pmd_free(NULL, pmd); -		pud_clear(pud); -	} +	for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) +		free_hyp_pgd_entry(addr); +	for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) +		free_hyp_pgd_entry(addr);  	mutex_unlock(&kvm_hyp_pgd_mutex);  } @@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,  	struct page *page;  	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { -		pte = pte_offset_kernel(pmd, addr); +		unsigned long hyp_addr = KERN_TO_HYP(addr); + +		pte = pte_offset_kernel(pmd, hyp_addr);  		BUG_ON(!virt_addr_valid(addr));  		page = virt_to_page(addr);  		kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); @@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,  	unsigned long addr;  	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { -		pte = pte_offset_kernel(pmd, addr); +		unsigned long hyp_addr = KERN_TO_HYP(addr); + +		pte = pte_offset_kernel(pmd, hyp_addr);  		BUG_ON(pfn_valid(*pfn_base));  		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));  		(*pfn_base)++; @@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,  	unsigned long addr, next;  	for (addr = start; addr < end; addr = next) { -		pmd = pmd_offset(pud, addr); +		unsigned long hyp_addr = KERN_TO_HYP(addr); +		pmd = pmd_offset(pud, hyp_addr);  		BUG_ON(pmd_sect(*pmd));  		if (pmd_none(*pmd)) { -			pte = pte_alloc_one_kernel(NULL, addr); +			pte = pte_alloc_one_kernel(NULL, hyp_addr);  			if (!pte) {  				kvm_err("Cannot allocate Hyp pte\n");  			
	return -ENOMEM; @@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)  	unsigned long addr, next;  	int err = 0; -	BUG_ON(start > end); -	if (start < PAGE_OFFSET) +	if (start >= end) +		return -EINVAL; +	/* Check for a valid kernel memory mapping */ +	if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1))) +		return -EINVAL; +	/* Check for a valid kernel IO mapping */ +	if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))  		return -EINVAL;  	mutex_lock(&kvm_hyp_pgd_mutex);  	for (addr = start; addr < end; addr = next) { -		pgd = hyp_pgd + pgd_index(addr); -		pud = pud_offset(pgd, addr); +		unsigned long hyp_addr = KERN_TO_HYP(addr); +		pgd = hyp_pgd + pgd_index(hyp_addr); +		pud = pud_offset(pgd, hyp_addr);  		if (pud_none_or_clear_bad(pud)) { -			pmd = pmd_alloc_one(NULL, addr); +			pmd = pmd_alloc_one(NULL, hyp_addr);  			if (!pmd) {  				kvm_err("Cannot allocate Hyp pmd\n");  				err = -ENOMEM; @@ -236,12 +243,13 @@ out:  }  /** - * create_hyp_mappings - map a kernel virtual address range in Hyp mode + * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode   * @from:	The virtual kernel start address of the range   * @to:		The virtual kernel end address of the range (exclusive)   * - * The same virtual address as the kernel virtual address is also used in - * Hyp-mode mapping to the same underlying physical pages. + * The same virtual address as the kernel virtual address is also used + * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying + * physical pages.   *   * Note: Wrapping around zero in the "to" address is not supported.   
*/ @@ -251,10 +259,13 @@ int create_hyp_mappings(void *from, void *to)  }  /** - * create_hyp_io_mappings - map a physical IO range in Hyp mode - * @from:	The virtual HYP start address of the range - * @to:		The virtual HYP end address of the range (exclusive) + * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode + * @from:	The kernel start VA of the range + * @to:		The kernel end VA of the range (exclusive)   * @addr:	The physical start address which gets mapped + * + * The resulting HYP VA is the same as the kernel VA, modulo + * HYP_PAGE_OFFSET.   */  int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)  { @@ -290,7 +301,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)  	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));  	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); -	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); +	kvm_clean_pgd(pgd);  	kvm->arch.pgd = pgd;  	return 0; @@ -422,22 +433,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,  			return 0; /* ignore calls from kvm_set_spte_hva */  		pmd = mmu_memory_cache_alloc(cache);  		pud_populate(NULL, pud, pmd); -		pmd += pmd_index(addr);  		get_page(virt_to_page(pud)); -	} else -		pmd = pmd_offset(pud, addr); +	} + +	pmd = pmd_offset(pud, addr);  	/* Create 2nd stage page table mapping - Level 2 */  	if (pmd_none(*pmd)) {  		if (!cache)  			return 0; /* ignore calls from kvm_set_spte_hva */  		pte = mmu_memory_cache_alloc(cache); -		clean_pte_table(pte); +		kvm_clean_pte(pte);  		pmd_populate_kernel(NULL, pmd, pte); -		pte += pte_index(addr);  		get_page(virt_to_page(pmd)); -	} else -		pte = pte_offset_kernel(pmd, addr); +	} + +	pte = pte_offset_kernel(pmd, addr);  	if (iomap && pte_present(*pte))  		return -EFAULT; @@ -446,7 +457,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,  	old_pte = *pte;  	kvm_set_pte(pte, *new_pte);  	if (pte_present(old_pte)) -		kvm_tlb_flush_vmid(kvm); +		
kvm_tlb_flush_vmid_ipa(kvm, addr);  	else  		get_page(virt_to_page(pte)); @@ -473,7 +484,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,  	pfn = __phys_to_pfn(pa);  	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { -		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR); +		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); +		kvm_set_s2pte_writable(&pte);  		ret = mmu_topup_memory_cache(&cache, 2, 2);  		if (ret) @@ -492,29 +504,6 @@ out:  	return ret;  } -static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) -{ -	/* -	 * If we are going to insert an instruction page and the icache is -	 * either VIPT or PIPT, there is a potential problem where the host -	 * (or another VM) may have used the same page as this guest, and we -	 * read incorrect data from the icache.  If we're using a PIPT cache, -	 * we can invalidate just that page, but if we are using a VIPT cache -	 * we need to invalidate the entire icache - damn shame - as written -	 * in the ARM ARM (DDI 0406C.b - Page B3-1393). -	 * -	 * VIVT caches are tagged using both the ASID and the VMID and doesn't -	 * need any kind of flushing (DDI 0406C.b - Page B3-1392). 
-	 */ -	if (icache_is_pipt()) { -		unsigned long hva = gfn_to_hva(kvm, gfn); -		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE); -	} else if (!icache_is_vivt_asid_tagged()) { -		/* any kind of VIPT cache */ -		__flush_icache_all(); -	} -} -  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,  			  gfn_t gfn, struct kvm_memory_slot *memslot,  			  unsigned long fault_status) @@ -526,7 +515,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,  	unsigned long mmu_seq;  	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; -	write_fault = kvm_is_write_fault(vcpu->arch.hsr); +	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));  	if (fault_status == FSC_PERM && !write_fault) {  		kvm_err("Unexpected L2 read permission error\n");  		return -EFAULT; @@ -560,7 +549,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,  	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))  		goto out_unlock;  	if (writable) { -		pte_val(new_pte) |= L_PTE_S2_RDWR; +		kvm_set_s2pte_writable(&new_pte);  		kvm_set_pfn_dirty(pfn);  	}  	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); @@ -585,7 +574,6 @@ out_unlock:   */  int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)  { -	unsigned long hsr_ec;  	unsigned long fault_status;  	phys_addr_t fault_ipa;  	struct kvm_memory_slot *memslot; @@ -593,18 +581,17 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)  	gfn_t gfn;  	int ret, idx; -	hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; -	is_iabt = (hsr_ec == HSR_EC_IABT); -	fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8; +	is_iabt = kvm_vcpu_trap_is_iabt(vcpu); +	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); -	trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr, -			      vcpu->arch.hxfar, fault_ipa); +	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), +			      kvm_vcpu_get_hfar(vcpu), fault_ipa);  	/* Check the stage-2 fault is trans. 
fault or write fault */ -	fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE); +	fault_status = kvm_vcpu_trap_get_fault(vcpu);  	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { -		kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n", -			hsr_ec, fault_status); +		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n", +			kvm_vcpu_trap_get_class(vcpu), fault_status);  		return -EFAULT;  	} @@ -614,7 +601,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)  	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {  		if (is_iabt) {  			/* Prefetch Abort on I/O address */ -			kvm_inject_pabt(vcpu, vcpu->arch.hxfar); +			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));  			ret = 1;  			goto out_unlock;  		} @@ -626,8 +613,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)  			goto out_unlock;  		} -		/* Adjust page offset */ -		fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK; +		/* +		 * The IPA is reported as [MAX:12], so we need to +		 * complement it with the bottom 12 bits from the +		 * faulting VA. This is always 12 bits, irrespective +		 * of the page size. 
+		 */ +		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);  		ret = io_mem_abort(vcpu, run, fault_ipa);  		goto out_unlock;  	} @@ -682,7 +674,7 @@ static void handle_hva_to_gpa(struct kvm *kvm,  static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)  {  	unmap_stage2_range(kvm, gpa, PAGE_SIZE); -	kvm_tlb_flush_vmid(kvm); +	kvm_tlb_flush_vmid_ipa(kvm, gpa);  }  int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) @@ -776,7 +768,7 @@ void kvm_clear_hyp_idmap(void)  		pmd = pmd_offset(pud, addr);  		pud_clear(pud); -		clean_pmd_entry(pmd); +		kvm_clean_pmd_entry(pmd);  		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));  	} while (pgd++, addr = next, addr < end);  } diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index c9a17316e9f..17c5ac7d10e 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c @@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)  			  lr, irq, vgic_cpu->vgic_lr[lr]);  		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));  		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; - -		goto out; +		return true;  	}  	/* Try to use another LR for this interrupt */ @@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)  	vgic_cpu->vgic_irq_lr_map[irq] = lr;  	set_bit(lr, vgic_cpu->lr_used); -out:  	if (!vgic_irq_is_edge(vcpu, irq))  		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; @@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)  	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); -	/* -	 * We do not need to take the distributor lock here, since the only -	 * action we perform is clearing the irq_active_bit for an EOIed -	 * level interrupt.  There is a potential race with -	 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we -	 * check if the interrupt is already active. 
Two possibilities: -	 * -	 * - The queuing is occurring on the same vcpu: cannot happen, -	 *   as we're already in the context of this vcpu, and -	 *   executing the handler -	 * - The interrupt has been migrated to another vcpu, and we -	 *   ignore this interrupt for this run. Big deal. It is still -	 *   pending though, and will get considered when this vcpu -	 *   exits. -	 */  	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {  		/*  		 * Some level interrupts have been EOIed. Clear their @@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)  			} else {  				vgic_cpu_irq_clear(vcpu, irq);  			} + +			/* +			 * Despite being EOIed, the LR may not have +			 * been marked as empty. +			 */ +			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); +			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;  		}  	} @@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)  }  /* - * Sync back the VGIC state after a guest run. We do not really touch - * the distributor here (the irq_pending_on_cpu bit is safe to set), - * so there is no need for taking its lock. + * Sync back the VGIC state after a guest run. The distributor lock is + * needed so we don't get preempted in the middle of the state processing.   
*/  static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)  { @@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)  void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)  { +	struct vgic_dist *dist = &vcpu->kvm->arch.vgic; +  	if (!irqchip_in_kernel(vcpu->kvm))  		return; +	spin_lock(&dist->lock);  	__kvm_vgic_sync_hwstate(vcpu); +	spin_unlock(&dist->lock);  }  int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) @@ -1484,7 +1477,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)  	if (addr & ~KVM_PHYS_MASK)  		return -E2BIG; -	if (addr & ~PAGE_MASK) +	if (addr & (SZ_4K - 1))  		return -EINVAL;  	mutex_lock(&kvm->lock); diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 6b93f6a1a3c..64dbfa57204 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)  static void __timer_const_udelay(unsigned long xloops)  {  	unsigned long long loops = xloops; -	loops *= loops_per_jiffy; +	loops *= arm_delay_ops.ticks_per_jiffy;  	__timer_delay(loops >> UDELAY_SHIFT);  } @@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer)  		pr_info("Switching to timer-based delay loop\n");  		delay_timer			= timer;  		lpj_fine			= timer->freq / HZ; -		loops_per_jiffy			= lpj_fine; + +		/* cpufreq may scale loops_per_jiffy, so keep a private copy */ +		arm_delay_ops.ticks_per_jiffy	= lpj_fine;  		arm_delay_ops.delay		= __timer_delay;  		arm_delay_ops.const_udelay	= __timer_const_udelay;  		arm_delay_ops.udelay		= __timer_udelay; -		arm_delay_ops.const_clock	= true; +  		delay_calibrated		= true;  	} else {  		pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index e698f26cc0c..52e4bb5cf12 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -22,19 +22,9 @@  static struct map_desc cns3xxx_io_desc[] 
__initdata = {  	{ -		.virtual	= CNS3XXX_TC11MP_TWD_BASE_VIRT, -		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), -		.length		= SZ_4K, -		.type		= MT_DEVICE, -	}, { -		.virtual	= CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, -		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), -		.length		= SZ_4K, -		.type		= MT_DEVICE, -	}, { -		.virtual	= CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, -		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), -		.length		= SZ_4K, +		.virtual	= CNS3XXX_TC11MP_SCU_BASE_VIRT, +		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE), +		.length		= SZ_8K,  		.type		= MT_DEVICE,  	}, {  		.virtual	= CNS3XXX_TIMER1_2_3_BASE_VIRT, diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h index 191c8e57f28..b1021aafa48 100644 --- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h +++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h @@ -94,10 +94,10 @@  #define RTC_INTR_STS_OFFSET			0x34  #define CNS3XXX_MISC_BASE			0x76000000	/* Misc Control */ -#define CNS3XXX_MISC_BASE_VIRT			0xFFF07000	/* Misc Control */ +#define CNS3XXX_MISC_BASE_VIRT			0xFB000000	/* Misc Control */  #define CNS3XXX_PM_BASE				0x77000000	/* Power Management Control */ -#define CNS3XXX_PM_BASE_VIRT			0xFFF08000 +#define CNS3XXX_PM_BASE_VIRT			0xFB001000  #define PM_CLK_GATE_OFFSET			0x00  #define PM_SOFT_RST_OFFSET			0x04 @@ -109,7 +109,7 @@  #define PM_PLL_HM_PD_OFFSET			0x1C  #define CNS3XXX_UART0_BASE			0x78000000	/* UART 0 */ -#define CNS3XXX_UART0_BASE_VIRT			0xFFF09000 +#define CNS3XXX_UART0_BASE_VIRT			0xFB002000  #define CNS3XXX_UART1_BASE			0x78400000	/* UART 1 */  #define CNS3XXX_UART1_BASE_VIRT			0xFFF0A000 @@ -130,7 +130,7 @@  #define CNS3XXX_I2S_BASE_VIRT			0xFFF10000  #define CNS3XXX_TIMER1_2_3_BASE			0x7C800000	/* Timer */ -#define CNS3XXX_TIMER1_2_3_BASE_VIRT		0xFFF10800 +#define CNS3XXX_TIMER1_2_3_BASE_VIRT		0xFB003000  #define TIMER1_COUNTER_OFFSET			0x00  #define TIMER1_AUTO_RELOAD_OFFSET		0x04 @@ -227,16 +227,16 @@   * Testchip peripheral 
and fpga gic regions   */  #define CNS3XXX_TC11MP_SCU_BASE			0x90000000	/* IRQ, Test chip */ -#define CNS3XXX_TC11MP_SCU_BASE_VIRT		0xFF000000 +#define CNS3XXX_TC11MP_SCU_BASE_VIRT		0xFB004000  #define CNS3XXX_TC11MP_GIC_CPU_BASE		0x90000100	/* Test chip interrupt controller CPU interface */ -#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT	0xFF000100 +#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT	(CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)  #define CNS3XXX_TC11MP_TWD_BASE			0x90000600 -#define CNS3XXX_TC11MP_TWD_BASE_VIRT		0xFF000600 +#define CNS3XXX_TC11MP_TWD_BASE_VIRT		(CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)  #define CNS3XXX_TC11MP_GIC_DIST_BASE		0x90001000	/* Test chip interrupt controller distributor */ -#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT	0xFF001000 +#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT	(CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)  #define CNS3XXX_TC11MP_L220_BASE		0x92002000	/* L220 registers */  #define CNS3XXX_TC11MP_L220_BASE_VIRT		0xFF002000 diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h index d2afb4dd82a..b5cc77d2380 100644 --- a/arch/arm/mach-ep93xx/include/mach/uncompress.h +++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h @@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr)  static inline void putc(int c)  { -	/* Transmit fifo not full?  */ -	while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF) -		; +	int i; + +	for (i = 0; i < 10000; i++) { +		/* Transmit fifo not full? 
*/ +		if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)) +			break; +	}  	__raw_writeb(c, PHYS_UART_DATA);  } diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c index c3f825b2794..af90cfa2f82 100644 --- a/arch/arm/mach-exynos/hotplug.c +++ b/arch/arm/mach-exynos/hotplug.c @@ -28,7 +28,6 @@ static inline void cpu_enter_lowpower_a9(void)  {  	unsigned int v; -	flush_cache_all();  	asm volatile(  	"	mcr	p15, 0, %1, c7, c5, 0\n"  	"	mcr	p15, 0, %1, c7, c10, 4\n" diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c index f30c5284339..a019e4e86e5 100644 --- a/arch/arm/mach-highbank/hotplug.c +++ b/arch/arm/mach-highbank/hotplug.c @@ -14,7 +14,6 @@   * this program.  If not, see <http://www.gnu.org/licenses/>.   */  #include <linux/kernel.h> -  #include <asm/cacheflush.h>  #include "core.h" @@ -28,13 +27,11 @@ extern void secondary_startup(void);   */  void __ref highbank_cpu_die(unsigned int cpu)  { -	flush_cache_all(); -  	highbank_set_cpu_jump(cpu, phys_to_virt(0)); -	highbank_set_core_pwr(); -	cpu_do_idle(); +	flush_cache_louis(); +	highbank_set_core_pwr(); -	/* We should never return from idle */ -	panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu); +	while (1) +		cpu_do_idle();  } diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index e13a8fa5e62..2193c834f55 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c @@ -257,6 +257,7 @@ int __init mx35_clocks_init(void)  	clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");  	clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");  	clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); +	clk_register_clkdev(clk[admux_gate], "audmux", NULL);  	clk_prepare_enable(clk[spba_gate]);  	clk_prepare_enable(clk[gpio1_gate]); @@ -265,6 +266,7 @@ int __init mx35_clocks_init(void)  	clk_prepare_enable(clk[iim_gate]);  	clk_prepare_enable(clk[emi_gate]);  	clk_prepare_enable(clk[max_gate]); +	
clk_prepare_enable(clk[iomuxc_gate]);  	/*  	 * SCC is needed to boot via mmc after a watchdog reset. The clock code diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 2f9ff93a4e6..d38e54f5b6d 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[]	= { "axi", "pll3_usb_otg", "pll2_pfd0_352m"  static const char *gpu3d_core_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };  static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };  static const char *ipu_sels[]		= { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; -static const char *ldb_di_sels[]	= { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; +static const char *ldb_di_sels[]	= { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };  static const char *ipu_di_pre_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };  static const char *ipu1_di0_sels[]	= { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };  static const char *ipu1_di1_sels[]	= { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; @@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void)  	clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");  	clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); -	clk_register_clkdev(clk[twd], NULL, "smp_twd");  	clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);  	clk_register_clkdev(clk[ahb], "ahb", NULL);  	clk_register_clkdev(clk[cko1], "cko1", NULL); diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 5a800bfcec5..5bf4a97ab24 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *);  extern void imx_enable_cpu(int cpu, bool enable);  extern void 
imx_set_cpu_jump(int cpu, void *jump_addr); +extern u32 imx_get_cpu_arg(int cpu); +extern void imx_set_cpu_arg(int cpu, u32 arg);  extern void v7_cpu_resume(void);  extern u32 *pl310_get_save_ptr(void);  #ifdef CONFIG_SMP diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 7bc5fe15dda..5e91112dcbe 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -11,7 +11,6 @@   */  #include <linux/errno.h> -#include <asm/cacheflush.h>  #include <asm/cp15.h>  #include "common.h" @@ -20,7 +19,6 @@ static inline void cpu_enter_lowpower(void)  {  	unsigned int v; -	flush_cache_all();  	asm volatile(  		"mcr	p15, 0, %1, c7, c5, 0\n"  	"	mcr	p15, 0, %1, c7, c10, 4\n" @@ -46,11 +44,23 @@ static inline void cpu_enter_lowpower(void)  void imx_cpu_die(unsigned int cpu)  {  	cpu_enter_lowpower(); +	/* +	 * We use the cpu jumping argument register to sync with +	 * imx_cpu_kill() which is running on cpu0 and waiting for +	 * the register being cleared to kill the cpu. 
+	 */ +	imx_set_cpu_arg(cpu, ~0);  	cpu_do_idle();  }  int imx_cpu_kill(unsigned int cpu)  { +	unsigned long timeout = jiffies + msecs_to_jiffies(50); + +	while (imx_get_cpu_arg(cpu) == 0) +		if (time_after(jiffies, timeout)) +			return 0;  	imx_enable_cpu(cpu, false); +	imx_set_cpu_arg(cpu, 0);  	return 1;  } diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index e15f1555c59..09a742f8c7a 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)  		       src_base + SRC_GPR1 + cpu * 8);  } +u32 imx_get_cpu_arg(int cpu) +{ +	cpu = cpu_logical_map(cpu); +	return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); +} + +void imx_set_cpu_arg(int cpu, u32 arg) +{ +	cpu = cpu_logical_map(cpu); +	writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); +} +  void imx_src_prepare_restart(void)  {  	u32 val; diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c index f655b2637b0..e5f70415905 100644 --- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c +++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c @@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {  	.duplex         = DUPLEX_FULL,  }; +static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = { +        .phy_addr       = MV643XX_ETH_PHY_ADDR(11), +}; +  void __init iomega_ix2_200_init(void)  {  	/*  	 * Basic setup. Needs to be called early.  	 
*/ -	kirkwood_ge01_init(&iomega_ix2_200_ge00_data); +	kirkwood_ge00_init(&iomega_ix2_200_ge00_data); +	kirkwood_ge01_init(&iomega_ix2_200_ge01_data);  } diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c index 1c6e736cbbf..08dd739aa70 100644 --- a/arch/arm/mach-kirkwood/guruplug-setup.c +++ b/arch/arm/mach-kirkwood/guruplug-setup.c @@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = {  static struct mvsdio_platform_data guruplug_mvsdio_data = {  	/* unfortunately the CD signal has not been connected */ +	.gpio_card_detect = -1, +	.gpio_write_protect = -1,  };  static struct gpio_led guruplug_led_pins[] = { diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c index 8ddd69fdc93..6a6eb548307 100644 --- a/arch/arm/mach-kirkwood/openrd-setup.c +++ b/arch/arm/mach-kirkwood/openrd-setup.c @@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = {  static struct mvsdio_platform_data openrd_mvsdio_data = {  	.gpio_card_detect = 29,	/* MPP29 used as SD card detect */ +	.gpio_write_protect = -1,  };  static unsigned int openrd_mpp_config[] __initdata = { diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index c7d93b48926..d24223166e0 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c @@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = {  static struct mvsdio_platform_data rd88f6281_mvsdio_data = {  	.gpio_card_detect = 28, +	.gpio_write_protect = -1,  };  static unsigned int rd88f6281_mpp_config[] __initdata = { diff --git a/arch/arm/mach-msm/hotplug.c b/arch/arm/mach-msm/hotplug.c index 750446feb44..326a87261f9 100644 --- a/arch/arm/mach-msm/hotplug.c +++ b/arch/arm/mach-msm/hotplug.c @@ -10,16 +10,12 @@  #include <linux/errno.h>  #include <linux/smp.h> -#include <asm/cacheflush.h>  #include <asm/smp_plat.h>  #include "common.h"  static 
inline void cpu_enter_lowpower(void)  { -	/* Just flush the cache. Changing the coherency is not yet -	 * available on msm. */ -	flush_cache_all();  }  static inline void cpu_leave_lowpower(void) diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 2969027f02f..f9fd77e8f1f 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles,  {  	u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); -	writel_relaxed(0, event_base + TIMER_CLEAR); +	ctrl &= ~TIMER_ENABLE_EN; +	writel_relaxed(ctrl, event_base + TIMER_ENABLE); + +	writel_relaxed(ctrl, event_base + TIMER_CLEAR);  	writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);  	writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);  	return 0; diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c index 274ff58271d..d5970f5a1e8 100644 --- a/arch/arm/mach-mvebu/irq-armada-370-xp.c +++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c @@ -44,6 +44,8 @@  #define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28) +#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5) +  #define ACTIVE_DOORBELLS			(8)  static DEFINE_RAW_SPINLOCK(irq_controller_lock); @@ -59,36 +61,26 @@ static struct irq_domain *armada_370_xp_mpic_domain;   */  static void armada_370_xp_irq_mask(struct irq_data *d)  { -#ifdef CONFIG_SMP  	irq_hw_number_t hwirq = irqd_to_hwirq(d); -	if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) +	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)  		writel(hwirq, main_int_base +  				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);  	else  		writel(hwirq, per_cpu_int_base +  				ARMADA_370_XP_INT_SET_MASK_OFFS); -#else -	writel(irqd_to_hwirq(d), -	       per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#endif  }  static void armada_370_xp_irq_unmask(struct irq_data *d)  { -#ifdef CONFIG_SMP  	irq_hw_number_t hwirq = irqd_to_hwirq(d); -	if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) +	if (hwirq != 
ARMADA_370_XP_TIMER0_PER_CPU_IRQ)  		writel(hwirq, main_int_base +  				ARMADA_370_XP_INT_SET_ENABLE_OFFS);  	else  		writel(hwirq, per_cpu_int_base +  				ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#else -	writel(irqd_to_hwirq(d), -	       per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#endif  }  #ifdef CONFIG_SMP @@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,  				      unsigned int virq, irq_hw_number_t hw)  {  	armada_370_xp_irq_mask(irq_get_irq_data(virq)); -	writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); +	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) +		writel(hw, per_cpu_int_base + +			ARMADA_370_XP_INT_CLEAR_MASK_OFFS); +	else +		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);  	irq_set_status_flags(virq, IRQ_LEVEL); -	if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { +	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {  		irq_set_percpu_devid(virq);  		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,  					handle_percpu_devid_irq); diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index cb7c6ae2e3f..6c4f766365a 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c @@ -543,15 +543,6 @@ static struct clk usb_dc_ck = {  	/* Direct from ULPD, no parent */  	.rate		= 48000000,  	.enable_reg	= OMAP1_IO_ADDRESS(SOFT_REQ_REG), -	.enable_bit	= USB_REQ_EN_SHIFT, -}; - -static struct clk usb_dc_ck7xx = { -	.name		= "usb_dc_ck", -	.ops		= &clkops_generic, -	/* Direct from ULPD, no parent */ -	.rate		= 48000000, -	.enable_reg	= OMAP1_IO_ADDRESS(SOFT_REQ_REG),  	.enable_bit	= SOFT_USB_OTG_DPLL_REQ_SHIFT,  }; @@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = {  	CLK(NULL,	"usb_clko",	&usb_clko,	CK_16XX | CK_1510 | CK_310),  	CLK(NULL,	"usb_hhc_ck",	&usb_hhc_ck1510, CK_1510 | CK_310),  	CLK(NULL,	"usb_hhc_ck",	&usb_hhc_ck16xx, CK_16XX), -	CLK(NULL,	"usb_dc_ck",	&usb_dc_ck,	CK_16XX), -	CLK(NULL,	"usb_dc_ck",	&usb_dc_ck7xx,	CK_7XX), +	CLK(NULL,	
"usb_dc_ck",	&usb_dc_ck,	CK_16XX | CK_7XX),  	CLK(NULL,	"mclk",		&mclk_1510,	CK_1510 | CK_310),  	CLK(NULL,	"mclk",		&mclk_16xx,	CK_16XX),  	CLK(NULL,	"bclk",		&bclk_1510,	CK_1510 | CK_310), diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index 3d58f335f17..0c6834ae1fc 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c @@ -52,6 +52,13 @@   */  #define OMAP4_DPLL_ABE_DEFFREQ				98304000 +/* + * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section + * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred + * locked frequency for the USB DPLL is 960MHz. + */ +#define OMAP4_DPLL_USB_DEFFREQ				960000000 +  /* Root clocks */  DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); @@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,  		    OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,  		    hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); +DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0, +		OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, +		OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL); +  DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,  		OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,  		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); @@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = {  	CLK(NULL,	"per_mcbsp4_gfclk",			&per_mcbsp4_gfclk,	CK_443X),  	CLK(NULL,	"hsmmc1_fclk",			&hsmmc1_fclk,	CK_443X),  	CLK(NULL,	"hsmmc2_fclk",			&hsmmc2_fclk,	CK_443X), +	CLK(NULL,	"ocp2scp_usb_phy_phy_48m",	&ocp2scp_usb_phy_phy_48m,	CK_443X),  	CLK(NULL,	"sha2md5_fck",			&sha2md5_fck,	CK_443X),  	CLK(NULL,	"slimbus1_fclk_1",		&slimbus1_fclk_1,	CK_443X),  	CLK(NULL,	"slimbus1_fclk_0",		&slimbus1_fclk_0,	CK_443X), @@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void)  	if (rc)  		pr_err("%s: failed to configure ABE DPLL!\n", __func__); +	/* +	 * Lock USB DPLL on OMAP4 devices so that 
the L3INIT power +	 * domain can transition to retention state when not in use. +	 */ +	rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ); +	if (rc) +		pr_err("%s: failed to configure USB DPLL!\n", __func__); +  	return 0;  } diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 40f4a03d728..d6ba13e1c54 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -293,5 +293,8 @@ extern void omap_reserve(void);  struct omap_hwmod;  extern int omap_dss_reset(struct omap_hwmod *); +/* SoC specific clock initializer */ +extern int (*omap_clk_init)(void); +  #endif /* __ASSEMBLER__ */  #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 8a68f1ec66b..577298ed5a4 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -300,7 +300,7 @@ void __init omap3xxx_check_revision(void)  	 * If the processor type is Cortex-A8 and the revision is 0x0  	 * it means its Cortex r0p0 which is 3430 ES1.0.  	 */ -	cpuid = read_cpuid(CPUID_ID); +	cpuid = read_cpuid_id();  	if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {  		omap_revision = OMAP3430_REV_ES1_0;  		cpu_rev = "1.0"; @@ -460,7 +460,7 @@ void __init omap4xxx_check_revision(void)  	 * Use ARM register to detect the correct ES version  	 */  	if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) { -		idcode = read_cpuid(CPUID_ID); +		idcode = read_cpuid_id();  		rev = (idcode & 0xf) - 1;  	} diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 2c3fdd65387..5c445ca1e27 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -55,6 +55,12 @@  #include "prm44xx.h"  /* + * omap_clk_init: points to a function that does the SoC-specific + * clock initializations + */ +int (*omap_clk_init)(void); + +/*   * The machine specific code may provide the extra mapping besides the   * default mapping provided here.   
*/ @@ -397,7 +403,7 @@ void __init omap2420_init_early(void)  	omap242x_clockdomains_init();  	omap2420_hwmod_init();  	omap_hwmod_init_postsetup(); -	omap2420_clk_init(); +	omap_clk_init = omap2420_clk_init;  }  void __init omap2420_init_late(void) @@ -427,7 +433,7 @@ void __init omap2430_init_early(void)  	omap243x_clockdomains_init();  	omap2430_hwmod_init();  	omap_hwmod_init_postsetup(); -	omap2430_clk_init(); +	omap_clk_init = omap2430_clk_init;  }  void __init omap2430_init_late(void) @@ -462,7 +468,7 @@ void __init omap3_init_early(void)  	omap3xxx_clockdomains_init();  	omap3xxx_hwmod_init();  	omap_hwmod_init_postsetup(); -	omap3xxx_clk_init(); +	omap_clk_init = omap3xxx_clk_init;  }  void __init omap3430_init_early(void) @@ -500,7 +506,7 @@ void __init ti81xx_init_early(void)  	omap3xxx_clockdomains_init();  	omap3xxx_hwmod_init();  	omap_hwmod_init_postsetup(); -	omap3xxx_clk_init(); +	omap_clk_init = omap3xxx_clk_init;  }  void __init omap3_init_late(void) @@ -568,7 +574,7 @@ void __init am33xx_init_early(void)  	am33xx_clockdomains_init();  	am33xx_hwmod_init();  	omap_hwmod_init_postsetup(); -	am33xx_clk_init(); +	omap_clk_init = am33xx_clk_init;  }  #endif @@ -593,7 +599,7 @@ void __init omap4430_init_early(void)  	omap44xx_clockdomains_init();  	omap44xx_hwmod_init();  	omap_hwmod_init_postsetup(); -	omap4xxx_clk_init(); +	omap_clk_init = omap4xxx_clk_init;  }  void __init omap4430_init_late(void) diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c index e712d1725a8..ceb30a59bf2 100644 --- a/arch/arm/mach-omap2/omap-hotplug.c +++ b/arch/arm/mach-omap2/omap-hotplug.c @@ -35,9 +35,6 @@ void __ref omap4_cpu_die(unsigned int cpu)  	unsigned int boot_cpu = 0;  	void __iomem *base = omap_get_wakeupgen_base(); -	flush_cache_all(); -	dsb(); -  	/*  	 * we're ready for shutdown now, so do it  	 */ diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index d9727218dd0..7f5626d8fd3 100644 --- 
a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -209,7 +209,7 @@ static void __init omap4_smp_init_cpus(void)  	unsigned int i = 0, ncores = 1, cpu_id;  	/* Use ARM cpuid check here, as SoC detection will not work so early */ -	cpu_id = read_cpuid(CPUID_ID) & CPU_MASK; +	cpu_id = read_cpuid_id() & CPU_MASK;  	if (cpu_id == CPU_CORTEX_A9) {  		/*  		 * Currently we can't call ioremap here because diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index c2c798c08c2..a202a478510 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh)  	}  	if (sf & SYSC_HAS_MIDLEMODE) { -		if (oh->flags & HWMOD_SWSUP_MSTANDBY) { +		if (oh->flags & HWMOD_FORCE_MSTANDBY) { +			idlemode = HWMOD_IDLEMODE_FORCE; +		} else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {  			idlemode = HWMOD_IDLEMODE_NO;  		} else {  			if (sf & SYSC_HAS_ENAWAKEUP) @@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh)  	}  	if (sf & SYSC_HAS_MIDLEMODE) { -		if (oh->flags & HWMOD_SWSUP_MSTANDBY) { +		if ((oh->flags & HWMOD_SWSUP_MSTANDBY) || +		    (oh->flags & HWMOD_FORCE_MSTANDBY)) {  			idlemode = HWMOD_IDLEMODE_FORCE;  		} else {  			if (sf & SYSC_HAS_ENAWAKEUP) diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index d43d9b608ed..d5dc935f606 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm {   *   * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out   *     of idle, rather than relying on module smart-idle - * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out - *     of standby, rather than relying on module smart-standby + * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and + *     out of standby, rather than relying on module smart-standby   * 
HWMOD_INIT_NO_RESET: don't reset this module at boot - important for   *     SDRAM controller, etc. XXX probably belongs outside the main hwmod file   *     XXX Should be HWMOD_SETUP_NO_RESET @@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm {   *     correctly, or this is being abused to deal with some PM latency   *     issues -- but we're currently suffering from a shortage of   *     folks who are able to track these issues down properly. + * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device + *     is kept in force-standby mode. Failing to do so causes PM problems + *     with musb on OMAP3630 at least. Note that musb has a dedicated register + *     to control MSTANDBY signal when MIDLEMODE is set to force-standby.   */  #define HWMOD_SWSUP_SIDLE			(1 << 0)  #define HWMOD_SWSUP_MSTANDBY			(1 << 1) @@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm {  #define HWMOD_16BIT_REG				(1 << 8)  #define HWMOD_EXT_OPT_MAIN_CLK			(1 << 9)  #define HWMOD_BLOCK_WFI				(1 << 10) +#define HWMOD_FORCE_MSTANDBY			(1 << 11)  /*   * omap_hwmod._int_flags definitions diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index ac7e03ec952..5112d04e7b7 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {  	 * Erratum ID: i479  idle_req / idle_ack mechanism potentially  	 * broken when autoidle is enabled  	 * workaround is to disable the autoidle bit at module level. +	 * +	 * Enabling the device in any other MIDLEMODE setting but force-idle +	 * causes core_pwrdm not enter idle states at least on OMAP3630. +	 * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY +	 * signal when MIDLEMODE is set to force-idle.  	 
*/  	.flags		= HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE -				| HWMOD_SWSUP_MSTANDBY, +				| HWMOD_FORCE_MSTANDBY,  };  /* usb_otg_hs */ diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 0e47d2e1687..9e0576569e0 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2714,6 +2714,10 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {  	{ }  }; +static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = { +	{ .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" }, +}; +  /* ocp2scp_usb_phy */  static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {  	.name		= "ocp2scp_usb_phy", @@ -2728,6 +2732,8 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {  		},  	},  	.dev_attr	= ocp2scp_dev_attr, +	.opt_clks	= ocp2scp_usb_phy_opt_clks, +	.opt_clks_cnt	= ARRAY_SIZE(ocp2scp_usb_phy_opt_clks),  };  /* diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 2bdd4cf17a8..f62b509ed08 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void)  			       clksrc_nr, clksrc_src)			\  void __init omap##name##_gptimer_timer_init(void)			\  {									\ +	if (omap_clk_init)						\ +		omap_clk_init();					\  	omap_dmtimer_init();						\  	omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);	\  	omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src);	\ @@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void)			\  				clksrc_nr, clksrc_src)			\  void __init omap##name##_sync32k_timer_init(void)		\  {									\ +	if (omap_clk_init)						\ +		omap_clk_init();					\  	omap_dmtimer_init();						\  	omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);	\  	/* Enable the use of clocksource="gp_timer" kernel parameter */	\ diff --git a/arch/arm/mach-prima2/hotplug.c b/arch/arm/mach-prima2/hotplug.c index f4b17cbabab..0ab2f8bae28 
100644 --- a/arch/arm/mach-prima2/hotplug.c +++ b/arch/arm/mach-prima2/hotplug.c @@ -10,13 +10,10 @@  #include <linux/errno.h>  #include <linux/smp.h> -#include <asm/cacheflush.h>  #include <asm/smp_plat.h>  static inline void platform_do_lowpower(unsigned int cpu)  { -	flush_cache_all(); -  	/* we put the platform to just WFI */  	for (;;) {  		__asm__ __volatile__("dsb\n\t" "wfi\n\t" diff --git a/arch/arm/mach-realview/hotplug.c b/arch/arm/mach-realview/hotplug.c index 53818e5cd3a..ac22dd41b13 100644 --- a/arch/arm/mach-realview/hotplug.c +++ b/arch/arm/mach-realview/hotplug.c @@ -12,7 +12,6 @@  #include <linux/errno.h>  #include <linux/smp.h> -#include <asm/cacheflush.h>  #include <asm/cp15.h>  #include <asm/smp_plat.h> @@ -20,7 +19,6 @@ static inline void cpu_enter_lowpower(void)  {  	unsigned int v; -	flush_cache_all();  	asm volatile(  	"	mcr	p15, 0, %1, c7, c5, 0\n"  	"	mcr	p15, 0, %1, c7, c10, 4\n" diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h index b7a9f4d469e..1e73f5fa865 100644 --- a/arch/arm/mach-s3c24xx/include/mach/irqs.h +++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h @@ -188,10 +188,8 @@  #if defined(CONFIG_CPU_S3C2416)  #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) -#elif defined(CONFIG_CPU_S3C2443) -#define NR_IRQS (IRQ_S3C2443_AC97+1)  #else -#define NR_IRQS (IRQ_S3C2440_AC97+1) +#define NR_IRQS (IRQ_S3C2443_AC97 + 1)  #endif  /* compatibility define. 
*/ diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c index cb9f5e011e7..d8ba9bee4c7 100644 --- a/arch/arm/mach-s3c24xx/irq.c +++ b/arch/arm/mach-s3c24xx/irq.c @@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,  		base = (void *)0xfd000000;  		intc->reg_mask = base + 0xa4; -		intc->reg_pending = base + 0x08; +		intc->reg_pending = base + 0xa8;  		irq_num = 20;  		irq_start = S3C2410_IRQ(32);  		irq_offset = 4; diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c index acb46a94ccd..2f1ef1bc805 100644 --- a/arch/arm/mach-shmobile/smp-sh73a0.c +++ b/arch/arm/mach-shmobile/smp-sh73a0.c @@ -119,14 +119,6 @@ static int sh73a0_cpu_kill(unsigned int cpu)  static void sh73a0_cpu_die(unsigned int cpu)  { -	/* -	 * The ARM MPcore does not issue a cache coherency request for the L1 -	 * cache when powering off single CPUs. We must take care of this and -	 * further caches. -	 */ -	dsb(); -	flush_cache_all(); -  	/* Set power off mode. 
This takes the CPU out of the MP cluster */  	scu_power_mode(scu_base_addr(), SCU_PM_POWEROFF); diff --git a/arch/arm/mach-spear13xx/hotplug.c b/arch/arm/mach-spear13xx/hotplug.c index a7d2dd11a4f..d97749c642c 100644 --- a/arch/arm/mach-spear13xx/hotplug.c +++ b/arch/arm/mach-spear13xx/hotplug.c @@ -13,7 +13,6 @@  #include <linux/kernel.h>  #include <linux/errno.h>  #include <linux/smp.h> -#include <asm/cacheflush.h>  #include <asm/cp15.h>  #include <asm/smp_plat.h> @@ -21,7 +20,6 @@ static inline void cpu_enter_lowpower(void)  {  	unsigned int v; -	flush_cache_all();  	asm volatile(  	"	mcr	p15, 0, %1, c7, c5, 0\n"  	"	dsb\n" diff --git a/arch/arm/mach-tegra/common.h b/arch/arm/mach-tegra/common.h index 32f8eb3fe34..5900cc44f78 100644 --- a/arch/arm/mach-tegra/common.h +++ b/arch/arm/mach-tegra/common.h @@ -2,4 +2,3 @@ extern struct smp_operations tegra_smp_ops;  extern int tegra_cpu_kill(unsigned int cpu);  extern void tegra_cpu_die(unsigned int cpu); -extern int tegra_cpu_disable(unsigned int cpu); diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c index a599f6e36de..e8323bc9577 100644 --- a/arch/arm/mach-tegra/hotplug.c +++ b/arch/arm/mach-tegra/hotplug.c @@ -12,7 +12,6 @@  #include <linux/smp.h>  #include <linux/clk/tegra.h> -#include <asm/cacheflush.h>  #include <asm/smp_plat.h>  #include "sleep.h" @@ -47,15 +46,6 @@ void __ref tegra_cpu_die(unsigned int cpu)  	BUG();  } -int tegra_cpu_disable(unsigned int cpu) -{ -	/* -	 * we don't allow CPU 0 to be shutdown (it is still too special -	 * e.g. clock tick interrupts) -	 */ -	return cpu == 0 ? 
-EPERM : 0; -} -  #ifdef CONFIG_ARCH_TEGRA_2x_SOC  extern void tegra20_hotplug_shutdown(void);  void __init tegra20_hotplug_init(void) diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c index 2c6b3d55213..ec33ec86aad 100644 --- a/arch/arm/mach-tegra/platsmp.c +++ b/arch/arm/mach-tegra/platsmp.c @@ -192,6 +192,5 @@ struct smp_operations tegra_smp_ops __initdata = {  #ifdef CONFIG_HOTPLUG_CPU  	.cpu_kill		= tegra_cpu_kill,  	.cpu_die		= tegra_cpu_die, -	.cpu_disable		= tegra_cpu_disable,  #endif  }; diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 051b62c2710..7f2cb6c5e2c 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c @@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {  #endif  struct mmci_platform_data mop500_sdi0_data = { -	.ios_handler	= mop500_sdi0_ios_handler,  	.ocr_mask	= MMC_VDD_29_30,  	.f_max		= 50000000,  	.capabilities	= MMC_CAP_4_BIT_DATA | diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index b03457881c4..87d2d7b38ce 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -12,6 +12,7 @@  #include <linux/init.h>  #include <linux/interrupt.h>  #include <linux/platform_device.h> +#include <linux/clk.h>  #include <linux/io.h>  #include <linux/i2c.h>  #include <linux/platform_data/i2c-nomadik.h> @@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev)  	regulator_put(prox_regulator);  } +void mop500_snowball_ethernet_clock_enable(void) +{ +	struct clk *clk; + +	clk = clk_get_sys("fsmc", NULL); +	if (!IS_ERR(clk)) +		clk_prepare_enable(clk); +} +  static struct cryp_platform_data u8500_cryp1_platform_data = {  		.mem_to_engine = {  				.dir = STEDMA40_MEM_TO_PERIPH, @@ -683,6 +693,8 @@ static void __init snowball_init_machine(void)  	mop500_audio_init(parent);  	mop500_uart_init(parent); +	
mop500_snowball_ethernet_clock_enable(); +  	/* This board has full regulator constraints */  	regulator_has_full_constraints();  } diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index eaa605f5d90..d38951be70d 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h @@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void);  void __init snowball_pinmaps_init(void);  void __init hrefv60_pinmaps_init(void);  void mop500_audio_init(struct device *parent); +void mop500_snowball_ethernet_clock_enable(void);  int __init mop500_uib_init(void);  void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 19235cf7bbe..f1a58184437 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -312,9 +312,10 @@ static void __init u8500_init_machine(void)  	/* Pinmaps must be in place before devices register */  	if (of_machine_is_compatible("st-ericsson,mop500"))  		mop500_pinmaps_init(); -	else if (of_machine_is_compatible("calaosystems,snowball-a9500")) +	else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {  		snowball_pinmaps_init(); -	else if (of_machine_is_compatible("st-ericsson,hrefv60+")) +		mop500_snowball_ethernet_clock_enable(); +	} else if (of_machine_is_compatible("st-ericsson,hrefv60+"))  		hrefv60_pinmaps_init();  	else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}  		/* TODO: Add pinmaps for ccu9540 board. 
*/ diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c index 2f6af259015..1c55a55dd89 100644 --- a/arch/arm/mach-ux500/hotplug.c +++ b/arch/arm/mach-ux500/hotplug.c @@ -12,7 +12,6 @@  #include <linux/errno.h>  #include <linux/smp.h> -#include <asm/cacheflush.h>  #include <asm/smp_plat.h>  #include <mach/setup.h> @@ -24,8 +23,6 @@   */  void __ref ux500_cpu_die(unsigned int cpu)  { -	flush_cache_all(); -  	/* directly enter low power state, skipping secure registers */  	for (;;) {  		__asm__ __volatile__("dsb\n\t" "wfi\n\t" diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c index a141b98d84f..f0ce6b8f5e7 100644 --- a/arch/arm/mach-vexpress/hotplug.c +++ b/arch/arm/mach-vexpress/hotplug.c @@ -12,7 +12,6 @@  #include <linux/errno.h>  #include <linux/smp.h> -#include <asm/cacheflush.h>  #include <asm/smp_plat.h>  #include <asm/cp15.h> @@ -20,7 +19,6 @@ static inline void cpu_enter_lowpower(void)  {  	unsigned int v; -	flush_cache_all();  	asm volatile(  		"mcr	p15, 0, %1, c7, c5, 0\n"  	"	mcr	p15, 0, %1, c7, c10, 4\n" diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 025d1732873..35955b54944 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -43,7 +43,7 @@ config CPU_ARM740T  	depends on !MMU  	select CPU_32v4T  	select CPU_ABRT_LV4T -	select CPU_CACHE_V3	# although the core is v4t +	select CPU_CACHE_V4  	select CPU_CP15_MPU  	select CPU_PABRT_LEGACY  	help @@ -397,6 +397,13 @@ config CPU_V7  	select CPU_PABRT_V7  	select CPU_TLB_V7 if MMU +config CPU_THUMBONLY +	bool +	# There are no CPUs available with MMU that don't implement an ARM ISA: +	depends on !MMU +	help +	  Select this if your CPU doesn't support the 32 bit ARM instructions. +  # Figure out what processor architecture version we should be using.  # This defines the compiler instruction set which depends on the machine type.  
config CPU_32v3 @@ -469,9 +476,6 @@ config CPU_PABRT_V7  	bool  # The cache model -config CPU_CACHE_V3 -	bool -  config CPU_CACHE_V4  	bool @@ -608,7 +612,7 @@ config ARCH_DMA_ADDR_T_64BIT  	bool  config ARM_THUMB -	bool "Support Thumb user binaries" +	bool "Support Thumb user binaries" if !CPU_THUMBONLY  	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON  	default y  	help diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 4e333fa2756..9e51be96f63 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY)	+= pabort-legacy.o  obj-$(CONFIG_CPU_PABRT_V6)	+= pabort-v6.o  obj-$(CONFIG_CPU_PABRT_V7)	+= pabort-v7.o -obj-$(CONFIG_CPU_CACHE_V3)	+= cache-v3.o  obj-$(CONFIG_CPU_CACHE_V4)	+= cache-v4.o  obj-$(CONFIG_CPU_CACHE_V4WT)	+= cache-v4wt.o  obj-$(CONFIG_CPU_CACHE_V4WB)	+= cache-v4wb.o diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index db26e2e543f..6f4585b8907 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -961,12 +961,14 @@ static int __init alignment_init(void)  		return -ENOMEM;  #endif +#ifdef CONFIG_CPU_CP15  	if (cpu_is_v6_unaligned()) {  		cr_alignment &= ~CR_A;  		cr_no_alignment &= ~CR_A;  		set_cr(cr_alignment);  		ai_usermode = safe_usermode(ai_usermode, false);  	} +#endif  	hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,  			"alignment exception"); diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index dd3d59122cc..48bc3c0a87c 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)  	outer_cache.inv_range = feroceon_l2_inv_range;  	outer_cache.clean_range = feroceon_l2_clean_range;  	
outer_cache.flush_range = feroceon_l2_flush_range; +	outer_cache.inv_all = l2_inv_all;  	enable_l2(); diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c2f37390308..c465faca51b 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)  	int lockregs;  	int i; -	switch (cache_id) { +	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {  	case L2X0_CACHE_ID_PART_L310:  		lockregs = 8;  		break; @@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)  	if (cache_id_part_number_from_dt)  		cache_id = cache_id_part_number_from_dt;  	else -		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) -			& L2X0_CACHE_ID_PART_MASK; +		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);  	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);  	aux &= aux_mask;  	aux |= aux_val;  	/* Determine the number of ways */ -	switch (cache_id) { +	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {  	case L2X0_CACHE_ID_PART_L310:  		if (aux & (1 << 16))  			ways = 16; @@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {  		.flush_all   = l2x0_flush_all,  		.inv_all     = l2x0_inv_all,  		.disable     = l2x0_disable, -		.set_debug   = pl310_set_debug,  	},  }; @@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)  		data->save();  	of_init = true; -	l2x0_init(l2x0_base, aux_val, aux_mask); -  	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); +	l2x0_init(l2x0_base, aux_val, aux_mask);  	return 0;  } diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S deleted file mode 100644 index 8a3fadece8d..00000000000 --- a/arch/arm/mm/cache-v3.S +++ /dev/null @@ -1,137 +0,0 @@ -/* - *  linux/arch/arm/mm/cache-v3.S - * - *  Copyright (C) 1997-2002 Russell king - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software 
Foundation. - */ -#include <linux/linkage.h> -#include <linux/init.h> -#include <asm/page.h> -#include "proc-macros.S" - -/* - *	flush_icache_all() - * - *	Unconditionally clean and invalidate the entire icache. - */ -ENTRY(v3_flush_icache_all) -	mov	pc, lr -ENDPROC(v3_flush_icache_all) - -/* - *	flush_user_cache_all() - * - *	Invalidate all cache entries in a particular address - *	space. - * - *	- mm	- mm_struct describing address space - */ -ENTRY(v3_flush_user_cache_all) -	/* FALLTHROUGH */ -/* - *	flush_kern_cache_all() - * - *	Clean and invalidate the entire cache. - */ -ENTRY(v3_flush_kern_cache_all) -	/* FALLTHROUGH */ - -/* - *	flush_user_cache_range(start, end, flags) - * - *	Invalidate a range of cache entries in the specified - *	address space. - * - *	- start - start address (may not be aligned) - *	- end	- end address (exclusive, may not be aligned) - *	- flags	- vma_area_struct flags describing address space - */ -ENTRY(v3_flush_user_cache_range) -	mov	ip, #0 -	mcreq	p15, 0, ip, c7, c0, 0		@ flush ID cache -	mov	pc, lr - -/* - *	coherent_kern_range(start, end) - * - *	Ensure coherency between the Icache and the Dcache in the - *	region described by start.  If you have non-snooping - *	Harvard caches, you need to implement this function. - * - *	- start  - virtual start address - *	- end	 - virtual end address - */ -ENTRY(v3_coherent_kern_range) -	/* FALLTHROUGH */ - -/* - *	coherent_user_range(start, end) - * - *	Ensure coherency between the Icache and the Dcache in the - *	region described by start.  If you have non-snooping - *	Harvard caches, you need to implement this function. 
- * - *	- start  - virtual start address - *	- end	 - virtual end address - */ -ENTRY(v3_coherent_user_range) -	mov	r0, #0 -	mov	pc, lr - -/* - *	flush_kern_dcache_area(void *page, size_t size) - * - *	Ensure no D cache aliasing occurs, either with itself or - *	the I cache - * - *	- addr	- kernel address - *	- size	- region size - */ -ENTRY(v3_flush_kern_dcache_area) -	/* FALLTHROUGH */ - -/* - *	dma_flush_range(start, end) - * - *	Clean and invalidate the specified virtual address range. - * - *	- start  - virtual start address - *	- end	 - virtual end address - */ -ENTRY(v3_dma_flush_range) -	mov	r0, #0 -	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache -	mov	pc, lr - -/* - *	dma_unmap_area(start, size, dir) - *	- start	- kernel virtual start address - *	- size	- size of region - *	- dir	- DMA direction - */ -ENTRY(v3_dma_unmap_area) -	teq	r2, #DMA_TO_DEVICE -	bne	v3_dma_flush_range -	/* FALLTHROUGH */ - -/* - *	dma_map_area(start, size, dir) - *	- start	- kernel virtual start address - *	- size	- size of region - *	- dir	- DMA direction - */ -ENTRY(v3_dma_map_area) -	mov	pc, lr -ENDPROC(v3_dma_unmap_area) -ENDPROC(v3_dma_map_area) - -	.globl	v3_flush_kern_cache_louis -	.equ	v3_flush_kern_cache_louis, v3_flush_kern_cache_all - -	__INITDATA - -	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) -	define_cache_functions v3 diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 43e5d77be67..a7ba68f59f0 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)  ENTRY(v4_flush_user_cache_range)  #ifdef CONFIG_CPU_CP15  	mov	ip, #0 -	mcreq	p15, 0, ip, c7, c7, 0		@ flush ID cache +	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache  	mov	pc, lr  #else  	/* FALLTHROUGH */ diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a5a4b2bc42b..2ac37372ef5 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);  static 
atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);  static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); -static DEFINE_PER_CPU(atomic64_t, active_asids); +DEFINE_PER_CPU(atomic64_t, active_asids);  static DEFINE_PER_CPU(u64, reserved_asids);  static cpumask_t tlb_flush_pending; @@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)  	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {  		local_flush_bp_all();  		local_flush_tlb_all(); +		dummy_flush_tlb_a15_erratum();  	}  	atomic64_set(&per_cpu(active_asids, cpu), asid); diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index e9db6b4bf65..ef3e0f3aac9 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -823,16 +823,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,  		if (PageHighMem(page)) {  			if (len + offset > PAGE_SIZE)  				len = PAGE_SIZE - offset; -			vaddr = kmap_high_get(page); -			if (vaddr) { -				vaddr += offset; -				op(vaddr, len, dir); -				kunmap_high(page); -			} else if (cache_is_vipt()) { -				/* unmapped pages might still be cached */ + +			if (cache_is_vipt_nonaliasing()) {  				vaddr = kmap_atomic(page);  				op(vaddr + offset, len, dir);  				kunmap_atomic(vaddr); +			} else { +				vaddr = kmap_high_get(page); +				if (vaddr) { +					op(vaddr + offset, len, dir); +					kunmap_high(page); +				}  			}  		} else {  			vaddr = page_address(page) + offset; diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 1c8f7f56417..0d473cce501 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -170,15 +170,18 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)  	if (!PageHighMem(page)) {  		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);  	} else { -		void *addr = kmap_high_get(page); -		if (addr) { -			__cpuc_flush_dcache_area(addr, PAGE_SIZE); -			kunmap_high(page); -		} else if (cache_is_vipt()) { -			/* unmapped pages might 
still be cached */ +		void *addr; + +		if (cache_is_vipt_nonaliasing()) {  			addr = kmap_atomic(page);  			__cpuc_flush_dcache_area(addr, PAGE_SIZE);  			kunmap_atomic(addr); +		} else { +			addr = kmap_high_get(page); +			if (addr) { +				__cpuc_flush_dcache_area(addr, PAGE_SIZE); +				kunmap_high(page); +			}  		}  	} diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e95a996ab78..e0d8565671a 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -34,6 +34,7 @@  #include <asm/mach/pci.h>  #include "mm.h" +#include "tcm.h"  /*   * empty_zero_page is a special page that is used for @@ -112,6 +113,7 @@ static struct cachepolicy cache_policies[] __initdata = {  	}  }; +#ifdef CONFIG_CPU_CP15  /*   * These are useful for identifying cache coherency   * problems by allowing the cache or the cache and @@ -210,6 +212,22 @@ void adjust_cr(unsigned long mask, unsigned long set)  }  #endif +#else /* ifdef CONFIG_CPU_CP15 */ + +static int __init early_cachepolicy(char *p) +{ +	pr_warning("cachepolicy kernel parameter not supported without cp15\n"); +} +early_param("cachepolicy", early_cachepolicy); + +static int __init noalign_setup(char *__unused) +{ +	pr_warning("noalign kernel parameter not supported without cp15\n"); +} +__setup("noalign", noalign_setup); + +#endif /* ifdef CONFIG_CPU_CP15 / else */ +  #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN  #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE @@ -598,39 +616,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,  	} while (pte++, addr += PAGE_SIZE, addr != end);  } -static void __init alloc_init_section(pud_t *pud, unsigned long addr, -				      unsigned long end, phys_addr_t phys, -				      const struct mem_type *type) +static void __init map_init_section(pmd_t *pmd, unsigned long addr, +			unsigned long end, phys_addr_t phys, +			const struct mem_type *type)  { -	pmd_t *pmd = pmd_offset(pud, addr); - +#ifndef CONFIG_ARM_LPAE  	/* -	 * Try a section 
mapping - end, addr and phys must all be aligned -	 * to a section boundary.  Note that PMDs refer to the individual -	 * L1 entries, whereas PGDs refer to a group of L1 entries making -	 * up one logical pointer to an L2 table. +	 * In classic MMU format, puds and pmds are folded in to +	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a +	 * group of L1 entries making up one logical pointer to +	 * an L2 table (2MB), where as PMDs refer to the individual +	 * L1 entries (1MB). Hence increment to get the correct +	 * offset for odd 1MB sections. +	 * (See arch/arm/include/asm/pgtable-2level.h)  	 */ -	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { -		pmd_t *p = pmd; - -#ifndef CONFIG_ARM_LPAE -		if (addr & SECTION_SIZE) -			pmd++; +	if (addr & SECTION_SIZE) +		pmd++;  #endif +	do { +		*pmd = __pmd(phys | type->prot_sect); +		phys += SECTION_SIZE; +	} while (pmd++, addr += SECTION_SIZE, addr != end); -		do { -			*pmd = __pmd(phys | type->prot_sect); -			phys += SECTION_SIZE; -		} while (pmd++, addr += SECTION_SIZE, addr != end); +	flush_pmd_entry(pmd); +} -		flush_pmd_entry(p); -	} else { +static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, +				      unsigned long end, phys_addr_t phys, +				      const struct mem_type *type) +{ +	pmd_t *pmd = pmd_offset(pud, addr); +	unsigned long next; + +	do {  		/* -		 * No need to loop; pte's aren't interested in the -		 * individual L1 entries. +		 * With LPAE, we must loop over to map +		 * all the pmds for the given range.  		 */ -		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); -	} +		next = pmd_addr_end(addr, end); + +		/* +		 * Try a section mapping - addr, next and phys must all be +		 * aligned to a section boundary. 
+		 */ +		if (type->prot_sect && +				((addr | next | phys) & ~SECTION_MASK) == 0) { +			map_init_section(pmd, addr, next, phys, type); +		} else { +			alloc_init_pte(pmd, addr, next, +						__phys_to_pfn(phys), type); +		} + +		phys += next - addr; + +	} while (pmd++, addr = next, addr != end);  }  static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, @@ -641,7 +680,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,  	do {  		next = pud_addr_end(addr, end); -		alloc_init_section(pud, addr, next, phys, type); +		alloc_init_pmd(pud, addr, next, phys, type);  		phys += next - addr;  	} while (pud++, addr = next, addr != end);  } @@ -1256,6 +1295,7 @@ void __init paging_init(struct machine_desc *mdesc)  	dma_contiguous_remap();  	devicemaps_init(mdesc);  	kmap_init(); +	tcm_init();  	top_pmd = pmd_off_k(0xffff0000); diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index dc5de5d53f2..fde2d2a794c 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -77,24 +77,27 @@ __arm740_setup:  	mcr	p15, 0, r0, c6,	c0		@ set area 0, default  	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM -	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB) -	mov	r2, #10				@ 11 is the minimum (4KB) -1:	add	r2, r2, #1			@ area size *= 2 -	mov	r1, r1, lsr #1 +	ldr	r3, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB) +	mov	r4, #10				@ 11 is the minimum (4KB) +1:	add	r4, r4, #1			@ area size *= 2 +	movs	r3, r3, lsr #1  	bne	1b				@ count not zero r-shift -	orr	r0, r0, r2, lsl #1		@ the area register value +	orr	r0, r0, r4, lsl #1		@ the area register value  	orr	r0, r0, #1			@ set enable bit  	mcr	p15, 0, r0, c6,	c1		@ set area 1, RAM  	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH -	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB) -	mov	r2, #10				@ 11 is the minimum (4KB) -1:	add	r2, r2, #1			@ area size *= 2 -	mov	r1, r1, lsr #1 +	ldr	r3, 
=(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB) +	cmp	r3, #0 +	moveq	r0, #0 +	beq	2f +	mov	r4, #10				@ 11 is the minimum (4KB) +1:	add	r4, r4, #1			@ area size *= 2 +	movs	r3, r3, lsr #1  	bne	1b				@ count not zero r-shift -	orr	r0, r0, r2, lsl #1		@ the area register value +	orr	r0, r0, r4, lsl #1		@ the area register value  	orr	r0, r0, #1			@ set enable bit -	mcr	p15, 0, r0, c6,	c2		@ set area 2, ROM/FLASH +2:	mcr	p15, 0, r0, c6,	c2		@ set area 2, ROM/FLASH  	mov	r0, #0x06  	mcr	p15, 0, r0, c2, c0		@ Region 1&2 cacheable @@ -137,13 +140,14 @@ __arm740_proc_info:  	.long	0x41807400  	.long	0xfffffff0  	.long	0 +	.long	0  	b	__arm740_setup  	.long	cpu_arch_name  	.long	cpu_elf_name -	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT +	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT  	.long	cpu_arm740_name  	.long	arm740_processor_functions  	.long	0  	.long	0 -	.long	v3_cache_fns			@ cache model +	.long	v4_cache_fns			@ cache model  	.size	__arm740_proc_info, . - __arm740_proc_info diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2c3b9421ab5..2556cf1c2da 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)  /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */  .globl	cpu_arm920_suspend_size  .equ	cpu_arm920_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND  ENTRY(cpu_arm920_do_suspend)  	stmfd	sp!, {r4 - r6, lr}  	mrc	p15, 0, r4, c13, c0, 0	@ PID diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index f1803f7e297..344c8a548cc 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)  /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */  .globl	cpu_arm926_suspend_size  .equ	cpu_arm926_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND  ENTRY(cpu_arm926_do_suspend)  	stmfd	sp!, {r4 - r6, lr}  	mrc	
p15, 0, r4, c13, c0, 0	@ PID diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 82f9cdc751d..0b60dd3d742 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)  .globl	cpu_mohawk_suspend_size  .equ	cpu_mohawk_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND  ENTRY(cpu_mohawk_do_suspend)  	stmfd	sp!, {r4 - r9, lr}  	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 3aa0da11fd8..d92dfd08142 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)  .globl	cpu_sa1100_suspend_size  .equ	cpu_sa1100_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND  ENTRY(cpu_sa1100_do_suspend)  	stmfd	sp!, {r4 - r6, lr}  	mrc	p15, 0, r4, c3, c0, 0		@ domain ID diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 3e6210b4d6d..054b491ff76 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -17,7 +17,9 @@  #ifndef MULTI_CPU  EXPORT_SYMBOL(cpu_dcache_clean_area); +#ifdef CONFIG_MMU  EXPORT_SYMBOL(cpu_set_pte_ext); +#endif  #else  EXPORT_SYMBOL(processor);  #endif diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index bcaaa8de932..919405e20b8 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -80,12 +80,10 @@ ENTRY(cpu_v6_do_idle)  	mov	pc, lr  ENTRY(cpu_v6_dcache_clean_area) -#ifndef TLB_CAN_READ_FROM_L1_CACHE  1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry  	add	r0, r0, #D_CACHE_LINE_SIZE  	subs	r1, r1, #D_CACHE_LINE_SIZE  	bhi	1b -#endif  	mov	pc, lr  /* @@ -138,7 +136,7 @@ ENTRY(cpu_v6_set_pte_ext)  /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */  .globl	cpu_v6_suspend_size  .equ	cpu_v6_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND  ENTRY(cpu_v6_do_suspend)  	stmfd	sp!, {r4 - r9, lr}  	mrc	p15, 
0, r4, c13, c0, 0	@ FCSE/PID diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index 78f520bc0e9..9704097c450 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S @@ -110,7 +110,8 @@ ENTRY(cpu_v7_set_pte_ext)   ARM(	str	r3, [r0, #2048]! )   THUMB(	add	r0, r0, #2048 )   THUMB(	str	r3, [r0] ) -	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte +	ALT_SMP(mov	pc,lr) +	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte  #endif  	mov	pc, lr  ENDPROC(cpu_v7_set_pte_ext) diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 6ffd78c0f9a..363027e811d 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -73,7 +73,8 @@ ENTRY(cpu_v7_set_pte_ext)  	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY  	orreq	r2, #L_PTE_RDONLY  1:	strd	r2, r3, [r0] -	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte +	ALT_SMP(mov	pc, lr) +	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte  #endif  	mov	pc, lr  ENDPROC(cpu_v7_set_pte_ext) diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a3c015f8d5..2c73a7301ff 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -75,14 +75,14 @@ ENTRY(cpu_v7_do_idle)  ENDPROC(cpu_v7_do_idle)  ENTRY(cpu_v7_dcache_clean_area) -#ifndef TLB_CAN_READ_FROM_L1_CACHE +	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW +	ALT_UP(W(nop))  	dcache_line_size r2, r3  1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry  	add	r0, r0, r2  	subs	r1, r1, r2  	bhi	1b  	dsb -#endif  	mov	pc, lr  ENDPROC(cpu_v7_dcache_clean_area) @@ -402,6 +402,8 @@ __v7_ca9mp_proc_info:  	__v7_proc __v7_ca9mp_setup  	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info +#endif	/* CONFIG_ARM_LPAE */ +  	/*  	 * Marvell PJ4B processor.  	 */ @@ -411,7 +413,6 @@ __v7_pj4b_proc_info:  	.long	0xfffffff0  	__v7_proc __v7_pj4b_setup  	.size	__v7_pj4b_proc_info, . - __v7_pj4b_proc_info -#endif	/* CONFIG_ARM_LPAE */  	/*  	 * ARM Ltd. Cortex A7 processor. 
@@ -420,7 +421,7 @@ __v7_pj4b_proc_info:  __v7_ca7mp_proc_info:  	.long	0x410fc070  	.long	0xff0ffff0 -	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV +	__v7_proc __v7_ca7mp_setup  	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info  	/* @@ -430,10 +431,25 @@ __v7_ca7mp_proc_info:  __v7_ca15mp_proc_info:  	.long	0x410fc0f0  	.long	0xff0ffff0 -	__v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV +	__v7_proc __v7_ca15mp_setup  	.size	__v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info  	/* +	 * Qualcomm Inc. Krait processors. +	 */ +	.type	__krait_proc_info, #object +__krait_proc_info: +	.long	0x510f0400		@ Required ID value +	.long	0xff0ffc00		@ Mask for ID +	/* +	 * Some Krait processors don't indicate support for SDIV and UDIV +	 * instructions in the ARM instruction set, even though they actually +	 * do support them. +	 */ +	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV +	.size	__krait_proc_info, . - __krait_proc_info + +	/*  	 * Match any ARMv7 processor core.  	 */  	.type	__v7_proc_info, #object diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index eb93d6487f3..e8efd83b6f2 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)  .globl	cpu_xsc3_suspend_size  .equ	cpu_xsc3_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND  ENTRY(cpu_xsc3_do_suspend)  	stmfd	sp!, {r4 - r9, lr}  	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 25510361aa1..e766f889bfd 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)  .globl	cpu_xscale_suspend_size  .equ	cpu_xscale_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND  ENTRY(cpu_xscale_do_suspend)  	stmfd	sp!, {r4 - r9, lr}  	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode diff --git a/arch/arm/kernel/tcm.h b/arch/arm/mm/tcm.h index 
8015ad434a4..8015ad434a4 100644 --- a/arch/arm/kernel/tcm.h +++ b/arch/arm/mm/tcm.h diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 831e1fdfdb2..a10297da122 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -16,7 +16,7 @@  # are merged into mainline or have been edited in the machine database  # within the last 12 months.  References to machine_is_NAME() do not count!  # -# Last update: Thu Apr 26 08:44:23 2012 +# Last update: Fri Mar 22 17:24:50 2013  #  # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number  # @@ -64,8 +64,8 @@ h7201			ARCH_H7201		H7201			161  h7202			ARCH_H7202		H7202			162  iq80321			ARCH_IQ80321		IQ80321			169  ks8695			ARCH_KS8695		KS8695			180 -karo			ARCH_KARO		KARO			190  smdk2410		ARCH_SMDK2410		SMDK2410		193 +ceiva			ARCH_CEIVA		CEIVA			200  voiceblue		MACH_VOICEBLUE		VOICEBLUE		218  h5400			ARCH_H5400		H5400			220  omap_innovator		MACH_OMAP_INNOVATOR	OMAP_INNOVATOR		234 @@ -95,6 +95,7 @@ lpd7a400		MACH_LPD7A400		LPD7A400		389  lpd7a404		MACH_LPD7A404		LPD7A404		390  csb337			MACH_CSB337		CSB337			399  mainstone		MACH_MAINSTONE		MAINSTONE		406 +lite300			MACH_LITE300		LITE300			408  xcep			MACH_XCEP		XCEP			413  arcom_vulcan		MACH_ARCOM_VULCAN	ARCOM_VULCAN		414  nomadik			MACH_NOMADIK		NOMADIK			420 @@ -131,12 +132,14 @@ kb9200			MACH_KB9200		KB9200			612  sx1			MACH_SX1		SX1			613  ixdp465			MACH_IXDP465		IXDP465			618  ixdp2351		MACH_IXDP2351		IXDP2351		619 +cm4008			MACH_CM4008		CM4008			624  iq80332			MACH_IQ80332		IQ80332			629  gtwx5715		MACH_GTWX5715		GTWX5715		641  csb637			MACH_CSB637		CSB637			648  n30			MACH_N30		N30			656  nec_mp900		MACH_NEC_MP900		NEC_MP900		659  kafa			MACH_KAFA		KAFA			662 +cm41xx			MACH_CM41XX		CM41XX			672  ts72xx			MACH_TS72XX		TS72XX			673  otom			MACH_OTOM		OTOM			680  nexcoder_2440		MACH_NEXCODER_2440	NEXCODER_2440		681 @@ -149,6 +152,7 @@ colibri			MACH_COLIBRI		COLIBRI			729  gateway7001		MACH_GATEWAY7001	GATEWAY7001		731  pcm027			MACH_PCM027		PCM027		
	732  anubis			MACH_ANUBIS		ANUBIS			734 +xboardgp8		MACH_XBOARDGP8		XBOARDGP8		742  akita			MACH_AKITA		AKITA			744  e330			MACH_E330		E330			753  nokia770		MACH_NOKIA770		NOKIA770		755 @@ -157,9 +161,11 @@ edb9315a		MACH_EDB9315A		EDB9315A		772  stargate2		MACH_STARGATE2		STARGATE2		774  intelmote2		MACH_INTELMOTE2		INTELMOTE2		775  trizeps4		MACH_TRIZEPS4		TRIZEPS4		776 +pnx4008			MACH_PNX4008		PNX4008			782  cpuat91			MACH_CPUAT91		CPUAT91			787  iq81340sc		MACH_IQ81340SC		IQ81340SC		799  iq81340mc		MACH_IQ81340MC		IQ81340MC		801 +se4200			MACH_SE4200		SE4200			809  micro9			MACH_MICRO9		MICRO9			811  micro9l			MACH_MICRO9L		MICRO9L			812  omap_palmte		MACH_OMAP_PALMTE	OMAP_PALMTE		817 @@ -178,6 +184,7 @@ mx21ads			MACH_MX21ADS		MX21ADS			851  ams_delta		MACH_AMS_DELTA		AMS_DELTA		862  nas100d			MACH_NAS100D		NAS100D			865  magician		MACH_MAGICIAN		MAGICIAN		875 +cm4002			MACH_CM4002		CM4002			876  nxdkn			MACH_NXDKN		NXDKN			880  palmtx			MACH_PALMTX		PALMTX			885  s3c2413			MACH_S3C2413		S3C2413			887 @@ -203,7 +210,6 @@ omap_fsample		MACH_OMAP_FSAMPLE	OMAP_FSAMPLE		970  snapper_cl15		MACH_SNAPPER_CL15	SNAPPER_CL15		986  omap_palmz71		MACH_OMAP_PALMZ71	OMAP_PALMZ71		993  smdk2412		MACH_SMDK2412		SMDK2412		1009 -bkde303			MACH_BKDE303		BKDE303			1021  smdk2413		MACH_SMDK2413		SMDK2413		1022  aml_m5900		MACH_AML_M5900		AML_M5900		1024  balloon3		MACH_BALLOON3		BALLOON3		1029 @@ -214,6 +220,7 @@ fsg			MACH_FSG		FSG			1091  at91sam9260ek		MACH_AT91SAM9260EK	AT91SAM9260EK		1099  glantank		MACH_GLANTANK		GLANTANK		1100  n2100			MACH_N2100		N2100			1101 +im42xx			MACH_IM42XX		IM42XX			1105  qt2410			MACH_QT2410		QT2410			1108  kixrp435		MACH_KIXRP435		KIXRP435		1109  cc9p9360dev		MACH_CC9P9360DEV	CC9P9360DEV		1114 @@ -247,6 +254,7 @@ csb726			MACH_CSB726		CSB726			1359  davinci_dm6467_evm	MACH_DAVINCI_DM6467_EVM	DAVINCI_DM6467_EVM	1380  davinci_dm355_evm	MACH_DAVINCI_DM355_EVM	DAVINCI_DM355_EVM	1381  littleton		MACH_LITTLETON		LITTLETON		1388 +im4004			MACH_IM4004		
IM4004			1400  realview_pb11mp		MACH_REALVIEW_PB11MP	REALVIEW_PB11MP		1407  mx27_3ds		MACH_MX27_3DS		MX27_3DS		1430  halibut			MACH_HALIBUT		HALIBUT			1439 @@ -268,6 +276,7 @@ dns323			MACH_DNS323		DNS323			1542  omap3_beagle		MACH_OMAP3_BEAGLE	OMAP3_BEAGLE		1546  nokia_n810		MACH_NOKIA_N810		NOKIA_N810		1548  pcm038			MACH_PCM038		PCM038			1551 +sg310			MACH_SG310		SG310			1564  ts209			MACH_TS209		TS209			1565  at91cap9adk		MACH_AT91CAP9ADK	AT91CAP9ADK		1566  mx31moboard		MACH_MX31MOBOARD	MX31MOBOARD		1574 @@ -371,7 +380,6 @@ pcm043			MACH_PCM043		PCM043			2072  sheevaplug		MACH_SHEEVAPLUG		SHEEVAPLUG		2097  avengers_lite		MACH_AVENGERS_LITE	AVENGERS_LITE		2104  mx51_babbage		MACH_MX51_BABBAGE	MX51_BABBAGE		2125 -tx37			MACH_TX37		TX37			2127  rd78x00_masa		MACH_RD78X00_MASA	RD78X00_MASA		2135  dm355_leopard		MACH_DM355_LEOPARD	DM355_LEOPARD		2138  ts219			MACH_TS219		TS219			2139 @@ -380,12 +388,12 @@ davinci_da850_evm	MACH_DAVINCI_DA850_EVM	DAVINCI_DA850_EVM	2157  at91sam9g10ek		MACH_AT91SAM9G10EK	AT91SAM9G10EK		2159  omap_4430sdp		MACH_OMAP_4430SDP	OMAP_4430SDP		2160  magx_zn5		MACH_MAGX_ZN5		MAGX_ZN5		2162 -tx25			MACH_TX25		TX25			2177  omap3_torpedo		MACH_OMAP3_TORPEDO	OMAP3_TORPEDO		2178  anw6410			MACH_ANW6410		ANW6410			2183  imx27_visstrim_m10	MACH_IMX27_VISSTRIM_M10	IMX27_VISSTRIM_M10	2187  portuxg20		MACH_PORTUXG20		PORTUXG20		2191  smdkc110		MACH_SMDKC110		SMDKC110		2193 +cabespresso		MACH_CABESPRESSO	CABESPRESSO		2194  omap3517evm		MACH_OMAP3517EVM	OMAP3517EVM		2200  netspace_v2		MACH_NETSPACE_V2	NETSPACE_V2		2201  netspace_max_v2		MACH_NETSPACE_MAX_V2	NETSPACE_MAX_V2		2202 @@ -404,6 +412,7 @@ bigdisk			MACH_BIGDISK		BIGDISK			2283  at91sam9g20ek_2mmc	MACH_AT91SAM9G20EK_2MMC	AT91SAM9G20EK_2MMC	2288  bcmring			MACH_BCMRING		BCMRING			2289  mahimahi		MACH_MAHIMAHI		MAHIMAHI		2304 +cerebric		MACH_CEREBRIC		CEREBRIC		2311  smdk6442		MACH_SMDK6442		SMDK6442		2324  openrd_base		MACH_OPENRD_BASE	OPENRD_BASE		2325  devkit8000		MACH_DEVKIT8000		DEVKIT8000		
2330 @@ -423,10 +432,10 @@ raumfeld_rc		MACH_RAUMFELD_RC	RAUMFELD_RC		2413  raumfeld_connector	MACH_RAUMFELD_CONNECTOR	RAUMFELD_CONNECTOR	2414  raumfeld_speaker	MACH_RAUMFELD_SPEAKER	RAUMFELD_SPEAKER	2415  tnetv107x		MACH_TNETV107X		TNETV107X		2418 -mx51_m2id		MACH_MX51_M2ID		MX51_M2ID		2428  smdkv210		MACH_SMDKV210		SMDKV210		2456  omap_zoom3		MACH_OMAP_ZOOM3		OMAP_ZOOM3		2464  omap_3630sdp		MACH_OMAP_3630SDP	OMAP_3630SDP		2465 +cybook2440		MACH_CYBOOK2440		CYBOOK2440		2466  smartq7			MACH_SMARTQ7		SMARTQ7			2479  watson_efm_plugin	MACH_WATSON_EFM_PLUGIN	WATSON_EFM_PLUGIN	2491  g4evm			MACH_G4EVM		G4EVM			2493 @@ -434,12 +443,10 @@ omapl138_hawkboard	MACH_OMAPL138_HAWKBOARD	OMAPL138_HAWKBOARD	2495  ts41x			MACH_TS41X		TS41X			2502  phy3250			MACH_PHY3250		PHY3250			2511  mini6410		MACH_MINI6410		MINI6410		2520 -tx51			MACH_TX51		TX51			2529  mx28evk			MACH_MX28EVK		MX28EVK			2531  smartq5			MACH_SMARTQ5		SMARTQ5			2534  davinci_dm6467tevm	MACH_DAVINCI_DM6467TEVM	DAVINCI_DM6467TEVM	2548  mxt_td60		MACH_MXT_TD60		MXT_TD60		2550 -pca101			MACH_PCA101		PCA101			2595  capc7117		MACH_CAPC7117		CAPC7117		2612  icontrol		MACH_ICONTROL		ICONTROL		2624  gplugd			MACH_GPLUGD		GPLUGD			2625 @@ -465,6 +472,7 @@ igep0030		MACH_IGEP0030		IGEP0030		2717  sbc3530			MACH_SBC3530		SBC3530			2722  saarb			MACH_SAARB		SAARB			2727  harmony			MACH_HARMONY		HARMONY			2731 +cybook_orizon		MACH_CYBOOK_ORIZON	CYBOOK_ORIZON		2733  msm7x30_fluid		MACH_MSM7X30_FLUID	MSM7X30_FLUID		2741  cm_t3517		MACH_CM_T3517		CM_T3517		2750  wbd222			MACH_WBD222		WBD222			2753 @@ -480,10 +488,8 @@ eukrea_cpuimx35sd	MACH_EUKREA_CPUIMX35SD	EUKREA_CPUIMX35SD	2821  eukrea_cpuimx51sd	MACH_EUKREA_CPUIMX51SD	EUKREA_CPUIMX51SD	2822  eukrea_cpuimx51		MACH_EUKREA_CPUIMX51	EUKREA_CPUIMX51		2823  smdkc210		MACH_SMDKC210		SMDKC210		2838 -pcaal1			MACH_PCAAL1		PCAAL1			2843  t5325			MACH_T5325		T5325			2846  income			MACH_INCOME		INCOME			2849 -mx257sx			MACH_MX257SX		MX257SX			2861  goni			MACH_GONI		GONI			2862  bv07			
MACH_BV07		BV07			2882  openrd_ultimate		MACH_OPENRD_ULTIMATE	OPENRD_ULTIMATE		2884 @@ -491,7 +497,6 @@ devixp			MACH_DEVIXP		DEVIXP			2885  miccpt			MACH_MICCPT		MICCPT			2886  mic256			MACH_MIC256		MIC256			2887  u5500			MACH_U5500		U5500			2890 -pov15hd			MACH_POV15HD		POV15HD			2910  linkstation_lschl	MACH_LINKSTATION_LSCHL	LINKSTATION_LSCHL	2913  smdkv310		MACH_SMDKV310		SMDKV310		2925  wm8505_7in_netbook	MACH_WM8505_7IN_NETBOOK	WM8505_7IN_NETBOOK	2928 @@ -518,7 +523,6 @@ prima2_evb		MACH_PRIMA2_EVB		PRIMA2_EVB		3103  paz00			MACH_PAZ00		PAZ00			3128  acmenetusfoxg20		MACH_ACMENETUSFOXG20	ACMENETUSFOXG20		3129  ag5evm			MACH_AG5EVM		AG5EVM			3189 -tsunagi			MACH_TSUNAGI		TSUNAGI			3197  ics_if_voip		MACH_ICS_IF_VOIP	ICS_IF_VOIP		3206  wlf_cragg_6410		MACH_WLF_CRAGG_6410	WLF_CRAGG_6410		3207  trimslice		MACH_TRIMSLICE		TRIMSLICE		3209 @@ -529,8 +533,6 @@ msm8960_sim		MACH_MSM8960_SIM	MSM8960_SIM		3230  msm8960_rumi3		MACH_MSM8960_RUMI3	MSM8960_RUMI3		3231  gsia18s			MACH_GSIA18S		GSIA18S			3234  mx53_loco		MACH_MX53_LOCO		MX53_LOCO		3273 -tx53			MACH_TX53		TX53			3279 -encore			MACH_ENCORE		ENCORE			3284  wario			MACH_WARIO		WARIO			3288  cm_t3730		MACH_CM_T3730		CM_T3730		3290  hrefv60			MACH_HREFV60		HREFV60			3293 @@ -538,603 +540,24 @@ armlex4210		MACH_ARMLEX4210		ARMLEX4210		3361  snowball		MACH_SNOWBALL		SNOWBALL		3363  xilinx_ep107		MACH_XILINX_EP107	XILINX_EP107		3378  nuri			MACH_NURI		NURI			3379 -wtplug			MACH_WTPLUG		WTPLUG			3412 -veridis_a300		MACH_VERIDIS_A300	VERIDIS_A300		3448  origen			MACH_ORIGEN		ORIGEN			3455 -wm8650refboard		MACH_WM8650REFBOARD	WM8650REFBOARD		3472 -xarina			MACH_XARINA		XARINA			3476 -sdvr			MACH_SDVR		SDVR			3478 -acer_maya		MACH_ACER_MAYA		ACER_MAYA		3479 -pico			MACH_PICO		PICO			3480 -cwmx233			MACH_CWMX233		CWMX233			3481 -cwam1808		MACH_CWAM1808		CWAM1808		3482 -cwdm365			MACH_CWDM365		CWDM365			3483 -mx51_moray		MACH_MX51_MORAY		MX51_MORAY		3484 -thales_cbc		MACH_THALES_CBC		THALES_CBC		3485 -bluepoint		
MACH_BLUEPOINT		BLUEPOINT		3486 -dir665			MACH_DIR665		DIR665			3487 -acmerover1		MACH_ACMEROVER1		ACMEROVER1		3488 -shooter_ct		MACH_SHOOTER_CT		SHOOTER_CT		3489 -bliss			MACH_BLISS		BLISS			3490 -blissc			MACH_BLISSC		BLISSC			3491 -thales_adc		MACH_THALES_ADC		THALES_ADC		3492 -ubisys_p9d_evp		MACH_UBISYS_P9D_EVP	UBISYS_P9D_EVP		3493 -atdgp318		MACH_ATDGP318		ATDGP318		3494 -dma210u			MACH_DMA210U		DMA210U			3495 -em_t3			MACH_EM_T3		EM_T3			3496 -htx3250			MACH_HTX3250		HTX3250			3497 -g50			MACH_G50		G50			3498 -eco5			MACH_ECO5		ECO5			3499 -wintergrasp		MACH_WINTERGRASP	WINTERGRASP		3500 -puro			MACH_PURO		PURO			3501 -shooter_k		MACH_SHOOTER_K		SHOOTER_K		3502  nspire			MACH_NSPIRE		NSPIRE			3503 -mickxx			MACH_MICKXX		MICKXX			3504 -lxmb			MACH_LXMB		LXMB			3505 -adam			MACH_ADAM		ADAM			3507 -b1004			MACH_B1004		B1004			3508 -oboea			MACH_OBOEA		OBOEA			3509 -a1015			MACH_A1015		A1015			3510 -robin_vbdt30		MACH_ROBIN_VBDT30	ROBIN_VBDT30		3511 -tegra_enterprise	MACH_TEGRA_ENTERPRISE	TEGRA_ENTERPRISE	3512 -rfl108200_mk10		MACH_RFL108200_MK10	RFL108200_MK10		3513 -rfl108300_mk16		MACH_RFL108300_MK16	RFL108300_MK16		3514 -rover_v7		MACH_ROVER_V7		ROVER_V7		3515 -miphone			MACH_MIPHONE		MIPHONE			3516 -femtobts		MACH_FEMTOBTS		FEMTOBTS		3517 -monopoli		MACH_MONOPOLI		MONOPOLI		3518 -boss			MACH_BOSS		BOSS			3519 -davinci_dm368_vtam	MACH_DAVINCI_DM368_VTAM	DAVINCI_DM368_VTAM	3520 -clcon			MACH_CLCON		CLCON			3521  nokia_rm696		MACH_NOKIA_RM696	NOKIA_RM696		3522 -tahiti			MACH_TAHITI		TAHITI			3523 -fighter			MACH_FIGHTER		FIGHTER			3524 -sgh_i710		MACH_SGH_I710		SGH_I710		3525 -integreproscb		MACH_INTEGREPROSCB	INTEGREPROSCB		3526 -monza			MACH_MONZA		MONZA			3527 -calimain		MACH_CALIMAIN		CALIMAIN		3528 -mx6q_sabreauto		MACH_MX6Q_SABREAUTO	MX6Q_SABREAUTO		3529 -gma01x			MACH_GMA01X		GMA01X			3530 -sbc51			MACH_SBC51		SBC51			3531 -fit			MACH_FIT		FIT			3532 -steelhead		MACH_STEELHEAD		STEELHEAD		3533 -panther			MACH_PANTHER		PANTHER			3534 -msm8960_liquid		
MACH_MSM8960_LIQUID	MSM8960_LIQUID		3535 -lexikonct		MACH_LEXIKONCT		LEXIKONCT		3536 -ns2816_stb		MACH_NS2816_STB		NS2816_STB		3537 -sei_mm2_lpc3250		MACH_SEI_MM2_LPC3250	SEI_MM2_LPC3250		3538 -cmimx53			MACH_CMIMX53		CMIMX53			3539 -sandwich		MACH_SANDWICH		SANDWICH		3540 -chief			MACH_CHIEF		CHIEF			3541 -pogo_e02		MACH_POGO_E02		POGO_E02		3542  mikrap_x168		MACH_MIKRAP_X168	MIKRAP_X168		3543 -htcmozart		MACH_HTCMOZART		HTCMOZART		3544 -htcgold			MACH_HTCGOLD		HTCGOLD			3545 -mt72xx			MACH_MT72XX		MT72XX			3546 -mx51_ivy		MACH_MX51_IVY		MX51_IVY		3547 -mx51_lvd		MACH_MX51_LVD		MX51_LVD		3548 -omap3_wiser2		MACH_OMAP3_WISER2	OMAP3_WISER2		3549 -dreamplug		MACH_DREAMPLUG		DREAMPLUG		3550 -cobas_c_111		MACH_COBAS_C_111	COBAS_C_111		3551 -cobas_u_411		MACH_COBAS_U_411	COBAS_U_411		3552 -hssd			MACH_HSSD		HSSD			3553 -iom35x			MACH_IOM35X		IOM35X			3554 -psom_omap		MACH_PSOM_OMAP		PSOM_OMAP		3555 -iphone_2g		MACH_IPHONE_2G		IPHONE_2G		3556 -iphone_3g		MACH_IPHONE_3G		IPHONE_3G		3557 -ipod_touch_1g		MACH_IPOD_TOUCH_1G	IPOD_TOUCH_1G		3558 -pharos_tpc		MACH_PHAROS_TPC		PHAROS_TPC		3559 -mx53_hydra		MACH_MX53_HYDRA		MX53_HYDRA		3560 -ns2816_dev_board	MACH_NS2816_DEV_BOARD	NS2816_DEV_BOARD	3561 -iphone_3gs		MACH_IPHONE_3GS		IPHONE_3GS		3562 -iphone_4		MACH_IPHONE_4		IPHONE_4		3563 -ipod_touch_4g		MACH_IPOD_TOUCH_4G	IPOD_TOUCH_4G		3564 -dragon_e1100		MACH_DRAGON_E1100	DRAGON_E1100		3565 -topside			MACH_TOPSIDE		TOPSIDE			3566 -irisiii			MACH_IRISIII		IRISIII			3567  deto_macarm9		MACH_DETO_MACARM9	DETO_MACARM9		3568 -eti_d1			MACH_ETI_D1		ETI_D1			3569 -som3530sdk		MACH_SOM3530SDK		SOM3530SDK		3570 -oc_engine		MACH_OC_ENGINE		OC_ENGINE		3571 -apq8064_sim		MACH_APQ8064_SIM	APQ8064_SIM		3572 -alps			MACH_ALPS		ALPS			3575 -tny_t3730		MACH_TNY_T3730		TNY_T3730		3576 -geryon_nfe		MACH_GERYON_NFE		GERYON_NFE		3577 -ns2816_ref_board	MACH_NS2816_REF_BOARD	NS2816_REF_BOARD	3578 -silverstone		MACH_SILVERSTONE	SILVERSTONE		3579 -mtt2440			MACH_MTT2440		MTT2440			3580 -ynicdb			
MACH_YNICDB		YNICDB			3581 -bct			MACH_BCT		BCT			3582 -tuscan			MACH_TUSCAN		TUSCAN			3583 -xbt_sam9g45		MACH_XBT_SAM9G45	XBT_SAM9G45		3584 -enbw_cmc		MACH_ENBW_CMC		ENBW_CMC		3585 -ch104mx257		MACH_CH104MX257		CH104MX257		3587 -openpri			MACH_OPENPRI		OPENPRI			3588 -am335xevm		MACH_AM335XEVM		AM335XEVM		3589 -picodmb			MACH_PICODMB		PICODMB			3590 -waluigi			MACH_WALUIGI		WALUIGI			3591 -punicag7		MACH_PUNICAG7		PUNICAG7		3592 -ipad_1g			MACH_IPAD_1G		IPAD_1G			3593 -appletv_2g		MACH_APPLETV_2G		APPLETV_2G		3594 -mach_ecog45		MACH_MACH_ECOG45	MACH_ECOG45		3595 -ait_cam_enc_4xx		MACH_AIT_CAM_ENC_4XX	AIT_CAM_ENC_4XX		3596 -runnymede		MACH_RUNNYMEDE		RUNNYMEDE		3597 -play			MACH_PLAY		PLAY			3598 -hw90260			MACH_HW90260		HW90260			3599 -tagh			MACH_TAGH		TAGH			3600 -filbert			MACH_FILBERT		FILBERT			3601 -getinge_netcomv3	MACH_GETINGE_NETCOMV3	GETINGE_NETCOMV3	3602 -cw20			MACH_CW20		CW20			3603 -cinema			MACH_CINEMA		CINEMA			3604 -cinema_tea		MACH_CINEMA_TEA		CINEMA_TEA		3605 -cinema_coffee		MACH_CINEMA_COFFEE	CINEMA_COFFEE		3606 -cinema_juice		MACH_CINEMA_JUICE	CINEMA_JUICE		3607 -mx53_mirage2		MACH_MX53_MIRAGE2	MX53_MIRAGE2		3609 -mx53_efikasb		MACH_MX53_EFIKASB	MX53_EFIKASB		3610 -stm_b2000		MACH_STM_B2000		STM_B2000		3612  m28evk			MACH_M28EVK		M28EVK			3613 -pda			MACH_PDA		PDA			3614 -meraki_mr58		MACH_MERAKI_MR58	MERAKI_MR58		3615  kota2			MACH_KOTA2		KOTA2			3616 -letcool			MACH_LETCOOL		LETCOOL			3617 -mx27iat			MACH_MX27IAT		MX27IAT			3618 -apollo_td		MACH_APOLLO_TD		APOLLO_TD		3619 -arena			MACH_ARENA		ARENA			3620 -gsngateway		MACH_GSNGATEWAY		GSNGATEWAY		3621 -lf2000			MACH_LF2000		LF2000			3622  bonito			MACH_BONITO		BONITO			3623 -asymptote		MACH_ASYMPTOTE		ASYMPTOTE		3624 -bst2brd			MACH_BST2BRD		BST2BRD			3625 -tx335s			MACH_TX335S		TX335S			3626 -pelco_tesla		MACH_PELCO_TESLA	PELCO_TESLA		3627 -rrhtestplat		MACH_RRHTESTPLAT	RRHTESTPLAT		3628 -vidtonic_pro		MACH_VIDTONIC_PRO	VIDTONIC_PRO		3629 -pl_apollo		MACH_PL_APOLLO		PL_APOLLO		3630 
-pl_phoenix		MACH_PL_PHOENIX		PL_PHOENIX		3631 -m28cu3			MACH_M28CU3		M28CU3			3632 -vvbox_hd		MACH_VVBOX_HD		VVBOX_HD		3633 -coreware_sam9260_	MACH_COREWARE_SAM9260_	COREWARE_SAM9260_	3634 -marmaduke		MACH_MARMADUKE		MARMADUKE		3635 -amg_xlcore_camera	MACH_AMG_XLCORE_CAMERA	AMG_XLCORE_CAMERA	3636  omap3_egf		MACH_OMAP3_EGF		OMAP3_EGF		3637  smdk4212		MACH_SMDK4212		SMDK4212		3638 -dnp9200			MACH_DNP9200		DNP9200			3639 -tf101			MACH_TF101		TF101			3640 -omap3silvio		MACH_OMAP3SILVIO	OMAP3SILVIO		3641 -picasso2		MACH_PICASSO2		PICASSO2		3642 -vangogh2		MACH_VANGOGH2		VANGOGH2		3643 -olpc_xo_1_75		MACH_OLPC_XO_1_75	OLPC_XO_1_75		3644 -gx400			MACH_GX400		GX400			3645 -gs300			MACH_GS300		GS300			3646 -acer_a9			MACH_ACER_A9		ACER_A9			3647 -vivow_evm		MACH_VIVOW_EVM		VIVOW_EVM		3648 -veloce_cxq		MACH_VELOCE_CXQ		VELOCE_CXQ		3649 -veloce_cxm		MACH_VELOCE_CXM		VELOCE_CXM		3650 -p1852			MACH_P1852		P1852			3651 -naxy100			MACH_NAXY100		NAXY100			3652 -taishan			MACH_TAISHAN		TAISHAN			3653 -touchlink		MACH_TOUCHLINK		TOUCHLINK		3654 -stm32f103ze		MACH_STM32F103ZE	STM32F103ZE		3655 -mcx			MACH_MCX		MCX			3656 -stm_nmhdk_fli7610	MACH_STM_NMHDK_FLI7610	STM_NMHDK_FLI7610	3657 -top28x			MACH_TOP28X		TOP28X			3658 -okl4vp_microvisor	MACH_OKL4VP_MICROVISOR	OKL4VP_MICROVISOR	3659 -pop			MACH_POP		POP			3660 -layer			MACH_LAYER		LAYER			3661 -trondheim		MACH_TRONDHEIM		TRONDHEIM		3662 -eva			MACH_EVA		EVA			3663 -trust_taurus		MACH_TRUST_TAURUS	TRUST_TAURUS		3664 -ns2816_huashan		MACH_NS2816_HUASHAN	NS2816_HUASHAN		3665 -ns2816_yangcheng	MACH_NS2816_YANGCHENG	NS2816_YANGCHENG	3666 -p852			MACH_P852		P852			3667 -flea3			MACH_FLEA3		FLEA3			3668 -bowfin			MACH_BOWFIN		BOWFIN			3669 -mv88de3100		MACH_MV88DE3100		MV88DE3100		3670 -pia_am35x		MACH_PIA_AM35X		PIA_AM35X		3671 -cedar			MACH_CEDAR		CEDAR			3672 -picasso_e		MACH_PICASSO_E		PICASSO_E		3673 -samsung_e60		MACH_SAMSUNG_E60	SAMSUNG_E60		3674 -sdvr_mini		MACH_SDVR_MINI		SDVR_MINI		3676 -omap3_ij3k		MACH_OMAP3_IJ3K		OMAP3_IJ3K	
	3677 -modasmc1		MACH_MODASMC1		MODASMC1		3678 -apq8064_rumi3		MACH_APQ8064_RUMI3	APQ8064_RUMI3		3679 -matrix506		MACH_MATRIX506		MATRIX506		3680 -msm9615_mtp		MACH_MSM9615_MTP	MSM9615_MTP		3681 -dm36x_spawndc		MACH_DM36X_SPAWNDC	DM36X_SPAWNDC		3682 -sff792			MACH_SFF792		SFF792			3683 -am335xiaevm		MACH_AM335XIAEVM	AM335XIAEVM		3684 -g3c2440			MACH_G3C2440		G3C2440			3685 -tion270			MACH_TION270		TION270			3686 -w22q7arm02		MACH_W22Q7ARM02		W22Q7ARM02		3687 -omap_cat		MACH_OMAP_CAT		OMAP_CAT		3688 -at91sam9n12ek		MACH_AT91SAM9N12EK	AT91SAM9N12EK		3689 -morrison		MACH_MORRISON		MORRISON		3690 -svdu			MACH_SVDU		SVDU			3691 -lpp01			MACH_LPP01		LPP01			3692 -ubc283			MACH_UBC283		UBC283			3693 -zeppelin		MACH_ZEPPELIN		ZEPPELIN		3694 -motus			MACH_MOTUS		MOTUS			3695 -neomainboard		MACH_NEOMAINBOARD	NEOMAINBOARD		3696 -devkit3250		MACH_DEVKIT3250		DEVKIT3250		3697 -devkit7000		MACH_DEVKIT7000		DEVKIT7000		3698 -fmc_uic			MACH_FMC_UIC		FMC_UIC			3699 -fmc_dcm			MACH_FMC_DCM		FMC_DCM			3700 -batwm			MACH_BATWM		BATWM			3701 -atlas6cb		MACH_ATLAS6CB		ATLAS6CB		3702 -blue			MACH_BLUE		BLUE			3705 -colorado		MACH_COLORADO		COLORADO		3706 -popc			MACH_POPC		POPC			3707 -promwad_jade		MACH_PROMWAD_JADE	PROMWAD_JADE		3708 -amp			MACH_AMP		AMP			3709 -gnet_amp		MACH_GNET_AMP		GNET_AMP		3710 -toques			MACH_TOQUES		TOQUES			3711  apx4devkit		MACH_APX4DEVKIT		APX4DEVKIT		3712 -dct_storm		MACH_DCT_STORM		DCT_STORM		3713 -owl			MACH_OWL		OWL			3715 -cogent_csb1741		MACH_COGENT_CSB1741	COGENT_CSB1741		3716 -adillustra610		MACH_ADILLUSTRA610	ADILLUSTRA610		3718 -ecafe_na04		MACH_ECAFE_NA04		ECAFE_NA04		3719 -popct			MACH_POPCT		POPCT			3720 -omap3_helena		MACH_OMAP3_HELENA	OMAP3_HELENA		3721 -ach			MACH_ACH		ACH			3722 -module_dtb		MACH_MODULE_DTB		MODULE_DTB		3723 -oslo_elisabeth		MACH_OSLO_ELISABETH	OSLO_ELISABETH		3725 -tt01			MACH_TT01		TT01			3726 -msm8930_cdp		MACH_MSM8930_CDP	MSM8930_CDP		3727 -msm8930_mtp		MACH_MSM8930_MTP	MSM8930_MTP		3728 -msm8930_fluid		
MACH_MSM8930_FLUID	MSM8930_FLUID		3729 -ltu11			MACH_LTU11		LTU11			3730 -am1808_spawnco		MACH_AM1808_SPAWNCO	AM1808_SPAWNCO		3731 -flx6410			MACH_FLX6410		FLX6410			3732 -mx6q_qsb		MACH_MX6Q_QSB		MX6Q_QSB		3733 -mx53_plt424		MACH_MX53_PLT424	MX53_PLT424		3734 -jasmine			MACH_JASMINE		JASMINE			3735 -l138_owlboard_plus	MACH_L138_OWLBOARD_PLUS	L138_OWLBOARD_PLUS	3736 -wr21			MACH_WR21		WR21			3737 -peaboy			MACH_PEABOY		PEABOY			3739 -mx28_plato		MACH_MX28_PLATO		MX28_PLATO		3740 -kacom2			MACH_KACOM2		KACOM2			3741 -slco			MACH_SLCO		SLCO			3742 -imx51pico		MACH_IMX51PICO		IMX51PICO		3743 -glink1			MACH_GLINK1		GLINK1			3744 -diamond			MACH_DIAMOND		DIAMOND			3745 -d9000			MACH_D9000		D9000			3746 -w5300e01		MACH_W5300E01		W5300E01		3747 -im6000			MACH_IM6000		IM6000			3748 -mx51_fred51		MACH_MX51_FRED51	MX51_FRED51		3749 -stm32f2			MACH_STM32F2		STM32F2			3750 -ville			MACH_VILLE		VILLE			3751 -ptip_murnau		MACH_PTIP_MURNAU	PTIP_MURNAU		3752 -ptip_classic		MACH_PTIP_CLASSIC	PTIP_CLASSIC		3753 -mx53grb			MACH_MX53GRB		MX53GRB			3754 -gagarin			MACH_GAGARIN		GAGARIN			3755 -nas2big			MACH_NAS2BIG		NAS2BIG			3757 -superfemto		MACH_SUPERFEMTO		SUPERFEMTO		3758 -teufel			MACH_TEUFEL		TEUFEL			3759 -dinara			MACH_DINARA		DINARA			3760 -vanquish		MACH_VANQUISH		VANQUISH		3761 -zipabox1		MACH_ZIPABOX1		ZIPABOX1		3762 -u9540			MACH_U9540		U9540			3763 -jet			MACH_JET		JET			3764  smdk4412		MACH_SMDK4412		SMDK4412		3765 -elite			MACH_ELITE		ELITE			3766 -spear320_hmi		MACH_SPEAR320_HMI	SPEAR320_HMI		3767 -ontario			MACH_ONTARIO		ONTARIO			3768 -mx6q_sabrelite		MACH_MX6Q_SABRELITE	MX6Q_SABRELITE		3769 -vc200			MACH_VC200		VC200			3770 -msm7625a_ffa		MACH_MSM7625A_FFA	MSM7625A_FFA		3771 -msm7625a_surf		MACH_MSM7625A_SURF	MSM7625A_SURF		3772 -benthossbp		MACH_BENTHOSSBP		BENTHOSSBP		3773 -smdk5210		MACH_SMDK5210		SMDK5210		3774 -empq2300		MACH_EMPQ2300		EMPQ2300		3775 -minipos			MACH_MINIPOS		MINIPOS			3776 -omap5_sevm		MACH_OMAP5_SEVM		OMAP5_SEVM		3777 -shelter			MACH_SHELTER	
	SHELTER			3778 -omap3_devkit8500	MACH_OMAP3_DEVKIT8500	OMAP3_DEVKIT8500	3779 -edgetd			MACH_EDGETD		EDGETD			3780 -copperyard		MACH_COPPERYARD		COPPERYARD		3781 -edge_u			MACH_EDGE_U		EDGE_U			3783 -edge_td			MACH_EDGE_TD		EDGE_TD			3784 -wdss			MACH_WDSS		WDSS			3785 -dl_pb25			MACH_DL_PB25		DL_PB25			3786 -dss11			MACH_DSS11		DSS11			3787 -cpa			MACH_CPA		CPA			3788 -aptp2000		MACH_APTP2000		APTP2000		3789  marzen			MACH_MARZEN		MARZEN			3790 -st_turbine		MACH_ST_TURBINE		ST_TURBINE		3791 -gtl_it3300		MACH_GTL_IT3300		GTL_IT3300		3792 -mx6_mule		MACH_MX6_MULE		MX6_MULE		3793 -v7pxa_dt		MACH_V7PXA_DT		V7PXA_DT		3794 -v7mmp_dt		MACH_V7MMP_DT		V7MMP_DT		3795 -dragon7			MACH_DRAGON7		DRAGON7			3796  krome			MACH_KROME		KROME			3797 -oratisdante		MACH_ORATISDANTE	ORATISDANTE		3798 -fathom			MACH_FATHOM		FATHOM			3799 -dns325			MACH_DNS325		DNS325			3800 -sarnen			MACH_SARNEN		SARNEN			3801 -ubisys_g1		MACH_UBISYS_G1		UBISYS_G1		3802 -mx53_pf1		MACH_MX53_PF1		MX53_PF1		3803 -asanti			MACH_ASANTI		ASANTI			3804 -volta			MACH_VOLTA		VOLTA			3805 -knight			MACH_KNIGHT		KNIGHT			3807 -beaglebone		MACH_BEAGLEBONE		BEAGLEBONE		3808 -becker			MACH_BECKER		BECKER			3809 -fc360			MACH_FC360		FC360			3810 -pmi2_xls		MACH_PMI2_XLS		PMI2_XLS		3811 -taranto			MACH_TARANTO		TARANTO			3812 -plutux			MACH_PLUTUX		PLUTUX			3813 -ipmp_medcom		MACH_IPMP_MEDCOM	IPMP_MEDCOM		3814 -absolut			MACH_ABSOLUT		ABSOLUT			3815 -awpb3			MACH_AWPB3		AWPB3			3816 -nfp32xx_dt		MACH_NFP32XX_DT		NFP32XX_DT		3817 -dl_pb53			MACH_DL_PB53		DL_PB53			3818 -acu_ii			MACH_ACU_II		ACU_II			3819 -avalon			MACH_AVALON		AVALON			3820 -sphinx			MACH_SPHINX		SPHINX			3821 -titan_t			MACH_TITAN_T		TITAN_T			3822 -harvest_boris		MACH_HARVEST_BORIS	HARVEST_BORIS		3823 -mach_msm7x30_m3s	MACH_MACH_MSM7X30_M3S	MACH_MSM7X30_M3S	3824 -smdk5250		MACH_SMDK5250		SMDK5250		3825 -imxt_lite		MACH_IMXT_LITE		IMXT_LITE		3826 -imxt_std		MACH_IMXT_STD		IMXT_STD		3827 -imxt_log		MACH_IMXT_LOG		IMXT_LOG		3828 -imxt_nav		MACH_IMXT_NAV	
	IMXT_NAV		3829 -imxt_full		MACH_IMXT_FULL		IMXT_FULL		3830 -ag09015			MACH_AG09015		AG09015			3831 -am3517_mt_ventoux	MACH_AM3517_MT_VENTOUX	AM3517_MT_VENTOUX	3832 -dp1arm9			MACH_DP1ARM9		DP1ARM9			3833 -picasso_m		MACH_PICASSO_M		PICASSO_M		3834 -video_gadget		MACH_VIDEO_GADGET	VIDEO_GADGET		3835 -mtt_om3x		MACH_MTT_OM3X		MTT_OM3X		3836 -mx6q_arm2		MACH_MX6Q_ARM2		MX6Q_ARM2		3837 -picosam9g45		MACH_PICOSAM9G45	PICOSAM9G45		3838 -vpm_dm365		MACH_VPM_DM365		VPM_DM365		3839 -bonfire			MACH_BONFIRE		BONFIRE			3840 -mt2p2d			MACH_MT2P2D		MT2P2D			3841 -sigpda01		MACH_SIGPDA01		SIGPDA01		3842 -cn27			MACH_CN27		CN27			3843 -mx25_cwtap		MACH_MX25_CWTAP		MX25_CWTAP		3844 -apf28			MACH_APF28		APF28			3845 -pelco_maxwell		MACH_PELCO_MAXWELL	PELCO_MAXWELL		3846 -ge_phoenix		MACH_GE_PHOENIX		GE_PHOENIX		3847 -empc_a500		MACH_EMPC_A500		EMPC_A500		3848 -ims_arm9		MACH_IMS_ARM9		IMS_ARM9		3849 -mini2416		MACH_MINI2416		MINI2416		3850 -mini2450		MACH_MINI2450		MINI2450		3851 -mini310			MACH_MINI310		MINI310			3852 -spear_hurricane		MACH_SPEAR_HURRICANE	SPEAR_HURRICANE		3853 -mt7208			MACH_MT7208		MT7208			3854 -lpc178x			MACH_LPC178X		LPC178X			3855 -farleys			MACH_FARLEYS		FARLEYS			3856 -efm32gg_dk3750		MACH_EFM32GG_DK3750	EFM32GG_DK3750		3857 -zeus_board		MACH_ZEUS_BOARD		ZEUS_BOARD		3858 -cc51			MACH_CC51		CC51			3859 -fxi_c210		MACH_FXI_C210		FXI_C210		3860 -msm8627_cdp		MACH_MSM8627_CDP	MSM8627_CDP		3861 -msm8627_mtp		MACH_MSM8627_MTP	MSM8627_MTP		3862  armadillo800eva		MACH_ARMADILLO800EVA	ARMADILLO800EVA		3863 -primou			MACH_PRIMOU		PRIMOU			3864 -primoc			MACH_PRIMOC		PRIMOC			3865 -primoct			MACH_PRIMOCT		PRIMOCT			3866 -a9500			MACH_A9500		A9500			3867 -pluto			MACH_PLUTO		PLUTO			3869 -acfx100			MACH_ACFX100		ACFX100			3870 -msm8625_rumi3		MACH_MSM8625_RUMI3	MSM8625_RUMI3		3871 -valente			MACH_VALENTE		VALENTE			3872 -crfs_rfeye		MACH_CRFS_RFEYE		CRFS_RFEYE		3873 -rfeye			MACH_RFEYE		RFEYE			3874 -phidget_sbc3		MACH_PHIDGET_SBC3	PHIDGET_SBC3		3875 -tcw_mika		
MACH_TCW_MIKA		TCW_MIKA		3876 -imx28_egf		MACH_IMX28_EGF		IMX28_EGF		3877 -valente_wx		MACH_VALENTE_WX		VALENTE_WX		3878 -huangshans		MACH_HUANGSHANS		HUANGSHANS		3879 -bosphorus1		MACH_BOSPHORUS1		BOSPHORUS1		3880 -prima			MACH_PRIMA		PRIMA			3881 -evita_ulk		MACH_EVITA_ULK		EVITA_ULK		3884 -merisc600		MACH_MERISC600		MERISC600		3885 -dolak			MACH_DOLAK		DOLAK			3886 -sbc53			MACH_SBC53		SBC53			3887 -elite_ulk		MACH_ELITE_ULK		ELITE_ULK		3888 -pov2			MACH_POV2		POV2			3889 -ipod_touch_2g		MACH_IPOD_TOUCH_2G	IPOD_TOUCH_2G		3890 -da850_pqab		MACH_DA850_PQAB		DA850_PQAB		3891 -fermi			MACH_FERMI		FERMI			3892 -ccardwmx28		MACH_CCARDWMX28		CCARDWMX28		3893 -ccardmx28		MACH_CCARDMX28		CCARDMX28		3894 -fs20_fcm2050		MACH_FS20_FCM2050	FS20_FCM2050		3895 -kinetis			MACH_KINETIS		KINETIS			3896 -kai			MACH_KAI		KAI			3897 -bcthb2			MACH_BCTHB2		BCTHB2			3898 -inels3_cu		MACH_INELS3_CU		INELS3_CU		3899 -da850_apollo		MACH_DA850_APOLLO	DA850_APOLLO		3901 -tracnas			MACH_TRACNAS		TRACNAS			3902 -mityarm335x		MACH_MITYARM335X	MITYARM335X		3903 -xcgz7x			MACH_XCGZ7X		XCGZ7X			3904 -cubox			MACH_CUBOX		CUBOX			3905 -terminator		MACH_TERMINATOR		TERMINATOR		3906 -eye03			MACH_EYE03		EYE03			3907 -kota3			MACH_KOTA3		KOTA3			3908 -pscpe			MACH_PSCPE		PSCPE			3910 -akt1100			MACH_AKT1100		AKT1100			3911 -pcaaxl2			MACH_PCAAXL2		PCAAXL2			3912 -primodd_ct		MACH_PRIMODD_CT		PRIMODD_CT		3913 -nsbc			MACH_NSBC		NSBC			3914 -meson2_skt		MACH_MESON2_SKT		MESON2_SKT		3915 -meson2_ref		MACH_MESON2_REF		MESON2_REF		3916 -ccardwmx28js		MACH_CCARDWMX28JS	CCARDWMX28JS		3917 -ccardmx28js		MACH_CCARDMX28JS	CCARDMX28JS		3918 -indico			MACH_INDICO		INDICO			3919 -msm8960dt		MACH_MSM8960DT		MSM8960DT		3920 -primods			MACH_PRIMODS		PRIMODS			3921 -beluga_m1388		MACH_BELUGA_M1388	BELUGA_M1388		3922 -primotd			MACH_PRIMOTD		PRIMOTD			3923 -varan_master		MACH_VARAN_MASTER	VARAN_MASTER		3924 -primodd			MACH_PRIMODD		PRIMODD			3925 -jetduo			MACH_JETDUO		JETDUO			3926  mx53_umobo		MACH_MX53_UMOBO		
MX53_UMOBO		3927 -trats			MACH_TRATS		TRATS			3928 -starcraft		MACH_STARCRAFT		STARCRAFT		3929 -qseven_tegra2		MACH_QSEVEN_TEGRA2	QSEVEN_TEGRA2		3930 -lichee_sun4i_devbd	MACH_LICHEE_SUN4I_DEVBD	LICHEE_SUN4I_DEVBD	3931 -movenow			MACH_MOVENOW		MOVENOW			3932 -golf_u			MACH_GOLF_U		GOLF_U			3933 -msm7627a_evb		MACH_MSM7627A_EVB	MSM7627A_EVB		3934 -rambo			MACH_RAMBO		RAMBO			3935 -golfu			MACH_GOLFU		GOLFU			3936 -mango310		MACH_MANGO310		MANGO310		3937 -dns343			MACH_DNS343		DNS343			3938 -var_som_om44		MACH_VAR_SOM_OM44	VAR_SOM_OM44		3939 -naon			MACH_NAON		NAON			3940 -vp4000			MACH_VP4000		VP4000			3941 -impcard			MACH_IMPCARD		IMPCARD			3942 -smoovcam		MACH_SMOOVCAM		SMOOVCAM		3943 -cobham3725		MACH_COBHAM3725		COBHAM3725		3944 -cobham3730		MACH_COBHAM3730		COBHAM3730		3945 -cobham3703		MACH_COBHAM3703		COBHAM3703		3946 -quetzal			MACH_QUETZAL		QUETZAL			3947 -apq8064_cdp		MACH_APQ8064_CDP	APQ8064_CDP		3948 -apq8064_mtp		MACH_APQ8064_MTP	APQ8064_MTP		3949 -apq8064_fluid		MACH_APQ8064_FLUID	APQ8064_FLUID		3950 -apq8064_liquid		MACH_APQ8064_LIQUID	APQ8064_LIQUID		3951 -mango210		MACH_MANGO210		MANGO210		3952 -mango100		MACH_MANGO100		MANGO100		3953 -mango24			MACH_MANGO24		MANGO24			3954 -mango64			MACH_MANGO64		MANGO64			3955 -nsa320			MACH_NSA320		NSA320			3956 -elv_ccu2		MACH_ELV_CCU2		ELV_CCU2		3957 -triton_x00		MACH_TRITON_X00		TRITON_X00		3958 -triton_1500_2000	MACH_TRITON_1500_2000	TRITON_1500_2000	3959 -pogoplugv4		MACH_POGOPLUGV4		POGOPLUGV4		3960 -venus_cl		MACH_VENUS_CL		VENUS_CL		3961 -vulcano_g20		MACH_VULCANO_G20	VULCANO_G20		3962 -sgs_i9100		MACH_SGS_I9100		SGS_I9100		3963 -stsv2			MACH_STSV2		STSV2			3964 -csb1724			MACH_CSB1724		CSB1724			3965 -omapl138_lcdk		MACH_OMAPL138_LCDK	OMAPL138_LCDK		3966 -pvd_mx25		MACH_PVD_MX25		PVD_MX25		3968 -meson6_skt		MACH_MESON6_SKT		MESON6_SKT		3969 -meson6_ref		MACH_MESON6_REF		MESON6_REF		3970 -pxm			MACH_PXM		PXM			3971 -pogoplugv3		MACH_POGOPLUGV3		POGOPLUGV3		3973 -mlp89626		MACH_MLP89626		MLP89626		3974 
-iomegahmndce		MACH_IOMEGAHMNDCE	IOMEGAHMNDCE		3975 -pogoplugv3pci		MACH_POGOPLUGV3PCI	POGOPLUGV3PCI		3976 -bntv250			MACH_BNTV250		BNTV250			3977 -mx53_qseven		MACH_MX53_QSEVEN	MX53_QSEVEN		3978 -gtl_it1100		MACH_GTL_IT1100		GTL_IT1100		3979 -mx6q_sabresd		MACH_MX6Q_SABRESD	MX6Q_SABRESD		3980  mt4			MACH_MT4		MT4			3981 -jumbo_d			MACH_JUMBO_D		JUMBO_D			3982 -jumbo_i			MACH_JUMBO_I		JUMBO_I			3983 -fs20_dmp		MACH_FS20_DMP		FS20_DMP		3984 -dns320			MACH_DNS320		DNS320			3985 -mx28bacos		MACH_MX28BACOS		MX28BACOS		3986 -tl80			MACH_TL80		TL80			3987 -polatis_nic_1001	MACH_POLATIS_NIC_1001	POLATIS_NIC_1001	3988 -tely			MACH_TELY		TELY			3989  u8520			MACH_U8520		U8520			3990 -manta			MACH_MANTA		MANTA			3991 -mpq8064_cdp		MACH_MPQ8064_CDP	MPQ8064_CDP		3993 -mpq8064_dtv		MACH_MPQ8064_DTV	MPQ8064_DTV		3995 -dm368som		MACH_DM368SOM		DM368SOM		3996 -gprisb2			MACH_GPRISB2		GPRISB2			3997 -chammid			MACH_CHAMMID		CHAMMID			3998 -seoul2			MACH_SEOUL2		SEOUL2			3999 -omap4_nooktablet	MACH_OMAP4_NOOKTABLET	OMAP4_NOOKTABLET	4000 -aalto			MACH_AALTO		AALTO			4001 -metro			MACH_METRO		METRO			4002 -cydm3730		MACH_CYDM3730		CYDM3730		4003 -tqma53			MACH_TQMA53		TQMA53			4004 -msm7627a_qrd3		MACH_MSM7627A_QRD3	MSM7627A_QRD3		4005 -mx28_canby		MACH_MX28_CANBY		MX28_CANBY		4006 -tiger			MACH_TIGER		TIGER			4007 -pcats_9307_type_a	MACH_PCATS_9307_TYPE_A	PCATS_9307_TYPE_A	4008 -pcats_9307_type_o	MACH_PCATS_9307_TYPE_O	PCATS_9307_TYPE_O	4009 -pcats_9307_type_r	MACH_PCATS_9307_TYPE_R	PCATS_9307_TYPE_R	4010 -streamplug		MACH_STREAMPLUG		STREAMPLUG		4011 -icechicken_dev		MACH_ICECHICKEN_DEV	ICECHICKEN_DEV		4012 -hedgehog		MACH_HEDGEHOG		HEDGEHOG		4013 -yusend_obc		MACH_YUSEND_OBC		YUSEND_OBC		4014 -imxninja		MACH_IMXNINJA		IMXNINJA		4015 -omap4_jarod		MACH_OMAP4_JAROD	OMAP4_JAROD		4016 -eco5_pk			MACH_ECO5_PK		ECO5_PK			4017 -qj2440			MACH_QJ2440		QJ2440			4018 -mx6q_mercury		MACH_MX6Q_MERCURY	MX6Q_MERCURY		4019 -cm6810			MACH_CM6810		CM6810			4020 -omap4_torpedo		MACH_OMAP4_TORPEDO	
OMAP4_TORPEDO		4021 -nsa310			MACH_NSA310		NSA310			4022 -tmx536			MACH_TMX536		TMX536			4023 -ktt20			MACH_KTT20		KTT20			4024 -dragonix		MACH_DRAGONIX		DRAGONIX		4025 -lungching		MACH_LUNGCHING		LUNGCHING		4026 -bulogics		MACH_BULOGICS		BULOGICS		4027 -mx535_sx		MACH_MX535_SX		MX535_SX		4028 -ngui3250		MACH_NGUI3250		NGUI3250		4029 -salutec_dac		MACH_SALUTEC_DAC	SALUTEC_DAC		4030 -loco			MACH_LOCO		LOCO			4031 -ctera_plug_usi		MACH_CTERA_PLUG_USI	CTERA_PLUG_USI		4032 -scepter			MACH_SCEPTER		SCEPTER			4033 -sga			MACH_SGA		SGA			4034 -p_81_j5			MACH_P_81_J5		P_81_J5			4035 -p_81_o4			MACH_P_81_O4		P_81_O4			4036 -msm8625_surf		MACH_MSM8625_SURF	MSM8625_SURF		4037 -carallon_shark		MACH_CARALLON_SHARK	CARALLON_SHARK		4038 -ordog			MACH_ORDOG		ORDOG			4040 -puente_io		MACH_PUENTE_IO		PUENTE_IO		4041 -msm8625_evb		MACH_MSM8625_EVB	MSM8625_EVB		4042 -ev_am1707		MACH_EV_AM1707		EV_AM1707		4043 -ev_am1707e2		MACH_EV_AM1707E2	EV_AM1707E2		4044 -ev_am3517e2		MACH_EV_AM3517E2	EV_AM3517E2		4045 -calabria		MACH_CALABRIA		CALABRIA		4046 -ev_imx287		MACH_EV_IMX287		EV_IMX287		4047 -erau			MACH_ERAU		ERAU			4048 -sichuan			MACH_SICHUAN		SICHUAN			4049 -davinci_da850		MACH_DAVINCI_DA850	DAVINCI_DA850		4051 -omap138_trunarc		MACH_OMAP138_TRUNARC	OMAP138_TRUNARC		4052 -bcm4761			MACH_BCM4761		BCM4761			4053 -picasso_e2		MACH_PICASSO_E2		PICASSO_E2		4054 -picasso_mf		MACH_PICASSO_MF		PICASSO_MF		4055 -miro			MACH_MIRO		MIRO			4056 -at91sam9g20ewon3	MACH_AT91SAM9G20EWON3	AT91SAM9G20EWON3	4057 -yoyo			MACH_YOYO		YOYO			4058 -windjkl			MACH_WINDJKL		WINDJKL			4059 -monarudo		MACH_MONARUDO		MONARUDO		4060 -batan			MACH_BATAN		BATAN			4061 -tadao			MACH_TADAO		TADAO			4062 -baso			MACH_BASO		BASO			4063 -mahon			MACH_MAHON		MAHON			4064 -villec2			MACH_VILLEC2		VILLEC2			4065 -asi1230			MACH_ASI1230		ASI1230			4066 -alaska			MACH_ALASKA		ALASKA			4067 -swarco_shdsl2		MACH_SWARCO_SHDSL2	SWARCO_SHDSL2		4068 -oxrtu			MACH_OXRTU		OXRTU			4069 -omap5_panda		MACH_OMAP5_PANDA	OMAP5_PANDA		4070 
-c8000			MACH_C8000		C8000			4072 -bje_display3_5		MACH_BJE_DISPLAY3_5	BJE_DISPLAY3_5		4073 -picomod7		MACH_PICOMOD7		PICOMOD7		4074 -picocom5		MACH_PICOCOM5		PICOCOM5		4075 -qblissa8		MACH_QBLISSA8		QBLISSA8		4076 -armstonea8		MACH_ARMSTONEA8		ARMSTONEA8		4077 -netdcu14		MACH_NETDCU14		NETDCU14		4078 -at91sam9x5_epiphan	MACH_AT91SAM9X5_EPIPHAN	AT91SAM9X5_EPIPHAN	4079 -p2u			MACH_P2U		P2U			4080 -doris			MACH_DORIS		DORIS			4081 -j49			MACH_J49		J49			4082 -vdss2e			MACH_VDSS2E		VDSS2E			4083 -vc300			MACH_VC300		VC300			4084 -ns115_pad_test		MACH_NS115_PAD_TEST	NS115_PAD_TEST		4085 -ns115_pad_ref		MACH_NS115_PAD_REF	NS115_PAD_REF		4086 -ns115_phone_test	MACH_NS115_PHONE_TEST	NS115_PHONE_TEST	4087 -ns115_phone_ref		MACH_NS115_PHONE_REF	NS115_PHONE_REF		4088 -golfc			MACH_GOLFC		GOLFC			4089 -xerox_olympus		MACH_XEROX_OLYMPUS	XEROX_OLYMPUS		4090 -mx6sl_arm2		MACH_MX6SL_ARM2		MX6SL_ARM2		4091 -csb1701_csb1726		MACH_CSB1701_CSB1726	CSB1701_CSB1726		4092 -at91sam9xeek		MACH_AT91SAM9XEEK	AT91SAM9XEEK		4093 -ebv210			MACH_EBV210		EBV210			4094 -msm7627a_qrd7		MACH_MSM7627A_QRD7	MSM7627A_QRD7		4095 -svthin			MACH_SVTHIN		SVTHIN			4096 -duovero			MACH_DUOVERO		DUOVERO			4097  chupacabra		MACH_CHUPACABRA		CHUPACABRA		4098  scorpion		MACH_SCORPION		SCORPION		4099  davinci_he_hmi10	MACH_DAVINCI_HE_HMI10	DAVINCI_HE_HMI10	4100 @@ -1157,7 +580,6 @@ tam335x			MACH_TAM335X		TAM335X			4116  grouper			MACH_GROUPER		GROUPER			4117  mpcsa21_9g20		MACH_MPCSA21_9G20	MPCSA21_9G20		4118  m6u_cpu			MACH_M6U_CPU		M6U_CPU			4119 -davinci_dp10		MACH_DAVINCI_DP10	DAVINCI_DP10		4120  ginkgo			MACH_GINKGO		GINKGO			4121  cgt_qmx6		MACH_CGT_QMX6		CGT_QMX6		4122  profpga			MACH_PROFPGA		PROFPGA			4123 @@ -1204,3 +626,384 @@ baileys			MACH_BAILEYS		BAILEYS			4169  familybox		MACH_FAMILYBOX		FAMILYBOX		4170  ensemble_mx35		MACH_ENSEMBLE_MX35	ENSEMBLE_MX35		4171  sc_sps_1		MACH_SC_SPS_1		SC_SPS_1		4172 +ucsimply_sam9260	MACH_UCSIMPLY_SAM9260	UCSIMPLY_SAM9260	4173 +unicorn			MACH_UNICORN		UNICORN			
4174 +m9g45a			MACH_M9G45A		M9G45A			4175 +mtwebif			MACH_MTWEBIF		MTWEBIF			4176 +playstone		MACH_PLAYSTONE		PLAYSTONE		4177 +chelsea			MACH_CHELSEA		CHELSEA			4178 +bayern			MACH_BAYERN		BAYERN			4179 +mitwo			MACH_MITWO		MITWO			4180 +mx25_noah		MACH_MX25_NOAH		MX25_NOAH		4181 +stm_b2020		MACH_STM_B2020		STM_B2020		4182 +annax_src		MACH_ANNAX_SRC		ANNAX_SRC		4183 +ionics_stratus		MACH_IONICS_STRATUS	IONICS_STRATUS		4184 +hugo			MACH_HUGO		HUGO			4185 +em300			MACH_EM300		EM300			4186 +mmp3_qseven		MACH_MMP3_QSEVEN	MMP3_QSEVEN		4187 +bosphorus2		MACH_BOSPHORUS2		BOSPHORUS2		4188 +tt2200			MACH_TT2200		TT2200			4189 +ocelot3			MACH_OCELOT3		OCELOT3			4190 +tek_cobra		MACH_TEK_COBRA		TEK_COBRA		4191 +protou			MACH_PROTOU		PROTOU			4192 +msm8625_evt		MACH_MSM8625_EVT	MSM8625_EVT		4193 +mx53_sellwood		MACH_MX53_SELLWOOD	MX53_SELLWOOD		4194 +somiq_am35		MACH_SOMIQ_AM35		SOMIQ_AM35		4195 +somiq_am37		MACH_SOMIQ_AM37		SOMIQ_AM37		4196 +k2_plc_cl		MACH_K2_PLC_CL		K2_PLC_CL		4197 +tc2			MACH_TC2		TC2			4198 +dulex_j			MACH_DULEX_J		DULEX_J			4199 +stm_b2044		MACH_STM_B2044		STM_B2044		4200 +deluxe_j		MACH_DELUXE_J		DELUXE_J		4201 +mango2443		MACH_MANGO2443		MANGO2443		4202 +cp2dcg			MACH_CP2DCG		CP2DCG			4203 +cp2dtg			MACH_CP2DTG		CP2DTG			4204 +cp2dug			MACH_CP2DUG		CP2DUG			4205 +var_som_am33		MACH_VAR_SOM_AM33	VAR_SOM_AM33		4206 +pepper			MACH_PEPPER		PEPPER			4207 +mango2450		MACH_MANGO2450		MANGO2450		4208 +valente_wx_c9		MACH_VALENTE_WX_C9	VALENTE_WX_C9		4209 +minitv			MACH_MINITV		MINITV			4210 +u8540			MACH_U8540		U8540			4211 +iv_atlas_i_z7e		MACH_IV_ATLAS_I_Z7E	IV_ATLAS_I_Z7E		4212 +mach_type_sky		MACH_MACH_TYPE_SKY	MACH_TYPE_SKY		4214 +bluesky			MACH_BLUESKY		BLUESKY			4215 +ngrouter		MACH_NGROUTER		NGROUTER		4216 +mx53_denetim		MACH_MX53_DENETIM	MX53_DENETIM		4217 +opal			MACH_OPAL		OPAL			4218 +gnet_us3gref		MACH_GNET_US3GREF	GNET_US3GREF		4219 +gnet_nc3g		MACH_GNET_NC3G		GNET_NC3G		4220 +gnet_ge3g		MACH_GNET_GE3G		GNET_GE3G		4221 +adp2			MACH_ADP2		ADP2			
4222 +tqma28			MACH_TQMA28		TQMA28			4223 +kacom3			MACH_KACOM3		KACOM3			4224 +rrhdemo			MACH_RRHDEMO		RRHDEMO			4225 +protodug		MACH_PROTODUG		PROTODUG		4226 +lago			MACH_LAGO		LAGO			4227 +ktt30			MACH_KTT30		KTT30			4228 +ts43xx			MACH_TS43XX		TS43XX			4229 +mx6q_denso		MACH_MX6Q_DENSO		MX6Q_DENSO		4230 +comsat_gsmumts8		MACH_COMSAT_GSMUMTS8	COMSAT_GSMUMTS8		4231 +dreamx			MACH_DREAMX		DREAMX			4232 +thunderstonem		MACH_THUNDERSTONEM	THUNDERSTONEM		4233 +yoyopad			MACH_YOYOPAD		YOYOPAD			4234 +yoyopatient		MACH_YOYOPATIENT	YOYOPATIENT		4235 +a10l			MACH_A10L		A10L			4236 +mq60			MACH_MQ60		MQ60			4237 +linkstation_lsql	MACH_LINKSTATION_LSQL	LINKSTATION_LSQL	4238 +am3703gateway		MACH_AM3703GATEWAY	AM3703GATEWAY		4239 +accipiter		MACH_ACCIPITER		ACCIPITER		4240 +magnidug		MACH_MAGNIDUG		MAGNIDUG		4242 +hydra			MACH_HYDRA		HYDRA			4243 +sun3i			MACH_SUN3I		SUN3I			4244 +stm_b2078		MACH_STM_B2078		STM_B2078		4245 +at91sam9263deskv2	MACH_AT91SAM9263DESKV2	AT91SAM9263DESKV2	4246 +deluxe_r		MACH_DELUXE_R		DELUXE_R		4247 +p_98_v			MACH_P_98_V		P_98_V			4248 +p_98_c			MACH_P_98_C		P_98_C			4249 +davinci_am18xx_omn	MACH_DAVINCI_AM18XX_OMN	DAVINCI_AM18XX_OMN	4250 +socfpga_cyclone5	MACH_SOCFPGA_CYCLONE5	SOCFPGA_CYCLONE5	4251 +cabatuin		MACH_CABATUIN		CABATUIN		4252 +yoyopad_ft		MACH_YOYOPAD_FT		YOYOPAD_FT		4253 +dan2400evb		MACH_DAN2400EVB		DAN2400EVB		4254 +dan3400evb		MACH_DAN3400EVB		DAN3400EVB		4255 +edm_sf_imx6		MACH_EDM_SF_IMX6	EDM_SF_IMX6		4256 +edm_cf_imx6		MACH_EDM_CF_IMX6	EDM_CF_IMX6		4257 +vpos3xx			MACH_VPOS3XX		VPOS3XX			4258 +vulcano_9x5		MACH_VULCANO_9X5	VULCANO_9X5		4259 +spmp8000		MACH_SPMP8000		SPMP8000		4260 +catalina		MACH_CATALINA		CATALINA		4261 +rd88f5181l_fe		MACH_RD88F5181L_FE	RD88F5181L_FE		4262 +mx535_mx		MACH_MX535_MX		MX535_MX		4263 +armadillo840		MACH_ARMADILLO840	ARMADILLO840		4264 +spc9000baseboard	MACH_SPC9000BASEBOARD	SPC9000BASEBOARD	4265 +iris			MACH_IRIS		IRIS			4266 +protodcg		MACH_PROTODCG		PROTODCG		4267 +palmtree		MACH_PALMTREE		
PALMTREE		4268 +novena			MACH_NOVENA		NOVENA			4269 +ma_um			MACH_MA_UM		MA_UM			4270 +ma_am			MACH_MA_AM		MA_AM			4271 +ems348			MACH_EMS348		EMS348			4272 +cm_fx6			MACH_CM_FX6		CM_FX6			4273 +arndale			MACH_ARNDALE		ARNDALE			4274 +q5xr5			MACH_Q5XR5		Q5XR5			4275 +willow			MACH_WILLOW		WILLOW			4276 +omap3621_odyv3		MACH_OMAP3621_ODYV3	OMAP3621_ODYV3		4277 +omapl138_presonus	MACH_OMAPL138_PRESONUS	OMAPL138_PRESONUS	4278 +dvf99			MACH_DVF99		DVF99			4279 +impression_j		MACH_IMPRESSION_J	IMPRESSION_J		4280 +qblissa9		MACH_QBLISSA9		QBLISSA9		4281 +robin_heliview10	MACH_ROBIN_HELIVIEW10	ROBIN_HELIVIEW10	4282 +sun7i			MACH_SUN7I		SUN7I			4283 +mx6q_hdmidongle		MACH_MX6Q_HDMIDONGLE	MX6Q_HDMIDONGLE		4284 +mx6_sid2		MACH_MX6_SID2		MX6_SID2		4285 +helios_v3		MACH_HELIOS_V3		HELIOS_V3		4286 +helios_v4		MACH_HELIOS_V4		HELIOS_V4		4287 +q7_imx6			MACH_Q7_IMX6		Q7_IMX6			4288 +odroidx			MACH_ODROIDX		ODROIDX			4289 +robpro			MACH_ROBPRO		ROBPRO			4290 +research59if_mk1	MACH_RESEARCH59IF_MK1	RESEARCH59IF_MK1	4291 +bobsleigh		MACH_BOBSLEIGH		BOBSLEIGH		4292 +dcshgwt3		MACH_DCSHGWT3		DCSHGWT3		4293 +gld1018			MACH_GLD1018		GLD1018			4294 +ev10			MACH_EV10		EV10			4295 +nitrogen6x		MACH_NITROGEN6X		NITROGEN6X		4296 +p_107_bb		MACH_P_107_BB		P_107_BB		4297 +evita_utl		MACH_EVITA_UTL		EVITA_UTL		4298 +falconwing		MACH_FALCONWING		FALCONWING		4299 +dct3			MACH_DCT3		DCT3			4300 +cpx2e_cell		MACH_CPX2E_CELL		CPX2E_CELL		4301 +amiro			MACH_AMIRO		AMIRO			4302 +mx6q_brassboard		MACH_MX6Q_BRASSBOARD	MX6Q_BRASSBOARD		4303 +dalmore			MACH_DALMORE		DALMORE			4304 +omap3_portal7cp		MACH_OMAP3_PORTAL7CP	OMAP3_PORTAL7CP		4305 +tegra_pluto		MACH_TEGRA_PLUTO	TEGRA_PLUTO		4306 +mx6sl_evk		MACH_MX6SL_EVK		MX6SL_EVK		4307 +m7			MACH_M7			M7			4308 +pxm2			MACH_PXM2		PXM2			4309 +haba_knx_lite		MACH_HABA_KNX_LITE	HABA_KNX_LITE		4310 +tai			MACH_TAI		TAI			4311 +prototd			MACH_PROTOTD		PROTOTD			4312 +dst_tonto		MACH_DST_TONTO		DST_TONTO		4313 +draco			MACH_DRACO		DRACO			4314 +dxr2			MACH_DXR2		
DXR2			4315 +rut			MACH_RUT		RUT			4316 +am180x_wsc		MACH_AM180X_WSC		AM180X_WSC		4317 +deluxe_u		MACH_DELUXE_U		DELUXE_U		4318 +deluxe_ul		MACH_DELUXE_UL		DELUXE_UL		4319 +at91sam9260medths	MACH_AT91SAM9260MEDTHS	AT91SAM9260MEDTHS	4320 +matrix516		MACH_MATRIX516		MATRIX516		4321 +vid401x			MACH_VID401X		VID401X			4322 +helios_v5		MACH_HELIOS_V5		HELIOS_V5		4323 +playpaq2		MACH_PLAYPAQ2		PLAYPAQ2		4324 +igam			MACH_IGAM		IGAM			4325 +amico_i			MACH_AMICO_I		AMICO_I			4326 +amico_e			MACH_AMICO_E		AMICO_E			4327 +sentient_mm3_ck		MACH_SENTIENT_MM3_CK	SENTIENT_MM3_CK		4328 +smx6			MACH_SMX6		SMX6			4329 +pango			MACH_PANGO		PANGO			4330 +ns115_stick		MACH_NS115_STICK	NS115_STICK		4331 +bctrm3			MACH_BCTRM3		BCTRM3			4332 +doctorws		MACH_DOCTORWS		DOCTORWS		4333 +m2601			MACH_M2601		M2601			4334 +vgg1111			MACH_VGG1111		VGG1111			4337 +countach		MACH_COUNTACH		COUNTACH		4338 +visstrim_sm20		MACH_VISSTRIM_SM20	VISSTRIM_SM20		4339 +a639			MACH_A639		A639			4340 +spacemonkey		MACH_SPACEMONKEY	SPACEMONKEY		4341 +zpdu_stamp		MACH_ZPDU_STAMP		ZPDU_STAMP		4342 +htc_g7_clone		MACH_HTC_G7_CLONE	HTC_G7_CLONE		4343 +ft2080_corvus		MACH_FT2080_CORVUS	FT2080_CORVUS		4344 +fisland			MACH_FISLAND		FISLAND			4345 +zpdu			MACH_ZPDU		ZPDU			4346 +urt			MACH_URT		URT			4347 +conti_ovip		MACH_CONTI_OVIP		CONTI_OVIP		4348 +omapl138_nagra		MACH_OMAPL138_NAGRA	OMAPL138_NAGRA		4349 +da850_at3kp1		MACH_DA850_AT3KP1	DA850_AT3KP1		4350 +da850_at3kp2		MACH_DA850_AT3KP2	DA850_AT3KP2		4351 +surma			MACH_SURMA		SURMA			4352 +stm_b2092		MACH_STM_B2092		STM_B2092		4353 +mx535_ycr		MACH_MX535_YCR		MX535_YCR		4354 +m7_wl			MACH_M7_WL		M7_WL			4355 +m7_u			MACH_M7_U		M7_U			4356 +omap3_stndt_evm		MACH_OMAP3_STNDT_EVM	OMAP3_STNDT_EVM		4357 +m7_wlv			MACH_M7_WLV		M7_WLV			4358 +xam3517			MACH_XAM3517		XAM3517			4359 +a220			MACH_A220		A220			4360 +aclima_odie		MACH_ACLIMA_ODIE	ACLIMA_ODIE		4361 +vibble			MACH_VIBBLE		VIBBLE			4362 +k2_u			MACH_K2_U		K2_U			4363 +mx53_egf		MACH_MX53_EGF		MX53_EGF		4364 
+novpek_imx53		MACH_NOVPEK_IMX53	NOVPEK_IMX53		4365 +novpek_imx6x		MACH_NOVPEK_IMX6X	NOVPEK_IMX6X		4366 +mx25_smartbox		MACH_MX25_SMARTBOX	MX25_SMARTBOX		4367 +eicg6410		MACH_EICG6410		EICG6410		4368 +picasso_e3		MACH_PICASSO_E3		PICASSO_E3		4369 +motonavigator		MACH_MOTONAVIGATOR	MOTONAVIGATOR		4370 +varioconnect2		MACH_VARIOCONNECT2	VARIOCONNECT2		4371 +deluxe_tw		MACH_DELUXE_TW		DELUXE_TW		4372 +kore3			MACH_KORE3		KORE3			4374 +mx6s_drs		MACH_MX6S_DRS		MX6S_DRS		4375 +cmimx6			MACH_CMIMX6		CMIMX6			4376 +roth			MACH_ROTH		ROTH			4377 +eq4ux			MACH_EQ4UX		EQ4UX			4378 +x1plus			MACH_X1PLUS		X1PLUS			4379 +modimx27		MACH_MODIMX27		MODIMX27		4380 +videon_hduac		MACH_VIDEON_HDUAC	VIDEON_HDUAC		4381 +blackbird		MACH_BLACKBIRD		BLACKBIRD		4382 +runmaster		MACH_RUNMASTER		RUNMASTER		4383 +ceres			MACH_CERES		CERES			4384 +nad435			MACH_NAD435		NAD435			4385 +ns115_proto_type	MACH_NS115_PROTO_TYPE	NS115_PROTO_TYPE	4386 +fs20_vcc		MACH_FS20_VCC		FS20_VCC		4387 +meson6tv_skt		MACH_MESON6TV_SKT	MESON6TV_SKT		4389 +keystone		MACH_KEYSTONE		KEYSTONE		4390 +pcm052			MACH_PCM052		PCM052			4391 +qrd_skud_prime		MACH_QRD_SKUD_PRIME	QRD_SKUD_PRIME		4393 +guf_santaro		MACH_GUF_SANTARO	GUF_SANTARO		4395 +sheepshead		MACH_SHEEPSHEAD		SHEEPSHEAD		4396 +mx6_iwg15m_mxm		MACH_MX6_IWG15M_MXM	MX6_IWG15M_MXM		4397 +mx6_iwg15m_q7		MACH_MX6_IWG15M_Q7	MX6_IWG15M_Q7		4398 +at91sam9263if8mic	MACH_AT91SAM9263IF8MIC	AT91SAM9263IF8MIC	4399 +marcopolo		MACH_MARCOPOLO		MARCOPOLO		4401 +mx535_sdcr		MACH_MX535_SDCR		MX535_SDCR		4402 +mx53_csb2733		MACH_MX53_CSB2733	MX53_CSB2733		4403 +diva			MACH_DIVA		DIVA			4404 +ncr_7744		MACH_NCR_7744		NCR_7744		4405 +macallan		MACH_MACALLAN		MACALLAN		4406 +wnr3500			MACH_WNR3500		WNR3500			4407 +pgavrf			MACH_PGAVRF		PGAVRF			4408 +helios_v6		MACH_HELIOS_V6		HELIOS_V6		4409 +lcct			MACH_LCCT		LCCT			4410 +csndug			MACH_CSNDUG		CSNDUG			4411 +wandboard_imx6		MACH_WANDBOARD_IMX6	WANDBOARD_IMX6		4412 +omap4_jet		MACH_OMAP4_JET		OMAP4_JET		4413 +tegra_roth		
MACH_TEGRA_ROTH		TEGRA_ROTH		4414 +m7dcg			MACH_M7DCG		M7DCG			4415 +m7dug			MACH_M7DUG		M7DUG			4416 +m7dtg			MACH_M7DTG		M7DTG			4417 +ap42x			MACH_AP42X		AP42X			4418 +var_som_mx6		MACH_VAR_SOM_MX6	VAR_SOM_MX6		4419 +pdlu			MACH_PDLU		PDLU			4420 +hydrogen		MACH_HYDROGEN		HYDROGEN		4421 +npa211e			MACH_NPA211E		NPA211E			4422 +arcadia			MACH_ARCADIA		ARCADIA			4423 +arcadia_l		MACH_ARCADIA_L		ARCADIA_L		4424 +msm8930dt		MACH_MSM8930DT		MSM8930DT		4425 +ktam3874		MACH_KTAM3874		KTAM3874		4426 +cec4			MACH_CEC4		CEC4			4427 +ape6evm			MACH_APE6EVM		APE6EVM			4428 +tx6			MACH_TX6		TX6			4429 +cfa10037		MACH_CFA10037		CFA10037		4431 +ezp1000			MACH_EZP1000		EZP1000			4433 +wgr826v			MACH_WGR826V		WGR826V			4434 +exuma			MACH_EXUMA		EXUMA			4435 +fregate			MACH_FREGATE		FREGATE			4436 +osirisimx508		MACH_OSIRISIMX508	OSIRISIMX508		4437 +st_exigo		MACH_ST_EXIGO		ST_EXIGO		4438 +pismo			MACH_PISMO		PISMO			4439 +atc7			MACH_ATC7		ATC7			4440 +nspireclp		MACH_NSPIRECLP		NSPIRECLP		4441 +nspiretp		MACH_NSPIRETP		NSPIRETP		4442 +nspirecx		MACH_NSPIRECX		NSPIRECX		4443 +maya			MACH_MAYA		MAYA			4444 +wecct			MACH_WECCT		WECCT			4445 +m2s			MACH_M2S		M2S			4446 +msm8625q_evbd		MACH_MSM8625Q_EVBD	MSM8625Q_EVBD		4447 +tiny210			MACH_TINY210		TINY210			4448 +g3			MACH_G3			G3			4449 +hurricane		MACH_HURRICANE		HURRICANE		4450 +mx6_pod			MACH_MX6_POD		MX6_POD			4451 +elondcn			MACH_ELONDCN		ELONDCN			4452 +cwmx535			MACH_CWMX535		CWMX535			4453 +m7_wlj			MACH_M7_WLJ		M7_WLJ			4454 +qsp_arm			MACH_QSP_ARM		QSP_ARM			4455 +msm8625q_skud		MACH_MSM8625Q_SKUD	MSM8625Q_SKUD		4456 +htcmondrian		MACH_HTCMONDRIAN	HTCMONDRIAN		4457 +watson_ead		MACH_WATSON_EAD		WATSON_EAD		4458 +mitwoa			MACH_MITWOA		MITWOA			4459 +omap3_wolverine		MACH_OMAP3_WOLVERINE	OMAP3_WOLVERINE		4460 +mapletree		MACH_MAPLETREE		MAPLETREE		4461 +msm8625_fih_sae		MACH_MSM8625_FIH_SAE	MSM8625_FIH_SAE		4462 +epc35			MACH_EPC35		EPC35			4463 +smartrtu		MACH_SMARTRTU		SMARTRTU		4464 +rcm101			MACH_RCM101		RCM101			4465 
+amx_imx53_mxx		MACH_AMX_IMX53_MXX	AMX_IMX53_MXX		4466 +acer_a12		MACH_ACER_A12		ACER_A12		4470 +sbc6x			MACH_SBC6X		SBC6X			4471 +u2			MACH_U2			U2			4472 +smdk4270		MACH_SMDK4270		SMDK4270		4473 +priscillag		MACH_PRISCILLAG		PRISCILLAG		4474 +priscillac		MACH_PRISCILLAC		PRISCILLAC		4475 +priscilla		MACH_PRISCILLA		PRISCILLA		4476 +innova_shpu_v2		MACH_INNOVA_SHPU_V2	INNOVA_SHPU_V2		4477 +mach_type_dep2410	MACH_MACH_TYPE_DEP2410	MACH_TYPE_DEP2410	4479 +bctre3			MACH_BCTRE3		BCTRE3			4480 +omap_m100		MACH_OMAP_M100		OMAP_M100		4481 +flo			MACH_FLO		FLO			4482 +nanobone		MACH_NANOBONE		NANOBONE		4483 +stm_b2105		MACH_STM_B2105		STM_B2105		4484 +omap4_bsc_bap_v3	MACH_OMAP4_BSC_BAP_V3	OMAP4_BSC_BAP_V3	4485 +ss1pam			MACH_SS1PAM		SS1PAM			4486 +primominiu		MACH_PRIMOMINIU		PRIMOMINIU		4488 +mrt_35hd_dualnas_e	MACH_MRT_35HD_DUALNAS_E	MRT_35HD_DUALNAS_E	4489 +kiwi			MACH_KIWI		KIWI			4490 +hw90496			MACH_HW90496		HW90496			4491 +mep2440			MACH_MEP2440		MEP2440			4492 +colibri_t30		MACH_COLIBRI_T30	COLIBRI_T30		4493 +cwv1			MACH_CWV1		CWV1			4494 +nsa325			MACH_NSA325		NSA325			4495 +dpxmtc			MACH_DPXMTC		DPXMTC			4497 +tt_stuttgart		MACH_TT_STUTTGART	TT_STUTTGART		4498 +miranda_apcii		MACH_MIRANDA_APCII	MIRANDA_APCII		4499 +mx6q_moderox		MACH_MX6Q_MODEROX	MX6Q_MODEROX		4500 +mudskipper		MACH_MUDSKIPPER		MUDSKIPPER		4501 +urania			MACH_URANIA		URANIA			4502 +stm_b2112		MACH_STM_B2112		STM_B2112		4503 +mx6q_ats_phoenix	MACH_MX6Q_ATS_PHOENIX	MX6Q_ATS_PHOENIX	4505 +stm_b2116		MACH_STM_B2116		STM_B2116		4506 +mythology		MACH_MYTHOLOGY		MYTHOLOGY		4507 +fc360v1			MACH_FC360V1		FC360V1			4508 +gps_sensor		MACH_GPS_SENSOR		GPS_SENSOR		4509 +gazelle			MACH_GAZELLE		GAZELLE			4510 +mpq8064_dma		MACH_MPQ8064_DMA	MPQ8064_DMA		4511 +wems_asd01		MACH_WEMS_ASD01		WEMS_ASD01		4512 +apalis_t30		MACH_APALIS_T30		APALIS_T30		4513 +armstonea9		MACH_ARMSTONEA9		ARMSTONEA9		4515 +omap_blazetablet	MACH_OMAP_BLAZETABLET	OMAP_BLAZETABLET	4516 +ar6mxq			MACH_AR6MXQ		AR6MXQ			4517 +ar6mxs			
MACH_AR6MXS		AR6MXS			4518 +gwventana		MACH_GWVENTANA		GWVENTANA		4520 +igep0033		MACH_IGEP0033		IGEP0033		4521 +h52c1_concerto		MACH_H52C1_CONCERTO	H52C1_CONCERTO		4524 +fcmbrd			MACH_FCMBRD		FCMBRD			4525 +pcaaxs1			MACH_PCAAXS1		PCAAXS1			4526 +ls_orca			MACH_LS_ORCA		LS_ORCA			4527 +pcm051lb		MACH_PCM051LB		PCM051LB		4528 +mx6s_lp507_gvci		MACH_MX6S_LP507_GVCI	MX6S_LP507_GVCI		4529 +dido			MACH_DIDO		DIDO			4530 +swarco_itc3_9g20	MACH_SWARCO_ITC3_9G20	SWARCO_ITC3_9G20	4531 +robo_roady		MACH_ROBO_ROADY		ROBO_ROADY		4532 +rskrza1			MACH_RSKRZA1		RSKRZA1			4533 +swarco_sid		MACH_SWARCO_SID		SWARCO_SID		4534 +mx6_iwg15s_sbc		MACH_MX6_IWG15S_SBC	MX6_IWG15S_SBC		4535 +mx6q_camaro		MACH_MX6Q_CAMARO	MX6Q_CAMARO		4536 +hb6mxs			MACH_HB6MXS		HB6MXS			4537 +lager			MACH_LAGER		LAGER			4538 +lp8x4x			MACH_LP8X4X		LP8X4X			4539 +tegratab7		MACH_TEGRATAB7		TEGRATAB7		4540 +andromeda		MACH_ANDROMEDA		ANDROMEDA		4541 +bootes			MACH_BOOTES		BOOTES			4542 +nethmi			MACH_NETHMI		NETHMI			4543 +tegratab		MACH_TEGRATAB		TEGRATAB		4544 +som5_evb		MACH_SOM5_EVB		SOM5_EVB		4545 +venaticorum		MACH_VENATICORUM	VENATICORUM		4546 +stm_b2110		MACH_STM_B2110		STM_B2110		4547 +elux_hathor		MACH_ELUX_HATHOR	ELUX_HATHOR		4548 +helios_v7		MACH_HELIOS_V7		HELIOS_V7		4549 +xc10v1			MACH_XC10V1		XC10V1			4550 +cp2u			MACH_CP2U		CP2U			4551 +iap_f			MACH_IAP_F		IAP_F			4552 +iap_g			MACH_IAP_G		IAP_G			4553 +aae			MACH_AAE		AAE			4554 +pegasus			MACH_PEGASUS		PEGASUS			4555 +cygnus			MACH_CYGNUS		CYGNUS			4556 +centaurus		MACH_CENTAURUS		CENTAURUS		4557 +msm8930_qrd8930		MACH_MSM8930_QRD8930	MSM8930_QRD8930		4558 +quby_tim		MACH_QUBY_TIM		QUBY_TIM		4559 +zedi3250a		MACH_ZEDI3250A		ZEDI3250A		4560 +grus			MACH_GRUS		GRUS			4561 +apollo3			MACH_APOLLO3		APOLLO3			4562 +cowon_r7		MACH_COWON_R7		COWON_R7		4563 +tonga3			MACH_TONGA3		TONGA3			4564 +p535			MACH_P535		P535			4565 +sa3874i			MACH_SA3874I		SA3874I			4566 +mx6_navico_com		MACH_MX6_NAVICO_COM	MX6_NAVICO_COM		4567 +proxmobil2		MACH_PROXMOBIL2		
PROXMOBIL2		4568 +ubinux1			MACH_UBINUX1		UBINUX1			4569 +istos			MACH_ISTOS		ISTOS			4570 +benvolio4		MACH_BENVOLIO4		BENVOLIO4		4571 +eco5_bx2		MACH_ECO5_BX2		ECO5_BX2		4572 +eukrea_cpuimx28sd	MACH_EUKREA_CPUIMX28SD	EUKREA_CPUIMX28SD	4573 +domotab			MACH_DOMOTAB		DOMOTAB			4574 +pfla03			MACH_PFLA03		PFLA03			4575 diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index cf60d0a9f17..fc6483f83cc 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h @@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32)  #define readw_be			__raw_readw  #define readl_be			__raw_readl +#define writeb_relaxed			writeb +#define writew_relaxed			writew +#define writel_relaxed			writel +  #define writeb_be			__raw_writeb  #define writew_be			__raw_writew  #define writel_be			__raw_writel diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h index cf78e09e18c..2c71d5634ec 100644 --- a/arch/c6x/include/asm/irqflags.h +++ b/arch/c6x/include/asm/irqflags.h @@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)  /* set interrupt enabled status */  static inline void arch_local_irq_restore(unsigned long flags)  { -	asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); +	asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");  }  /* unconditionally enable interrupts */ diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 77597e5ea60..79521d5499f 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={  #define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries) -/* - * this array is used to keep track of the proc entries we create. This is - * required in the module mode when we need to remove all entries. 
The procfs code - * does not do recursion of deletion - * - * Notes: - *	- +1 accounts for the cpuN directory entry in /proc/pal - */ -#define NR_PALINFO_PROC_ENTRIES	(NR_CPUS*(NR_PALINFO_ENTRIES+1)) - -static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];  static struct proc_dir_entry *palinfo_dir;  /* @@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi  static void __cpuinit  create_palinfo_proc_entries(unsigned int cpu)  { -#	define CPUSTR	"cpu%d" -  	pal_func_cpu_u_t f; -	struct proc_dir_entry **pdir;  	struct proc_dir_entry *cpu_dir;  	int j; -	char cpustr[sizeof(CPUSTR)]; - - -	/* -	 * we keep track of created entries in a depth-first order for -	 * cleanup purposes. Each entry is stored into palinfo_proc_entries -	 */ -	sprintf(cpustr,CPUSTR, cpu); +	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */ +	sprintf(cpustr, "cpu%d", cpu);  	cpu_dir = proc_mkdir(cpustr, palinfo_dir); +	if (!cpu_dir) +		return;  	f.req_cpu = cpu; -	/* -	 * Compute the location to store per cpu entries -	 * We dont store the top level entry in this list, but -	 * remove it finally after removing all cpu entries. 
-	 */ -	pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; -	*pdir++ = cpu_dir;  	for (j=0; j < NR_PALINFO_ENTRIES; j++) {  		f.func_id = j; -		*pdir = create_proc_read_entry( -				palinfo_entries[j].name, 0, cpu_dir, -				palinfo_read_entry, (void *)f.value); -		pdir++; +		create_proc_read_entry( +			palinfo_entries[j].name, 0, cpu_dir, +			palinfo_read_entry, (void *)f.value);  	}  }  static void  remove_palinfo_proc_entries(unsigned int hcpu)  { -	int j; -	struct proc_dir_entry *cpu_dir, **pdir; - -	pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; -	cpu_dir = *pdir; -	*pdir++=NULL; -	for (j=0; j < (NR_PALINFO_ENTRIES); j++) { -		if ((*pdir)) { -			remove_proc_entry ((*pdir)->name, cpu_dir); -			*pdir ++= NULL; -		} -	} - -	if (cpu_dir) { -		remove_proc_entry(cpu_dir->name, palinfo_dir); -	} +	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */ +	sprintf(cpustr, "cpu%d", hcpu); +	remove_proc_subtree(cpustr, palinfo_dir);  }  static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, @@ -1058,6 +1019,8 @@ palinfo_init(void)  	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);  	palinfo_dir = proc_mkdir("pal", NULL); +	if (!palinfo_dir) +		return -ENOMEM;  	/* Create palinfo dirs in /proc for all online cpus */  	for_each_online_cpu(i) { @@ -1073,22 +1036,8 @@ palinfo_init(void)  static void __exit  palinfo_exit(void)  { -	int i = 0; - -	/* remove all nodes: depth first pass. 
Could optimize this  */ -	for_each_online_cpu(i) { -		remove_palinfo_proc_entries(i); -	} - -	/* -	 * Remove the top level entry finally -	 */ -	remove_proc_entry(palinfo_dir->name, NULL); - -	/* -	 * Unregister from cpu notifier callbacks -	 */  	unregister_hotcpu_notifier(&palinfo_cpu_notifier); +	remove_proc_subtree("pal", NULL);  }  module_init(palinfo_init); diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h index 4395ffc51fd..8cc83431805 100644 --- a/arch/m68k/include/asm/gpio.h +++ b/arch/m68k/include/asm/gpio.h @@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio)  	return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);  } +static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) +{ +	int err; + +	err = gpio_request(gpio, label); +	if (err) +		return err; + +	if (flags & GPIOF_DIR_IN) +		err = gpio_direction_input(gpio); +	else +		err = gpio_direction_output(gpio, +			(flags & GPIOF_INIT_HIGH) ? 1 : 0); + +	if (err) +		gpio_free(gpio); + +	return err; +} +  #endif diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cd2e21ff562..51244bf9727 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -18,7 +18,7 @@ config MIPS  	select HAVE_KRETPROBES  	select HAVE_DEBUG_KMEMLEAK  	select ARCH_BINFMT_ELF_RANDOMIZE_PIE -	select HAVE_ARCH_TRANSPARENT_HUGEPAGE +	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT  	select RTC_LIB if !MACH_LOONGSON  	select GENERIC_ATOMIC64 if !64BIT  	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE @@ -657,7 +657,7 @@ config SNI_RM  	bool "SNI RM200/300/400"  	select FW_ARC if CPU_LITTLE_ENDIAN  	select FW_ARC32 if CPU_LITTLE_ENDIAN -	select SNIPROM if CPU_BIG_ENDIAN +	select FW_SNIPROM if CPU_BIG_ENDIAN  	select ARCH_MAY_HAVE_PC_FDC  	select BOOT_ELF32  	select CEVT_R4K @@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION  config FW_ARC32  	bool -config SNIPROM +config FW_SNIPROM  	bool  config BOOT_ELF32 @@ -1493,7 +1493,6 @@ config CPU_XLP  	
select CPU_SUPPORTS_32BIT_KERNEL  	select CPU_SUPPORTS_64BIT_KERNEL  	select CPU_SUPPORTS_HIGHMEM -	select CPU_HAS_LLSC  	select WEAK_ORDERING  	select WEAK_REORDERING_BEYOND_LLSC  	select CPU_HAS_PREFETCH diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ed1949c2950..9aa7d44898e 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -745,10 +745,7 @@ void __init board_prom_init(void)  		strcpy(cfe_version, "unknown");  	printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); -	if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { -		printk(KERN_ERR PFX "invalid nvram checksum\n"); -		return; -	} +	bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);  	board_name = bcm63xx_nvram_get_name();  	/* find board by name */ diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index 62061168083..a4b8864f930 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c @@ -38,7 +38,7 @@ struct bcm963xx_nvram {  static struct bcm963xx_nvram nvram;  static int mac_addr_used; -int __init bcm63xx_nvram_init(void *addr) +void __init bcm63xx_nvram_init(void *addr)  {  	unsigned int check_len;  	u32 crc, expected_crc; @@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr)  	crc = crc32_le(~0, (u8 *)&nvram, check_len);  	if (crc != expected_crc) -		return -EINVAL; - -	return 0; +		pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", +			expected_crc, crc);  }  u8 *bcm63xx_nvram_get_name(void) diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 314231be788..35e18e98beb 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void)  	return board_register_devices();  } -device_initcall(bcm63xx_register_devices); +arch_initcall(bcm63xx_register_devices); diff --git a/arch/mips/cavium-octeon/setup.c 
b/arch/mips/cavium-octeon/setup.c index c594a3d4f74..b0baa299f89 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image)  static void octeon_generic_shutdown(void)  { -	int cpu, i; +	int i; +#ifdef CONFIG_SMP +	int cpu; +#endif  	struct cvmx_bootmem_desc *bootmem_desc;  	void *named_block_array_ptr; diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h index 62d6a3b4d3b..4e0b6bc1165 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h @@ -9,10 +9,8 @@   *   * Initialized the local nvram copy from the target address and checks   * its checksum. - * - * Returns 0 on success.   */ -int __init bcm63xx_nvram_init(void *nvram); +void bcm63xx_nvram_init(void *nvram);  /**   * bcm63xx_nvram_get_name() - returns the board name according to nvram diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index d9c82841903..193c0912d38 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h @@ -28,11 +28,7 @@  /* #define cpu_has_prefetch	? */  #define cpu_has_mcheck		1  /* #define cpu_has_ejtag	? */ -#ifdef CONFIG_CPU_HAS_LLSC  #define cpu_has_llsc		1 -#else -#define cpu_has_llsc		0 -#endif  /* #define cpu_has_vtag_icache	? */  /* #define cpu_has_dc_aliases	? */  /* #define cpu_has_ic_fills_f_dc ? 
*/ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 12b70c25906..0da44d422f5 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1166,7 +1166,10 @@ do {									\  	unsigned int __dspctl;						\  									\  	__asm__ __volatile__(						\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\  	"	rddsp	%0, %x1					\n"	\ +	"	.set pop					\n"	\  	: "=r" (__dspctl)						\  	: "i" (mask));							\  	__dspctl;							\ @@ -1175,30 +1178,198 @@ do {									\  #define wrdsp(val, mask)						\  do {									\  	__asm__ __volatile__(						\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\  	"	wrdsp	%0, %x1					\n"	\ +	"	.set pop					\n"	\  	:								\  	: "r" (val), "i" (mask));					\  } while (0) -#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) -#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) -#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) -#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) +#define mflo0()								\ +({									\ +	long mflo0;							\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mflo %0, $ac0					\n"	\ +	"	.set pop					\n" 	\ +	: "=r" (mflo0)); 						\ +	mflo0;								\ +}) + +#define mflo1()								\ +({									\ +	long mflo1;							\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mflo %0, $ac1					\n"	\ +	"	.set pop					\n" 	\ +	: "=r" (mflo1)); 						\ +	mflo1;								\ +}) + +#define mflo2()								\ +({									\ +	long mflo2;							\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mflo %0, $ac2					\n"	\ +	"	.set pop					\n" 	\ +	: "=r" (mflo2)); 						\ +	mflo2;								\ +}) -#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) -#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) -#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" 
: "=r" (mfhi2)); mfhi2;}) -#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) +#define mflo3()								\ +({									\ +	long mflo3;							\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mflo %0, $ac3					\n"	\ +	"	.set pop					\n" 	\ +	: "=r" (mflo3)); 						\ +	mflo3;								\ +}) -#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) -#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) -#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) -#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) +#define mfhi0()								\ +({									\ +	long mfhi0;							\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mfhi %0, $ac0					\n"	\ +	"	.set pop					\n" 	\ +	: "=r" (mfhi0)); 						\ +	mfhi0;								\ +}) -#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) -#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) -#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) -#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) +#define mfhi1()								\ +({									\ +	long mfhi1;							\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mfhi %0, $ac1					\n"	\ +	"	.set pop					\n" 	\ +	: "=r" (mfhi1)); 						\ +	mfhi1;								\ +}) + +#define mfhi2()								\ +({									\ +	long mfhi2;							\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mfhi %0, $ac2					\n"	\ +	"	.set pop					\n" 	\ +	: "=r" (mfhi2)); 						\ +	mfhi2;								\ +}) + +#define mfhi3()								\ +({									\ +	long mfhi3;							\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mfhi %0, $ac3					\n"	\ +	"	.set pop					\n" 	\ +	: "=r" (mfhi3)); 						\ +	mfhi3;								\ +}) + + +#define mtlo0(x)							\ +({									\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mtlo %0, $ac0					\n"	\ +	"	.set pop					\n"	\ +	:								\ +	: "r" (x));							\ +}) + +#define mtlo1(x)							\ +({									\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mtlo 
%0, $ac1					\n"	\ +	"	.set pop					\n"	\ +	:								\ +	: "r" (x));							\ +}) + +#define mtlo2(x)							\ +({									\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mtlo %0, $ac2					\n"	\ +	"	.set pop					\n"	\ +	:								\ +	: "r" (x));							\ +}) + +#define mtlo3(x)							\ +({									\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mtlo %0, $ac3					\n"	\ +	"	.set pop					\n"	\ +	:								\ +	: "r" (x));							\ +}) + +#define mthi0(x)							\ +({									\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mthi %0, $ac0					\n"	\ +	"	.set pop					\n"	\ +	:								\ +	: "r" (x));							\ +}) + +#define mthi1(x)							\ +({									\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mthi %0, $ac1					\n"	\ +	"	.set pop					\n"	\ +	:								\ +	: "r" (x));							\ +}) + +#define mthi2(x)							\ +({									\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mthi %0, $ac2					\n"	\ +	"	.set pop					\n"	\ +	:								\ +	: "r" (x));							\ +}) + +#define mthi3(x)							\ +({									\ +	__asm__(							\ +	"	.set push					\n"	\ +	"	.set dsp					\n"	\ +	"	mthi %0, $ac3					\n"	\ +	"	.set pop					\n"	\ +	:								\ +	: "r" (x));							\ +})  #else diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 197f6367c20..8efe5a9e2c3 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h @@ -21,6 +21,6 @@  #include <asm/sigcontext.h>  #include <asm/siginfo.h> -#define __ARCH_HAS_ODD_SIGACTION +#define __ARCH_HAS_IRIX_SIGACTION  #endif /* _ASM_SIGNAL_H */ diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index d6b18b4d0f3..addb9f556b7 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h @@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t;		/* at least 32 bits */   *   * SA_ONESHOT and SA_NOMASK are the historical Linux names for the 
Single   * Unix names RESETHAND and NODEFER respectively. + * + * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever + * supported its use and no libc was using it, so the entire sa-restorer + * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 + * retaining only the SA_RESTORER definition as a reminder to avoid + * accidental reuse of the mask bit.   */  #define SA_ONSTACK	0x08000000  #define SA_RESETHAND	0x80000000 @@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t;		/* at least 32 bits */  #define SA_NOMASK	SA_NODEFER  #define SA_ONESHOT	SA_RESETHAND -#define SA_RESTORER	0x04000000	/* Only for o32 */ -  #define MINSIGSTKSZ    2048  #define SIGSTKSZ       8192 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f81d98f6184..de75fb50562 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_mipsxx.o  obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o  # -# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe -# to enable DSP assembler support here even if the MIPS Release 2 CPU we -# are targetting does not support DSP because all code-paths making use of -# it properly check that the running CPU *actually does* support these -# instructions. +# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not +# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches +# here because the compiler may use DSP ASE instructions (such as lwx) in +# code paths where we cannot check that the CPU we are running on supports it. +# Proper abstraction using HAVE_AS_DSP and macros is done in +# arch/mips/include/asm/mipsregs.h.  
#  ifeq ($(CONFIG_CPU_MIPSR2), y)  CFLAGS_DSP 			= -DHAVE_AS_DSP -# -# Check if assembler supports DSP ASE -# -ifeq ($(call cc-option-yn,-mdsp), y) -CFLAGS_DSP			+= -mdsp -endif - -# -# Check if assembler supports DSP ASE Rev2 -# -ifeq ($(call cc-option-yn,-mdspr2), y) -CFLAGS_DSP			+= -mdspr2 -endif -  CFLAGS_signal.o			= $(CFLAGS_DSP)  CFLAGS_signal32.o		= $(CFLAGS_DSP)  CFLAGS_process.o		= $(CFLAGS_DSP) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6bfccc227a9..5fe66a0c322 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)  		c->tlbsize = 48;  		break;  	case PRID_IMP_VR41XX: +		set_isa(c, MIPS_CPU_ISA_III); +		c->options = R4K_OPTS; +		c->tlbsize = 32;  		switch (c->processor_id & 0xf0) {  		case PRID_REV_VR4111:  			c->cputype = CPU_VR4111; @@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)  				__cpu_name[cpu] = "NEC VR4131";  			} else {  				c->cputype = CPU_VR4133; +				c->options |= MIPS_CPU_LLSC;  				__cpu_name[cpu] = "NEC VR4133";  			}  			break; @@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)  			__cpu_name[cpu] = "NEC Vr41xx";  			break;  		} -		set_isa(c, MIPS_CPU_ISA_III); -		c->options = R4K_OPTS; -		c->tlbsize = 32;  		break;  	case PRID_IMP_R4300:  		c->cputype = CPU_R4300; @@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void)  	if (c->options & MIPS_CPU_FPU) {  		c->fpu_id = cpu_get_fpu_id(); -		if (c->isa_level == MIPS_CPU_ISA_M32R1 || -		    c->isa_level == MIPS_CPU_ISA_M32R2 || -		    c->isa_level == MIPS_CPU_ISA_M64R1 || -		    c->isa_level == MIPS_CPU_ISA_M64R2) { +		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | +				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {  			if (c->fpu_id & MIPS_FPIR_3D)  				c->ases |= MIPS_ASE_MIPS3D;  		} diff --git a/arch/mips/kernel/linux32.c 
b/arch/mips/kernel/linux32.c index 8eeee1c860c..db9655f0889 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,  		err = compat_sys_shmctl(first, second, compat_ptr(ptr));  		break;  	default: -		err = -EINVAL; +		err = -ENOSYS;  		break;  	} diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 16586767335..33d067148e6 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -46,10 +46,9 @@  	PTR_L	a5, PT_R9(sp)  	PTR_L	a6, PT_R10(sp)  	PTR_L	a7, PT_R11(sp) -#else -	PTR_ADDIU	sp, PT_SIZE  #endif -.endm +	PTR_ADDIU	sp, PT_SIZE +	.endm  	.macro RETURN_BACK  	jr ra @@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra)  	.globl _mcount  _mcount:  	b	ftrace_stub -	addiu sp,sp,8 +#ifdef CONFIG_32BIT +	 addiu sp,sp,8 +#else +	 nop +#endif  	/* When tracing is activated, it calls ftrace_caller+8 (aka here) */  	lw	t1, function_trace_stop diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 135c4aadccb..7a54f74b781 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)  	if (cpu_has_mips_r) {  		seq_printf(m, "isa\t\t\t:");  		if (cpu_has_mips_1) -			seq_printf(m, "%s", "mips1"); +			seq_printf(m, "%s", " mips1");  		if (cpu_has_mips_2)  			seq_printf(m, "%s", " mips2");  		if (cpu_has_mips_3) diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a200b5bdbb8..c3abb88170f 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)  #ifdef CONFIG_64BIT  	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;  #endif -	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) +	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)  		status_set |= ST0_XX;  	if (cpu_has_dsp)  		status_set |= ST0_MX; diff --git a/arch/mips/lib/bitops.c 
b/arch/mips/lib/bitops.c index 81f1dcfdcab..a64daee740e 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c @@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr,  	unsigned bit = nr & SZLONG_MASK;  	unsigned long mask;  	unsigned long flags; -	unsigned long res; +	int res;  	a += nr >> SZLONG_LOG;  	mask = 1UL << bit;  	raw_local_irq_save(flags); -	res = (mask & *a); +	res = (mask & *a) != 0;  	*a |= mask;  	raw_local_irq_restore(flags);  	return res; @@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr,  	unsigned bit = nr & SZLONG_MASK;  	unsigned long mask;  	unsigned long flags; -	unsigned long res; +	int res;  	a += nr >> SZLONG_LOG;  	mask = 1UL << bit;  	raw_local_irq_save(flags); -	res = (mask & *a); +	res = (mask & *a) != 0;  	*a |= mask;  	raw_local_irq_restore(flags);  	return res; @@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)  	unsigned bit = nr & SZLONG_MASK;  	unsigned long mask;  	unsigned long flags; -	unsigned long res; +	int res;  	a += nr >> SZLONG_LOG;  	mask = 1UL << bit;  	raw_local_irq_save(flags); -	res = (mask & *a); +	res = (mask & *a) != 0;  	*a &= ~mask;  	raw_local_irq_restore(flags);  	return res; @@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)  	unsigned bit = nr & SZLONG_MASK;  	unsigned long mask;  	unsigned long flags; -	unsigned long res; +	int res;  	a += nr >> SZLONG_LOG;  	mask = 1UL << bit;  	raw_local_irq_save(flags); -	res = (mask & *a); +	res = (mask & *a) != 0;  	*a ^= mask;  	raw_local_irq_restore(flags);  	return res; diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 507147aebd4..a6adffbb4e5 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -270,7 +270,7 @@ LEAF(csum_partial)  #endif  	/* odd buffer alignment? 
*/ -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2  	wsbh	v1, sum  	movn	sum, v1, t7  #else @@ -670,7 +670,7 @@ EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)  	addu	sum, v1  #endif -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2  	wsbh	v1, sum  	movn	sum, v1, odd  #else diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index ecca559b8d7..2078915eacb 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void)  		return;  	default: -		if (c->isa_level == MIPS_CPU_ISA_M32R1 || -		    c->isa_level == MIPS_CPU_ISA_M32R2 || -		    c->isa_level == MIPS_CPU_ISA_M64R1 || -		    c->isa_level == MIPS_CPU_ISA_M64R2) { +		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | +				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {  #ifdef CONFIG_MIPS_CPU_SCACHE  			if (mips_sc_init ()) {  				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 93d937b4b1b..df96da7e939 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void)  	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;  	/* Ignore anything but MIPSxx processors */ -	if (c->isa_level != MIPS_CPU_ISA_M32R1 && -	    c->isa_level != MIPS_CPU_ISA_M32R2 && -	    c->isa_level != MIPS_CPU_ISA_M64R1 && -	    c->isa_level != MIPS_CPU_ISA_M64R2) +	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | +			      MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))  		return 0;  	/* Does this MIPS32/MIPS64 CPU have a config2 register? */ diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 38a80c83fd6..d1faece21b6 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -19,7 +19,7 @@  #include <asm/mach-au1x00/au1000.h>  #include <asm/tlbmisc.h> -#ifdef CONFIG_DEBUG_PCI +#ifdef CONFIG_PCI_DEBUG  #define DBG(x...) printk(KERN_DEBUG x)  #else  #define DBG(x...) 
do {} while (0) @@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,  	if (status & (1 << 29)) {  		*data = 0xffffffff;  		error = -1; -		DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", +		DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",  		    access_type, bus->number, device);  	} else if ((status >> 28) & 0xf) {  		DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 256c5bf0adb..04d69c4a5ac 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -304,7 +304,7 @@ syscall_exit_work:  	subi	r12,r12,TI_FLAGS  4:	/* Anything else left to do? */ -	SET_DEFAULT_THREAD_PPR(r3, r9)		/* Set thread.ppr = 3 */ +	SET_DEFAULT_THREAD_PPR(r3, r10)		/* Set thread.ppr = 3 */  	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)  	beq	.ret_from_except_lite @@ -657,7 +657,7 @@ resume_kernel:  	/* Clear _TIF_EMULATE_STACK_STORE flag */  	lis	r11,_TIF_EMULATE_STACK_STORE@h  	addi	r5,r9,TI_FLAGS -	ldarx	r4,0,r5 +0:	ldarx	r4,0,r5  	andc	r4,r4,r11  	stdcx.	
r4,0,r5  	bne-	0b diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 59dd545fdde..16e77a81ab4 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)  		new->thread.regs->msr |=  			(MSR_FP | new->thread.fpexc_mode);  	} +#ifdef CONFIG_ALTIVEC  	if (msr & MSR_VEC) {  		do_load_up_transact_altivec(&new->thread);  		new->thread.regs->msr |= MSR_VEC;  	} +#endif  	/* We may as well turn on VSX too since all the state is restored now */  	if (msr & MSR_VSX)  		new->thread.regs->msr |= MSR_VSX; diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 3acb28e245b..95068bf569a 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,  		do_load_up_transact_fpu(¤t->thread);  		regs->msr |= (MSR_FP | current->thread.fpexc_mode);  	} +#ifdef CONFIG_ALTIVEC  	if (msr & MSR_VEC) {  		do_load_up_transact_altivec(¤t->thread);  		regs->msr |= MSR_VEC;  	} +#endif  	return 0;  } diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 995f8543cb5..c1794286098 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,  		do_load_up_transact_fpu(¤t->thread);  		regs->msr |= (MSR_FP | current->thread.fpexc_mode);  	} +#ifdef CONFIG_ALTIVEC  	if (msr & MSR_VEC) {  		do_load_up_transact_altivec(¤t->thread);  		regs->msr |= MSR_VEC;  	} +#endif  	return err;  } diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 84dbace657c..2da67e7a16d 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)  	or	r5, r6, r5			/* Set MSR.FP+.VSX/.VEC */  	mtmsr	r5 +#ifdef CONFIG_ALTIVEC  	/* FP and VEC registers:  These are 
recheckpointed from thread.fpr[]  	 * and thread.vr[] respectively.  The thread.transact_fpr[] version  	 * is more modern, and will be loaded subsequently by any FPUnavailable @@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)  	REST_32VRS(0, r5, r3)			/* r5 scratch, r3 THREAD ptr */  	ld	r5, THREAD_VRSAVE(r3)  	mtspr	SPRN_VRSAVE, r5 +#endif  dont_restore_vec:  	andi.	r0, r4, MSR_FP diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 41cefd43655..33db48a8ce2 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -26,17 +26,20 @@  #define E500_PID_NUM   3  #define E500_TLB_NUM   2 -#define E500_TLB_VALID 1 -#define E500_TLB_BITMAP 2 +/* entry is mapped somewhere in host TLB */ +#define E500_TLB_VALID		(1 << 0) +/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ +#define E500_TLB_BITMAP		(1 << 1) +/* TLB1 entry is mapped by host TLB0 */  #define E500_TLB_TLB0		(1 << 2)  struct tlbe_ref { -	pfn_t pfn; -	unsigned int flags; /* E500_TLB_* */ +	pfn_t pfn;		/* valid only for TLB0, except briefly */ +	unsigned int flags;	/* E500_TLB_* */  };  struct tlbe_priv { -	struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ +	struct tlbe_ref ref;  };  #ifdef CONFIG_KVM_E500V2 @@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {  	unsigned int gtlb_nv[E500_TLB_NUM]; -	/* -	 * information associated with each host TLB entry -- -	 * TLB1 only for now.  If/when guest TLB1 entries can be -	 * mapped with host TLB0, this will be used for that too. -	 * -	 * We don't want to use this for guest TLB0 because then we'd -	 * have the overhead of doing the translation again even if -	 * the entry is still in the guest TLB (e.g. we swapped out -	 * and back, and our host TLB entries got evicted). 
-	 */ -	struct tlbe_ref *tlb_refs[E500_TLB_NUM];  	unsigned int host_tlb1_nv;  	u32 svr; diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index a222edfb9a9..1c6a9d729df 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,  	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;  	/* Don't bother with unmapped entries */ -	if (!(ref->flags & E500_TLB_VALID)) -		return; +	if (!(ref->flags & E500_TLB_VALID)) { +		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), +		     "%s: flags %x\n", __func__, ref->flags); +		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); +	}  	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {  		u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; @@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,  					 pfn_t pfn)  {  	ref->pfn = pfn; -	ref->flags = E500_TLB_VALID; +	ref->flags |= E500_TLB_VALID;  	if (tlbe_is_writable(gtlbe))  		kvm_set_pfn_dirty(pfn); @@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,  static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)  {  	if (ref->flags & E500_TLB_VALID) { +		/* FIXME: don't log bogus pfn for TLB1 */  		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);  		ref->flags = 0;  	} @@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)  static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)  { -	int tlbsel = 0; -	int i; - -	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { -		struct tlbe_ref *ref = -			&vcpu_e500->gtlb_priv[tlbsel][i].ref; -		kvmppc_e500_ref_release(ref); -	} -} - -static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) -{ -	int stlbsel = 1; +	int tlbsel;  	int i; -	kvmppc_e500_tlbil_all(vcpu_e500); - -	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { -		struct tlbe_ref *ref = -			
&vcpu_e500->tlb_refs[stlbsel][i]; -		kvmppc_e500_ref_release(ref); +	for (tlbsel = 0; tlbsel <= 1; tlbsel++) { +		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { +			struct tlbe_ref *ref = +				&vcpu_e500->gtlb_priv[tlbsel][i].ref; +			kvmppc_e500_ref_release(ref); +		}  	} - -	clear_tlb_privs(vcpu_e500);  }  void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)  {  	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); -	clear_tlb_refs(vcpu_e500); +	kvmppc_e500_tlbil_all(vcpu_e500); +	clear_tlb_privs(vcpu_e500);  	clear_tlb1_bitmap(vcpu_e500);  } @@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,  		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);  	} -	/* Drop old ref and setup new one. */ -	kvmppc_e500_ref_release(ref);  	kvmppc_e500_ref_setup(ref, gtlbe, pfn);  	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, @@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,  	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))  		vcpu_e500->host_tlb1_nv = 0; -	vcpu_e500->tlb_refs[1][sesel] = *ref; -	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; -	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;  	if (vcpu_e500->h2g_tlb1_rmap[sesel]) { -		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; +		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;  		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);  	} -	vcpu_e500->h2g_tlb1_rmap[sesel] = esel; + +	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; +	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; +	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; +	WARN_ON(!(ref->flags & E500_TLB_VALID));  	return sesel;  } @@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,  		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,  		struct kvm_book3e_206_tlb_entry *stlbe, int esel)  { -	struct tlbe_ref ref; +	struct tlbe_ref *ref = 
&vcpu_e500->gtlb_priv[1][esel].ref;  	int sesel;  	int r; -	ref.flags = 0;  	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, -				   &ref); +				   ref);  	if (r)  		return r; @@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,  	}  	/* Otherwise map into TLB1 */ -	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel); +	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);  	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);  	return 0; @@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,  	case 0:  		priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; -		/* Triggers after clear_tlb_refs or on initial mapping */ +		/* Triggers after clear_tlb_privs or on initial mapping */  		if (!(priv->ref.flags & E500_TLB_VALID)) {  			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);  		} else { @@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)  		host_tlb_params[0].entries / host_tlb_params[0].ways;  	host_tlb_params[1].sets = 1; -	vcpu_e500->tlb_refs[0] = -		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, -			GFP_KERNEL); -	if (!vcpu_e500->tlb_refs[0]) -		goto err; - -	vcpu_e500->tlb_refs[1] = -		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, -			GFP_KERNEL); -	if (!vcpu_e500->tlb_refs[1]) -		goto err; -  	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *  					   host_tlb_params[1].entries,  					   GFP_KERNEL);  	if (!vcpu_e500->h2g_tlb1_rmap) -		goto err; +		return -EINVAL;  	return 0; - -err: -	kfree(vcpu_e500->tlb_refs[0]); -	kfree(vcpu_e500->tlb_refs[1]); -	return -EINVAL;  }  void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)  {  	kfree(vcpu_e500->h2g_tlb1_rmap); -	kfree(vcpu_e500->tlb_refs[0]); -	kfree(vcpu_e500->tlb_refs[1]);  } diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 1f89d26e65f..2f4baa074b2 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -108,6 
+108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)  {  } +static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); +  void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)  {  	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)  	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);  	mtspr(SPRN_GESR, vcpu->arch.shared->esr); -	if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) +	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || +	    __get_cpu_var(last_vcpu_on_cpu) != vcpu) {  		kvmppc_e500_tlbil_all(vcpu_e500); +		__get_cpu_var(last_vcpu_on_cpu) = vcpu; +	}  	kvmppc_load_guest_fp(vcpu);  } diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0da39fed355..299731e9036 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)  					   (0x1UL << 4), &dummy1, &dummy2);  		if (lpar_rc == H_SUCCESS)  			return i; -		BUG_ON(lpar_rc != H_NOT_FOUND); + +		/* +		 * The test for adjunct partition is performed before the +		 * ANDCOND test.  H_RESOURCE may be returned, so we need to +		 * check for that as well. +		 */ +		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);  		slot_offset++;  		slot_offset &= 0x7; diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 27cb32185ce..379d96e2105 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);  #define ioremap_nocache(addr, size)	ioremap(addr, size)  #define ioremap_wc			ioremap_nocache -/* TODO: s390 cannot support io_remap_pfn_range... 
*/ -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) 	       \ -	remap_pfn_range(vma, vaddr, pfn, size, prot) -  static inline void __iomem *ioremap(unsigned long offset, unsigned long size)  {  	return (void __iomem *) offset; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 4a2930844d4..3cb47cf0253 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -57,6 +57,10 @@ extern unsigned long zero_page_mask;  	 (((unsigned long)(vaddr)) &zero_page_mask))))  #define __HAVE_COLOR_ZERO_PAGE +/* TODO: s390 cannot support io_remap_pfn_range... */ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) 	       \ +	remap_pfn_range(vma, vaddr, pfn, size, prot) +  #endif /* !__ASSEMBLY__ */  /* @@ -344,6 +348,7 @@ extern unsigned long MODULES_END;  #define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */  /* Bits in the segment table entry */ +#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */  #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */  #define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */  #define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */ @@ -1531,7 +1536,8 @@ extern int s390_enable_sie(void);  /*   * No page table caches to initialise   */ -#define pgtable_cache_init()	do { } while (0) +static inline void pgtable_cache_init(void) { } +static inline void check_pgt_cache(void) { }  #include <asm-generic/pgtable.h> diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dff631d34b4..466fb338396 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,   * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address   * contains the (negative) exception code.   
*/ -static __always_inline unsigned long follow_table(struct mm_struct *mm, -						  unsigned long addr, int write) +#ifdef CONFIG_64BIT +static unsigned long follow_table(struct mm_struct *mm, +				  unsigned long address, int write)  { -	pgd_t *pgd; -	pud_t *pud; -	pmd_t *pmd; -	pte_t *ptep; +	unsigned long *table = (unsigned long *)__pa(mm->pgd); -	pgd = pgd_offset(mm, addr); -	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) -		return -0x3aUL; +	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { +	case _ASCE_TYPE_REGION1: +		table = table + ((address >> 53) & 0x7ff); +		if (unlikely(*table & _REGION_ENTRY_INV)) +			return -0x39UL; +		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); +	case _ASCE_TYPE_REGION2: +		table = table + ((address >> 42) & 0x7ff); +		if (unlikely(*table & _REGION_ENTRY_INV)) +			return -0x3aUL; +		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); +	case _ASCE_TYPE_REGION3: +		table = table + ((address >> 31) & 0x7ff); +		if (unlikely(*table & _REGION_ENTRY_INV)) +			return -0x3bUL; +		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); +	case _ASCE_TYPE_SEGMENT: +		table = table + ((address >> 20) & 0x7ff); +		if (unlikely(*table & _SEGMENT_ENTRY_INV)) +			return -0x10UL; +		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { +			if (write && (*table & _SEGMENT_ENTRY_RO)) +				return -0x04UL; +			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + +				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); +		} +		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); +	} +	table = table + ((address >> 12) & 0xff); +	if (unlikely(*table & _PAGE_INVALID)) +		return -0x11UL; +	if (write && (*table & _PAGE_RO)) +		return -0x04UL; +	return (*table & PAGE_MASK) + (address & ~PAGE_MASK); +} -	pud = pud_offset(pgd, addr); -	if (pud_none(*pud) || unlikely(pud_bad(*pud))) -		return -0x3bUL; +#else /* CONFIG_64BIT */ -	pmd = pmd_offset(pud, addr); -	if (pmd_none(*pmd)) -		return -0x10UL; -	if (pmd_large(*pmd)) { -		if (write && (pmd_val(*pmd) & 
_SEGMENT_ENTRY_RO)) -			return -0x04UL; -		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); -	} -	if (unlikely(pmd_bad(*pmd))) -		return -0x10UL; +static unsigned long follow_table(struct mm_struct *mm, +				  unsigned long address, int write) +{ +	unsigned long *table = (unsigned long *)__pa(mm->pgd); -	ptep = pte_offset_map(pmd, addr); -	if (!pte_present(*ptep)) +	table = table + ((address >> 20) & 0x7ff); +	if (unlikely(*table & _SEGMENT_ENTRY_INV)) +		return -0x10UL; +	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); +	table = table + ((address >> 12) & 0xff); +	if (unlikely(*table & _PAGE_INVALID))  		return -0x11UL; -	if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) +	if (write && (*table & _PAGE_RO))  		return -0x04UL; - -	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); +	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);  } +#endif /* CONFIG_64BIT */ +  static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,  					     size_t n, int write_user)  { @@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)  static size_t clear_user_pt(size_t n, void __user *to)  { -	void *zpage = &empty_zero_page; +	void *zpage = (void *) empty_zero_page;  	long done, size, ret;  	done = 0; diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index e26d430ce2f..ff18e3cfb6b 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -2,11 +2,16 @@  generic-y += clkdev.h +generic-y += cputime.h  generic-y += div64.h +generic-y += emergency-restart.h  generic-y += exec.h  generic-y += local64.h +generic-y += mutex.h  generic-y += irq_regs.h  generic-y += local.h  generic-y += module.h +generic-y += serial.h  generic-y += trace_clock.h +generic-y += types.h  generic-y += word-at-a-time.h diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h deleted file mode 100644 index 1a642b81e01..00000000000 --- 
a/arch/sparc/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_CPUTIME_H -#define __SPARC_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __SPARC_CPUTIME_H */ diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42..00000000000 --- a/arch/sparc/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc1..00000000000 --- a/arch/sparc/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. 
(see asm-generic/mutex-xchg.h for details) - */ - -#include <asm-generic/mutex-dec.h> diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 08fcce90316..7619f2f792a 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,  	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);  } +#include <asm/tlbflush.h>  #include <asm-generic/pgtable.h>  /* We provide our own get_unmapped_area to cope with VA holes and diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h deleted file mode 100644 index f90d61c2805..00000000000 --- a/arch/sparc/include/asm/serial.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_SERIAL_H -#define __SPARC_SERIAL_H - -#define BASE_BAUD ( 1843200 / 16 ) - -#endif /* __SPARC_SERIAL_H */ diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index b73da3c5f10..3c8917f054d 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,  		       unsigned long, unsigned long);  void cpu_panic(void); -extern void smp4m_irq_rotate(int cpu);  /*   *	General functions that each host system must provide. 
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);  void sun4d_init_smp(void);  void smp_callin(void); -void smp_boot_cpus(void);  void smp_store_cpu_info(int);  void smp_resched_interrupt(void); @@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);  #define raw_smp_processor_id()		(current_thread_info()->cpu) -#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier -#define prof_counter(__cpu)		cpu_data(__cpu).counter -  void smp_setup_cpu_possible_map(void);  #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index cad36f56fa0..c7de3323819 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -18,8 +18,7 @@ do {						\  	 * and 2 stores in this critical code path.  -DaveM  	 */  #define switch_to(prev, next, last)					\ -do {	flush_tlb_pending();						\ -	save_and_clear_fpu();						\ +do {	save_and_clear_fpu();						\  	/* If you are tempted to conditionalize the following */	\  	/* so that ASI is only written if it changes, think again. */	\  	__asm__ __volatile__("wr %%g0, %0, %%asi"			\ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index 2ef46349415..f0d6a9700f4 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -11,24 +11,40 @@  struct tlb_batch {  	struct mm_struct *mm;  	unsigned long tlb_nr; +	unsigned long active;  	unsigned long vaddrs[TLB_BATCH_NR];  };  extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);  extern void flush_tsb_user(struct tlb_batch *tb); +extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);  /* TLB flush operations. 
*/ -extern void flush_tlb_pending(void); +static inline void flush_tlb_mm(struct mm_struct *mm) +{ +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, +				  unsigned long vmaddr) +{ +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, +				   unsigned long start, unsigned long end) +{ +} + +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE -#define flush_tlb_range(vma,start,end)	\ -	do { (void)(start); flush_tlb_pending(); } while (0) -#define flush_tlb_page(vma,addr)	flush_tlb_pending() -#define flush_tlb_mm(mm)		flush_tlb_pending() +extern void flush_tlb_pending(void); +extern void arch_enter_lazy_mmu_mode(void); +extern void arch_leave_lazy_mmu_mode(void); +#define arch_flush_lazy_mmu_mode()      do {} while (0)  /* Local cpu only.  */  extern void __flush_tlb_all(void); - +extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);  extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);  #ifndef CONFIG_SMP @@ -38,15 +54,24 @@ do {	flush_tsb_kernel_range(start,end); \  	__flush_tlb_kernel_range(start,end); \  } while (0) +static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ +	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr); +} +  #else /* CONFIG_SMP */  extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);  #define flush_tlb_kernel_range(start, end) \  do {	flush_tsb_kernel_range(start,end); \  	smp_flush_tlb_kernel_range(start, end); \  } while (0) +#define global_flush_tlb_page(mm, vaddr) \ +	smp_flush_tlb_page(mm, vaddr) +  #endif /* ! 
CONFIG_SMP */  #endif /* _SPARC64_TLBFLUSH_H */ diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index ce175aff71b..b5843ee09fb 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild @@ -44,7 +44,6 @@ header-y += swab.h  header-y += termbits.h  header-y += termios.h  header-y += traps.h -header-y += types.h  header-y += uctx.h  header-y += unistd.h  header-y += utrap.h diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h deleted file mode 100644 index 383d156cde9..00000000000 --- a/arch/sparc/include/uapi/asm/types.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _SPARC_TYPES_H -#define _SPARC_TYPES_H -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue.  However, for interoperability, libraries still - * need to be careful to avoid a name clashes. 
- */ - -#if defined(__sparc__) - -#include <asm-generic/int-ll64.h> - -#endif /* defined(__sparc__) */ - -#endif /* defined(_SPARC_TYPES_H) */ diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 537eb66abd0..ca64d2a86ec 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)  }  extern unsigned long xcall_flush_tlb_mm; -extern unsigned long xcall_flush_tlb_pending; +extern unsigned long xcall_flush_tlb_page;  extern unsigned long xcall_flush_tlb_kernel_range;  extern unsigned long xcall_fetch_glob_regs;  extern unsigned long xcall_fetch_glob_pmu; @@ -1074,23 +1074,56 @@ local_flush_and_out:  	put_cpu();  } +struct tlb_pending_info { +	unsigned long ctx; +	unsigned long nr; +	unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ +	struct tlb_pending_info *t = info; + +	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} +  void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)  {  	u32 ctx = CTX_HWBITS(mm->context); +	struct tlb_pending_info info;  	int cpu = get_cpu(); +	info.ctx = ctx; +	info.nr = nr; +	info.vaddrs = vaddrs; +  	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)  		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));  	else -		smp_cross_call_masked(&xcall_flush_tlb_pending, -				      ctx, nr, (unsigned long) vaddrs, -				      mm_cpumask(mm)); +		smp_call_function_many(mm_cpumask(mm), tlb_pending_func, +				       &info, 1);  	__flush_tlb_pending(ctx, nr, vaddrs);  	put_cpu();  } +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ +	unsigned long context = CTX_HWBITS(mm->context); +	int cpu = get_cpu(); + +	if (mm == current->mm && atomic_read(&mm->mm_users) == 1) +		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); +	else +		smp_cross_call_masked(&xcall_flush_tlb_page, +				      context, vaddr, 0, +				      mm_cpumask(mm)); +	__flush_tlb_page(context, vaddr); + +	put_cpu(); +} 
+  void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)  {  	start &= PAGE_MASK; diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 48d00e72ce1..8ec4e9c0251 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c @@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)  void bit_map_init(struct bit_map *t, unsigned long *map, int size)  { - -	if ((size & 07) != 0) -		BUG(); -	memset(map, 0, size>>3); - +	bitmap_zero(map, size);  	memset(t, 0, sizeof *t);  	spin_lock_init(&t->lock);  	t->map = map; diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 0f4f7191fbb..28f96f27c76 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -34,7 +34,7 @@  #define IOMMU_RNGE	IOMMU_RNGE_256MB  #define IOMMU_START	0xF0000000  #define IOMMU_WINSIZE	(256*1024*1024U) -#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 265KB */ +#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */  #define IOMMU_ORDER	6				/* 4096 * (1<<6) */  /* srmmu.c */ diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c38bb72e3e8..036c2797dec 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)  		SRMMU_NOCACHE_ALIGN_MAX, 0UL);  	memset(srmmu_nocache_pool, 0, srmmu_nocache_size); -	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); +	srmmu_nocache_bitmap = +		__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long), +				SMP_CACHE_BYTES, 0UL);  	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);  	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index ba6ae7ffdc2..272aa4f7657 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);  void flush_tlb_pending(void)  {  	struct tlb_batch *tb = 
&get_cpu_var(tlb_batch); +	struct mm_struct *mm = tb->mm; -	if (tb->tlb_nr) { -		flush_tsb_user(tb); +	if (!tb->tlb_nr) +		goto out; -		if (CTX_VALID(tb->mm->context)) { +	flush_tsb_user(tb); + +	if (CTX_VALID(mm->context)) { +		if (tb->tlb_nr == 1) { +			global_flush_tlb_page(mm, tb->vaddrs[0]); +		} else {  #ifdef CONFIG_SMP  			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,  					      &tb->vaddrs[0]); @@ -37,12 +43,30 @@ void flush_tlb_pending(void)  					    tb->tlb_nr, &tb->vaddrs[0]);  #endif  		} -		tb->tlb_nr = 0;  	} +	tb->tlb_nr = 0; + +out:  	put_cpu_var(tlb_batch);  } +void arch_enter_lazy_mmu_mode(void) +{ +	struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + +	tb->active = 1; +} + +void arch_leave_lazy_mmu_mode(void) +{ +	struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + +	if (tb->tlb_nr) +		flush_tlb_pending(); +	tb->active = 0; +} +  static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,  			      bool exec)  { @@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,  		nr = 0;  	} +	if (!tb->active) { +		global_flush_tlb_page(mm, vaddr); +		flush_tsb_user_page(mm, vaddr); +		return; +	} +  	if (nr == 0)  		tb->mm = mm; diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 428982b9bec..2cc3bce5ee9 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -7,11 +7,10 @@  #include <linux/preempt.h>  #include <linux/slab.h>  #include <asm/page.h> -#include <asm/tlbflush.h> -#include <asm/tlb.h> -#include <asm/mmu_context.h>  #include <asm/pgtable.h> +#include <asm/mmu_context.h>  #include <asm/tsb.h> +#include <asm/tlb.h>  #include <asm/oplib.h>  extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)  	}  } -static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, -			    unsigned long tsb, unsigned long nentries) +static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, +				 
 unsigned long hash_shift, +				  unsigned long nentries)  { -	unsigned long i; +	unsigned long tag, ent, hash; -	for (i = 0; i < tb->tlb_nr; i++) { -		unsigned long v = tb->vaddrs[i]; -		unsigned long tag, ent, hash; +	v &= ~0x1UL; +	hash = tsb_hash(v, hash_shift, nentries); +	ent = tsb + (hash * sizeof(struct tsb)); +	tag = (v >> 22UL); -		v &= ~0x1UL; +	tsb_flush(ent, tag); +} -		hash = tsb_hash(v, hash_shift, nentries); -		ent = tsb + (hash * sizeof(struct tsb)); -		tag = (v >> 22UL); +static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, +			    unsigned long tsb, unsigned long nentries) +{ +	unsigned long i; -		tsb_flush(ent, tag); -	} +	for (i = 0; i < tb->tlb_nr; i++) +		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);  }  void flush_tsb_user(struct tlb_batch *tb) @@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)  	spin_unlock_irqrestore(&mm->context.lock, flags);  } +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) +{ +	unsigned long nentries, base, flags; + +	spin_lock_irqsave(&mm->context.lock, flags); + +	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; +	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; +	if (tlb_type == cheetah_plus || tlb_type == hypervisor) +		base = __pa(base); +	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { +		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; +		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; +		if (tlb_type == cheetah_plus || tlb_type == hypervisor) +			base = __pa(base); +		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); +	} +#endif +	spin_unlock_irqrestore(&mm->context.lock, flags); +} +  #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K  #define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index 
f8e13d421fc..432aa0cb1b3 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -53,6 +53,33 @@ __flush_tlb_mm:		/* 18 insns */  	nop  	.align		32 +	.globl		__flush_tlb_page +__flush_tlb_page:	/* 22 insns */ +	/* %o0 = context, %o1 = vaddr */ +	rdpr		%pstate, %g7 +	andn		%g7, PSTATE_IE, %g2 +	wrpr		%g2, %pstate +	mov		SECONDARY_CONTEXT, %o4 +	ldxa		[%o4] ASI_DMMU, %g2 +	stxa		%o0, [%o4] ASI_DMMU +	andcc		%o1, 1, %g0 +	andn		%o1, 1, %o3 +	be,pn		%icc, 1f +	 or		%o3, 0x10, %o3 +	stxa		%g0, [%o3] ASI_IMMU_DEMAP +1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP +	membar		#Sync +	stxa		%g2, [%o4] ASI_DMMU +	sethi		%hi(KERNBASE), %o4 +	flush		%o4 +	retl +	 wrpr		%g7, 0x0, %pstate +	nop +	nop +	nop +	nop + +	.align		32  	.globl		__flush_tlb_pending  __flush_tlb_pending:	/* 26 insns */  	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ @@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */  	retl  	 wrpr		%g7, 0x0, %pstate +__cheetah_flush_tlb_page:	/* 22 insns */ +	/* %o0 = context, %o1 = vaddr */ +	rdpr		%pstate, %g7 +	andn		%g7, PSTATE_IE, %g2 +	wrpr		%g2, 0x0, %pstate +	wrpr		%g0, 1, %tl +	mov		PRIMARY_CONTEXT, %o4 +	ldxa		[%o4] ASI_DMMU, %g2 +	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3 +	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3 +	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */ +	stxa		%o0, [%o4] ASI_DMMU +	andcc		%o1, 1, %g0 +	be,pn		%icc, 1f +	 andn		%o1, 1, %o3 +	stxa		%g0, [%o3] ASI_IMMU_DEMAP +1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	 +	membar		#Sync +	stxa		%g2, [%o4] ASI_DMMU +	sethi		%hi(KERNBASE), %o4 +	flush		%o4 +	wrpr		%g0, 0, %tl +	retl +	 wrpr		%g7, 0x0, %pstate +  __cheetah_flush_tlb_pending:	/* 27 insns */  	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */  	rdpr		%pstate, %g7 @@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */  	retl  	 nop +__hypervisor_flush_tlb_page: /* 11 insns */ +	/* %o0 = context, %o1 = vaddr */ +	mov		%o0, %g2 +	mov		%o1, %o0              /* ARG0: vaddr + IMMU-bit */ +	mov		%g2, %o1	      /* ARG1: mmu context */ +	mov		
HV_MMU_ALL, %o2	      /* ARG2: flags */ +	srlx		%o0, PAGE_SHIFT, %o0 +	sllx		%o0, PAGE_SHIFT, %o0 +	ta		HV_MMU_UNMAP_ADDR_TRAP +	brnz,pn		%o0, __hypervisor_tlb_tl0_error +	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1 +	retl +	 nop +  __hypervisor_flush_tlb_pending: /* 16 insns */  	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */  	sllx		%o1, 3, %g1 @@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:  	call		tlb_patch_one  	 mov		19, %o2 +	sethi		%hi(__flush_tlb_page), %o0 +	or		%o0, %lo(__flush_tlb_page), %o0 +	sethi		%hi(__cheetah_flush_tlb_page), %o1 +	or		%o1, %lo(__cheetah_flush_tlb_page), %o1 +	call		tlb_patch_one +	 mov		22, %o2 +  	sethi		%hi(__flush_tlb_pending), %o0  	or		%o0, %lo(__flush_tlb_pending), %o0  	sethi		%hi(__cheetah_flush_tlb_pending), %o1 @@ -397,10 +470,9 @@ xcall_flush_tlb_mm:	/* 21 insns */  	nop  	nop -	.globl		xcall_flush_tlb_pending -xcall_flush_tlb_pending:	/* 21 insns */ -	/* %g5=context, %g1=nr, %g7=vaddrs[] */ -	sllx		%g1, 3, %g1 +	.globl		xcall_flush_tlb_page +xcall_flush_tlb_page:	/* 17 insns */ +	/* %g5=context, %g1=vaddr */  	mov		PRIMARY_CONTEXT, %g4  	ldxa		[%g4] ASI_DMMU, %g2  	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -408,20 +480,16 @@ xcall_flush_tlb_pending:	/* 21 insns */  	or		%g5, %g4, %g5  	mov		PRIMARY_CONTEXT, %g4  	stxa		%g5, [%g4] ASI_DMMU -1:	sub		%g1, (1 << 3), %g1 -	ldx		[%g7 + %g1], %g5 -	andcc		%g5, 0x1, %g0 +	andcc		%g1, 0x1, %g0  	be,pn		%icc, 2f - -	 andn		%g5, 0x1, %g5 +	 andn		%g1, 0x1, %g5  	stxa		%g0, [%g5] ASI_IMMU_DEMAP  2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP  	membar		#Sync -	brnz,pt		%g1, 1b -	 nop  	stxa		%g2, [%g4] ASI_DMMU  	retry  	nop +	nop  	.globl		xcall_flush_tlb_kernel_range  xcall_flush_tlb_kernel_range:	/* 25 insns */ @@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */  	membar		#Sync  	retry -	.globl		__hypervisor_xcall_flush_tlb_pending -__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ -	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ -	sllx		%g1, 3, %g1 +	.globl		
__hypervisor_xcall_flush_tlb_page +__hypervisor_xcall_flush_tlb_page: /* 17 insns */ +	/* %g5=ctx, %g1=vaddr */  	mov		%o0, %g2  	mov		%o1, %g3  	mov		%o2, %g4 -1:	sub		%g1, (1 << 3), %g1 -	ldx		[%g7 + %g1], %o0	/* ARG0: virtual address */ +	mov		%g1, %o0	        /* ARG0: virtual address */  	mov		%g5, %o1		/* ARG1: mmu context */  	mov		HV_MMU_ALL, %o2		/* ARG2: flags */  	srlx		%o0, PAGE_SHIFT, %o0 @@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */  	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6  	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error  	 mov		%o0, %g5 -	brnz,pt		%g1, 1b -	 nop  	mov		%g2, %o0  	mov		%g3, %o1  	mov		%g4, %o2 @@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:  	call		tlb_patch_one  	 mov		10, %o2 +	sethi		%hi(__flush_tlb_page), %o0 +	or		%o0, %lo(__flush_tlb_page), %o0 +	sethi		%hi(__hypervisor_flush_tlb_page), %o1 +	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1 +	call		tlb_patch_one +	 mov		11, %o2 +  	sethi		%hi(__flush_tlb_pending), %o0  	or		%o0, %lo(__flush_tlb_pending), %o0  	sethi		%hi(__hypervisor_flush_tlb_pending), %o1 @@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:  	call		tlb_patch_one  	 mov		21, %o2 -	sethi		%hi(xcall_flush_tlb_pending), %o0 -	or		%o0, %lo(xcall_flush_tlb_pending), %o0 -	sethi		%hi(__hypervisor_xcall_flush_tlb_pending), %o1 -	or		%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 +	sethi		%hi(xcall_flush_tlb_page), %o0 +	or		%o0, %lo(xcall_flush_tlb_page), %o0 +	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1 +	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1  	call		tlb_patch_one -	 mov		21, %o2 +	 mov		17, %o2  	sethi		%hi(xcall_flush_tlb_kernel_range), %o0  	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0 diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 241c0bb60b1..c96f9bbb760 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h @@ -40,7 +40,15 @@  #include <asm/percpu.h>  #include <arch/spr_def.h> -/* Set and clear 
kernel interrupt masks. */ +/* + * Set and clear kernel interrupt masks. + * + * NOTE: __insn_mtspr() is a compiler builtin marked as a memory + * clobber.  We rely on it being equivalent to a compiler barrier in + * this code since arch_local_irq_save() and friends must act as + * compiler barriers.  This compiler semantic is baked into enough + * places that the compiler will maintain it going forward. + */  #if CHIP_HAS_SPLIT_INTR_MASK()  #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32  # error Fix assumptions about which word various interrupts are in diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index d1e15f7b59c..7a5aa1a7864 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)  #ifdef CONFIG_BLK_DEV_INITRD -/* - * Note that the kernel can potentially support other compression - * techniques than gz, though we don't do so by default.  If we ever - * decide to do so we can either look for other filename extensions, - * or just allow a file with this name to be compressed with an - * arbitrary compressor (somewhat counterintuitively). - */  static int __initdata set_initramfs_file; -static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; +static char __initdata initramfs_file[128] = "initramfs";  static int __init setup_initramfs_file(char *str)  { @@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)  early_param("initramfs_file", setup_initramfs_file);  /* - * We look for an "initramfs.cpio.gz" file in the hvfs. - * If there is one, we allocate some memory for it and it will be - * unpacked to the initramfs. + * We look for a file called "initramfs" in the hvfs.  If there is one, we + * allocate some memory for it and it will be unpacked to the initramfs. + * If it's compressed, the initd code will uncompress it first.   
*/  static void __init load_hv_initrd(void)  { @@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)  	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);  	if (fd == HV_ENOENT) { -		if (set_initramfs_file) +		if (set_initramfs_file) {  			pr_warning("No such hvfs initramfs file '%s'\n",  				   initramfs_file); -		return; +			return; +		} else { +			/* Try old backwards-compatible name. */ +			fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); +			if (fd == HV_ENOENT) +				return; +		}  	}  	BUG_ON(fd < 0);  	stat = hv_fs_fstat(fd); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 70c0f3da047..15b5cef4aa3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1549,6 +1549,7 @@ config X86_SMAP  config EFI  	bool "EFI runtime service support"  	depends on ACPI +	select UCS2_STRING  	---help---  	  This enables the kernel to use EFI runtime services that are  	  available (such as the EFI variable services). diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 8a84501acb1..5ef205c5f37 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@  # create a compressed vmlinux image from the original vmlinux  # -targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo  KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2  KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \  	$(obj)/piggy.o  $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone -$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone  ifeq ($(CONFIG_EFI_STUB), y)  	VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o @@ -43,7 +42,7 @@ 
OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S  $(obj)/vmlinux.bin: vmlinux FORCE  	$(call if_changed,objcopy) -targets += vmlinux.bin.all vmlinux.relocs +targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs  CMD_RELOCS = arch/x86/tools/relocs  quiet_cmd_relocs = RELOCS  $@ diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c205035a6b9..8615f758182 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)  	*size = len;  } +static efi_status_t setup_efi_vars(struct boot_params *params) +{ +	struct setup_data *data; +	struct efi_var_bootdata *efidata; +	u64 store_size, remaining_size, var_size; +	efi_status_t status; + +	if (!sys_table->runtime->query_variable_info) +		return EFI_UNSUPPORTED; + +	data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + +	while (data && data->next) +		data = (struct setup_data *)(unsigned long)data->next; + +	status = efi_call_phys4(sys_table->runtime->query_variable_info, +				EFI_VARIABLE_NON_VOLATILE | +				EFI_VARIABLE_BOOTSERVICE_ACCESS | +				EFI_VARIABLE_RUNTIME_ACCESS, &store_size, +				&remaining_size, &var_size); + +	if (status != EFI_SUCCESS) +		return status; + +	status = efi_call_phys3(sys_table->boottime->allocate_pool, +				EFI_LOADER_DATA, sizeof(*efidata), &efidata); + +	if (status != EFI_SUCCESS) +		return status; + +	efidata->data.type = SETUP_EFI_VARS; +	efidata->data.len = sizeof(struct efi_var_bootdata) - +		sizeof(struct setup_data); +	efidata->data.next = 0; +	efidata->store_size = store_size; +	efidata->remaining_size = remaining_size; +	efidata->max_var_size = var_size; + +	if (data) +		data->next = (unsigned long)efidata; +	else +		params->hdr.setup_data = (unsigned long)efidata; + +} +  static efi_status_t setup_efi_pci(struct boot_params *params)  {  	efi_pci_io_protocol *pci; @@ -1157,6 +1202,8 @@ struct boot_params 
*efi_main(void *handle, efi_system_table_t *_table,  	setup_graphics(boot_params); +	setup_efi_vars(boot_params); +  	setup_efi_pci(boot_params);  	status = efi_call_phys3(sys_table->boottime->allocate_pool, diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 60c89f30c72..2fb5d5884e2 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void);  extern void efi_unmap_memmap(void);  extern void efi_memory_uc(u64 addr, unsigned long size); +struct efi_var_bootdata { +	struct setup_data data; +	u64 store_size; +	u64 remaining_size; +	u64 max_var_size; +}; +  #ifdef CONFIG_EFI  static inline bool efi_is_native(void) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5edd1742cfd..7361e47db79 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)  	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);  } -void arch_flush_lazy_mmu_mode(void); +static inline void arch_flush_lazy_mmu_mode(void) +{ +	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); +}  static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,  				phys_addr_t phys, pgprot_t flags) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 142236ed83a..b3b0ec1dac8 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -91,6 +91,7 @@ struct pv_lazy_ops {  	/* Set deferred update mode, used for batching operations. 
*/  	void (*enter)(void);  	void (*leave)(void); +	void (*flush)(void);  };  struct pv_time_ops { @@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);  void paravirt_enter_lazy_mmu(void);  void paravirt_leave_lazy_mmu(void); +void paravirt_flush_lazy_mmu(void);  void _paravirt_nop(void);  u32 _paravirt_ident_32(u32); diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 1ace47b6259..2e188d68397 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[];   */  static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)  { -	return regs->orig_ax & __SYSCALL_MASK; +	return regs->orig_ax;  }  static inline void syscall_rollback(struct task_struct *task,  				    struct pt_regs *regs)  { -	regs->ax = regs->orig_ax & __SYSCALL_MASK; +	regs->ax = regs->orig_ax;  }  static inline long syscall_get_error(struct task_struct *task, diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 4fef20773b8..c7797307fc2 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -7,7 +7,7 @@  #define tlb_flush(tlb)							\  {									\ -	if (tlb->fullmm == 0)						\ +	if (!tlb->fullmm && !tlb->need_flush_all) 			\  		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);	\  	else								\  		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\ diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index c15ddaf9071..08744242b8d 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -6,6 +6,7 @@  #define SETUP_E820_EXT			1  #define SETUP_DTB			2  #define SETUP_PCI			3 +#define SETUP_EFI_VARS			4  /* ram_size flags */  #define RAMDISK_IMAGE_START_MASK	0x07FF diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index a7d26d83fb7..8f4be53ea04 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ 
b/arch/x86/kernel/cpu/mshyperv.c @@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)  	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))  		return false; -	/* -	 * Xen emulates Hyper-V to support enlightened Windows. -	 * Check to see first if we are on a Xen Hypervisor. -	 */ -	if (xen_cpuid_base()) -		return false; -  	cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,  	      &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); @@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)  	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)  		clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); -#if IS_ENABLED(CONFIG_HYPERV) -	/* -	 * Setup the IDT for hypervisor callback. -	 */ -	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); -#endif  }  const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { @@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;  void hv_register_vmbus_handler(int irq, irq_handler_t handler)  { +	/* +	 * Setup the IDT for hypervisor callback. 
+	 */ +	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); +  	vmbus_irq = irq;  	vmbus_isr = handler;  } diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index dab7580c47a..cc45deb791b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =  };  static struct extra_reg intel_snb_extra_regs[] __read_mostly = { -	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), -	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), +	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), +	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), +	EVENT_EXTRA_END +}; + +static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { +	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), +	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),  	EVENT_EXTRA_END  }; @@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void)  		x86_pmu.event_constraints = intel_snb_event_constraints;  		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;  		x86_pmu.pebs_aliases = intel_pebs_aliases_snb; -		x86_pmu.extra_regs = intel_snb_extra_regs; +		if (boot_cpu_data.x86_model == 45) +			x86_pmu.extra_regs = intel_snbep_extra_regs; +		else +			x86_pmu.extra_regs = intel_snb_extra_regs;  		/* all extra regs are per-cpu when HT is on */  		x86_pmu.er_flags |= ERF_HAS_RSP_1;  		x86_pmu.er_flags |= ERF_NO_HT_SHARING; @@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void)  		x86_pmu.event_constraints = intel_ivb_event_constraints;  		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;  		x86_pmu.pebs_aliases = intel_pebs_aliases_snb; -		x86_pmu.extra_regs = intel_snb_extra_regs; +		if (boot_cpu_data.x86_model == 62) +			x86_pmu.extra_regs = intel_snbep_extra_regs; +		else +			
x86_pmu.extra_regs = intel_snb_extra_regs;  		/* all extra regs are per-cpu when HT is on */  		x86_pmu.er_flags |= ERF_HAS_RSP_1;  		x86_pmu.er_flags |= ERF_NO_HT_SHARING; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index b05a575d56f..26830f3af0d 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void)  	if (top <= at)  		return 0; +	memset(®s, 0, sizeof(regs)); +  	ds->bts_index = ds->bts_buffer_base;  	perf_sample_data_init(&data, 0, event->hw.last_period); -	regs.ip     = 0;  	/*  	 * Prepare a generic sample, i.e. fill in the invariant fields. diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c index 577db8417d1..833d51d6ee0 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/microcode_core_early.c @@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void)  	u32 eax = 0x00000000;  	u32 ebx, ecx = 0, edx; -	if (!have_cpuid_p()) -		return X86_VENDOR_UNKNOWN; -  	native_cpuid(&eax, &ebx, &ecx, &edx);  	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) @@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void)  	return X86_VENDOR_UNKNOWN;  } +static int __cpuinit x86_family(void) +{ +	u32 eax = 0x00000001; +	u32 ebx, ecx = 0, edx; +	int x86; + +	native_cpuid(&eax, &ebx, &ecx, &edx); + +	x86 = (eax >> 8) & 0xf; +	if (x86 == 15) +		x86 += (eax >> 20) & 0xff; + +	return x86; +} +  void __init load_ucode_bsp(void)  { -	int vendor = x86_vendor(); +	int vendor, x86; + +	if (!have_cpuid_p()) +		return; -	if (vendor == X86_VENDOR_INTEL) +	vendor = x86_vendor(); +	x86 = x86_family(); + +	if (vendor == X86_VENDOR_INTEL && x86 >= 6)  		load_ucode_intel_bsp();  }  void __cpuinit load_ucode_ap(void)  { -	int vendor = x86_vendor(); +	int vendor, x86; + +	if (!have_cpuid_p()) +		return; + +	vendor = x86_vendor(); +	x86 = x86_family(); -	if 
(vendor == X86_VENDOR_INTEL) +	if (vendor == X86_VENDOR_INTEL && x86 >= 6)  		load_ucode_intel_ap();  } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 17fff18a103..8bfb335f74b 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)  	leave_lazy(PARAVIRT_LAZY_MMU);  } +void paravirt_flush_lazy_mmu(void) +{ +	preempt_disable(); + +	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { +		arch_leave_lazy_mmu_mode(); +		arch_enter_lazy_mmu_mode(); +	} + +	preempt_enable(); +} +  void paravirt_start_context_switch(struct task_struct *prev)  {  	BUG_ON(preemptible()); @@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)  	return this_cpu_read(paravirt_lazy_mode);  } -void arch_flush_lazy_mmu_mode(void) -{ -	preempt_disable(); - -	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { -		arch_leave_lazy_mmu_mode(); -		arch_enter_lazy_mmu_mode(); -	} - -	preempt_enable(); -} -  struct pv_info pv_info = {  	.name = "bare hardware",  	.paravirt_enabled = 0, @@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {  	.lazy_mode = {  		.enter = paravirt_nop,  		.leave = paravirt_nop, +		.flush = paravirt_nop,  	},  	.set_fixmap = native_set_fixmap, diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 90d8cc930f5..fae9134a2de 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void)  /*   * Keep the crash kernel below this limit.  On 32 bits earlier kernels   * would limit the kernel to the low 512 MiB due to mapping restrictions. + * On 64bit, old kexec-tools need to under 896MiB.   
*/  #ifdef CONFIG_X86_32 -# define CRASH_KERNEL_ADDR_MAX	(512 << 20) +# define CRASH_KERNEL_ADDR_LOW_MAX	(512 << 20) +# define CRASH_KERNEL_ADDR_HIGH_MAX	(512 << 20)  #else -# define CRASH_KERNEL_ADDR_MAX	MAXMEM +# define CRASH_KERNEL_ADDR_LOW_MAX	(896UL<<20) +# define CRASH_KERNEL_ADDR_HIGH_MAX	MAXMEM  #endif  static void __init reserve_crashkernel_low(void) @@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void)  	unsigned long long low_base = 0, low_size = 0;  	unsigned long total_low_mem;  	unsigned long long base; +	bool auto_set = false;  	int ret;  	total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); +	/* crashkernel=Y,low */  	ret = parse_crashkernel_low(boot_command_line, total_low_mem,  						&low_size, &base); -	if (ret != 0 || low_size <= 0) -		return; +	if (ret != 0) { +		/* +		 * two parts from lib/swiotlb.c: +		 *	swiotlb size: user specified with swiotlb= or default. +		 *	swiotlb overflow buffer: now is hardcoded to 32k. +		 *		We round it to 8M for other buffers that +		 *		may need to stay low too. +		 */ +		low_size = swiotlb_size_or_default() + (8UL<<20); +		auto_set = true; +	} else { +		/* passed with crashkernel=0,low ? 
*/ +		if (!low_size) +			return; +	}  	low_base = memblock_find_in_range(low_size, (1ULL<<32),  					low_size, alignment);  	if (!low_base) { -		pr_info("crashkernel low reservation failed - No suitable area found.\n"); +		if (!auto_set) +			pr_info("crashkernel low reservation failed - No suitable area found.\n");  		return;  	} @@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void)  	const unsigned long long alignment = 16<<20;	/* 16M */  	unsigned long long total_mem;  	unsigned long long crash_size, crash_base; +	bool high = false;  	int ret;  	total_mem = memblock_phys_mem_size(); +	/* crashkernel=XM */  	ret = parse_crashkernel(boot_command_line, total_mem,  			&crash_size, &crash_base); -	if (ret != 0 || crash_size <= 0) -		return; +	if (ret != 0 || crash_size <= 0) { +		/* crashkernel=X,high */ +		ret = parse_crashkernel_high(boot_command_line, total_mem, +				&crash_size, &crash_base); +		if (ret != 0 || crash_size <= 0) +			return; +		high = true; +	}  	/* 0 means: find the address automatically */  	if (crash_base <= 0) { @@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void)  		 *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX  		 */  		crash_base = memblock_find_in_range(alignment, -			       CRASH_KERNEL_ADDR_MAX, crash_size, alignment); +					high ? 
CRASH_KERNEL_ADDR_HIGH_MAX : +					       CRASH_KERNEL_ADDR_LOW_MAX, +					crash_size, alignment);  		if (!crash_base) {  			pr_info("crashkernel reservation failed - No suitable area found.\n"); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 02b51dd4e4a..f77df1c5de6 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)  	if (!pv_eoi_enabled(vcpu))  		return 0;  	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, -					 addr); +					 addr, sizeof(u8));  }  void kvm_lapic_init(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f19ac0aca60..e1721324c27 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)  		return 0;  	} -	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) +	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, +					sizeof(u32)))  		return 1;  	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)  		gpa_offset = data & ~(PAGE_MASK | 1); -		/* Check that the address is 32-byte aligned. 
*/ -		if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)) -			break; -  		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, -		     &vcpu->arch.pv_time, data & ~1ULL)) +		     &vcpu->arch.pv_time, data & ~1ULL, +		     sizeof(struct pvclock_vcpu_time_info)))  			vcpu->arch.pv_time_enabled = false;  		else  			vcpu->arch.pv_time_enabled = true; @@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)  			return 1;  		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, -							data & KVM_STEAL_VALID_BITS)) +						data & KVM_STEAL_VALID_BITS, +						sizeof(struct kvm_steal_time)))  			return 1;  		vcpu->arch.st.msr_val = data; diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 1cbd89ca556..7114c63f047 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1334,6 +1334,7 @@ __init void lguest_init(void)  	pv_mmu_ops.read_cr3 = lguest_read_cr3;  	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;  	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; +	pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;  	pv_mmu_ops.pte_update = lguest_pte_update;  	pv_mmu_ops.pte_update_defer = lguest_pte_update; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2b97525246d..0e883364abb 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)  	if (pgd_none(*pgd_ref))  		return -1; -	if (pgd_none(*pgd)) +	if (pgd_none(*pgd)) {  		set_pgd(pgd, *pgd_ref); -	else +		arch_flush_lazy_mmu_mode(); +	} else {  		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); +	}  	/*  	 * Below here mismatches are bugs because these lower tables diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index b0086567271..0e38951e65e 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -68,7 +68,7 @@ static int print_split(struct split_state *s)  			s->gpg++;  			i += 
GPS/PAGE_SIZE;  		} else if (level == PG_LEVEL_2M) { -			if (!(pte_val(*pte) & _PAGE_PSE)) { +			if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {  				printk(KERN_ERR  					"%lx level %d but not PSE %Lx\n",  					addr, level, (u64)pte_val(*pte)); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 091934e1d0d..fb4e73ec24d 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,  	 * We are safe now. Check whether the new pgprot is the same:  	 */  	old_pte = *kpte; -	old_prot = new_prot = req_prot = pte_pgprot(old_pte); +	old_prot = req_prot = pte_pgprot(old_pte);  	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);  	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); @@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,  	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL  	 * for the ancient hardware that doesn't support it.  	 */ -	if (pgprot_val(new_prot) & _PAGE_PRESENT) -		pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; +	if (pgprot_val(req_prot) & _PAGE_PRESENT) +		pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;  	else -		pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); +		pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); -	new_prot = canon_pgprot(new_prot); +	req_prot = canon_pgprot(req_prot);  	/*  	 * old_pte points to the large page base address. 
So we need @@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)  	 * but that can deadlock->flush only current cpu:  	 */  	__flush_tlb_all(); + +	arch_flush_lazy_mmu_mode();  }  #ifdef CONFIG_HIBERNATION diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 193350b51f9..17fda6a8b3c 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)  void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)  {  	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); +	/* +	 * NOTE! For PAE, any changes to the top page-directory-pointer-table +	 * entries need a full cr3 reload to flush. +	 */ +#ifdef CONFIG_X86_PAE +	tlb->need_flush_all = 1; +#endif  	tlb_remove_page(tlb, virt_to_page(pmd));  } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 5f2ecaf3f9d..e4a86a677ce 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -41,6 +41,7 @@  #include <linux/io.h>  #include <linux/reboot.h>  #include <linux/bcd.h> +#include <linux/ucs2_string.h>  #include <asm/setup.h>  #include <asm/efi.h> @@ -51,6 +52,13 @@  #define EFI_DEBUG	1 +/* + * There's some additional metadata associated with each + * variable. 
Intel's reference implementation is 60 bytes - bump that + * to account for potential alignment constraints + */ +#define VAR_METADATA_SIZE 64 +  struct efi __read_mostly efi = {  	.mps        = EFI_INVALID_TABLE_ADDR,  	.acpi       = EFI_INVALID_TABLE_ADDR, @@ -69,6 +77,13 @@ struct efi_memory_map memmap;  static struct efi efi_phys __initdata;  static efi_system_table_t efi_systab __initdata; +static u64 efi_var_store_size; +static u64 efi_var_remaining_size; +static u64 efi_var_max_var_size; +static u64 boot_used_size; +static u64 boot_var_size; +static u64 active_size; +  unsigned long x86_efi_facility;  /* @@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg)  }  early_param("add_efi_memmap", setup_add_efi_memmap); +static bool efi_no_storage_paranoia; + +static int __init setup_storage_paranoia(char *arg) +{ +	efi_no_storage_paranoia = true; +	return 0; +} +early_param("efi_no_storage_paranoia", setup_storage_paranoia); +  static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)  { @@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,  					       efi_char16_t *name,  					       efi_guid_t *vendor)  { -	return efi_call_virt3(get_next_variable, -			      name_size, name, vendor); +	efi_status_t status; +	static bool finished = false; +	static u64 var_size; + +	status = efi_call_virt3(get_next_variable, +				name_size, name, vendor); + +	if (status == EFI_NOT_FOUND) { +		finished = true; +		if (var_size < boot_used_size) { +			boot_var_size = boot_used_size - var_size; +			active_size += boot_var_size; +		} else { +			printk(KERN_WARNING FW_BUG  "efi: Inconsistent initial sizes\n"); +		} +	} + +	if (boot_used_size && !finished) { +		unsigned long size; +		u32 attr; +		efi_status_t s; +		void *tmp; + +		s = virt_efi_get_variable(name, vendor, &attr, &size, NULL); + +		if (s != EFI_BUFFER_TOO_SMALL || !size) +			return status; + +		tmp = kmalloc(size, GFP_ATOMIC); + +		if (!tmp) +			return 
status; + +		s = virt_efi_get_variable(name, vendor, &attr, &size, tmp); + +		if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) { +			var_size += size; +			var_size += ucs2_strsize(name, 1024); +			active_size += size; +			active_size += VAR_METADATA_SIZE; +			active_size += ucs2_strsize(name, 1024); +		} + +		kfree(tmp); +	} + +	return status;  }  static efi_status_t virt_efi_set_variable(efi_char16_t *name, @@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,  					  unsigned long data_size,  					  void *data)  { -	return efi_call_virt5(set_variable, -			      name, vendor, attr, -			      data_size, data); +	efi_status_t status; +	u32 orig_attr = 0; +	unsigned long orig_size = 0; + +	status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size, +				       NULL); + +	if (status != EFI_BUFFER_TOO_SMALL) +		orig_size = 0; + +	status = efi_call_virt5(set_variable, +				name, vendor, attr, +				data_size, data); + +	if (status == EFI_SUCCESS) { +		if (orig_size) { +			active_size -= orig_size; +			active_size -= ucs2_strsize(name, 1024); +			active_size -= VAR_METADATA_SIZE; +		} +		if (data_size) { +			active_size += data_size; +			active_size += ucs2_strsize(name, 1024); +			active_size += VAR_METADATA_SIZE; +		} +	} + +	return status;  }  static efi_status_t virt_efi_query_variable_info(u32 attr, @@ -682,6 +776,9 @@ void __init efi_init(void)  	char vendor[100] = "unknown";  	int i = 0;  	void *tmp; +	struct setup_data *data; +	struct efi_var_bootdata *efi_var_data; +	u64 pa_data;  #ifdef CONFIG_X86_32  	if (boot_params.efi_info.efi_systab_hi || @@ -699,6 +796,22 @@ void __init efi_init(void)  	if (efi_systab_init(efi_phys.systab))  		return; +	pa_data = boot_params.hdr.setup_data; +	while (pa_data) { +		data = early_ioremap(pa_data, sizeof(*efi_var_data)); +		if (data->type == SETUP_EFI_VARS) { +			efi_var_data = (struct efi_var_bootdata *)data; + +			efi_var_store_size = efi_var_data->store_size; +			
efi_var_remaining_size = efi_var_data->remaining_size; +			efi_var_max_var_size = efi_var_data->max_var_size; +		} +		pa_data = data->next; +		early_iounmap(data, sizeof(*efi_var_data)); +	} + +	boot_used_size = efi_var_store_size - efi_var_remaining_size; +  	set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);  	/* @@ -999,3 +1112,48 @@ u64 efi_mem_attributes(unsigned long phys_addr)  	}  	return 0;  } + +/* + * Some firmware has serious problems when using more than 50% of the EFI + * variable store, i.e. it triggers bugs that can brick machines. Ensure that + * we never use more than this safe limit. + * + * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable + * store. + */ +efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) +{ +	efi_status_t status; +	u64 storage_size, remaining_size, max_size; + +	status = efi.query_variable_info(attributes, &storage_size, +					 &remaining_size, &max_size); +	if (status != EFI_SUCCESS) +		return status; + +	if (!max_size && remaining_size > size) +		printk_once(KERN_ERR FW_BUG "Broken EFI implementation" +			    " is returning MaxVariableSize=0\n"); +	/* +	 * Some firmware implementations refuse to boot if there's insufficient +	 * space in the variable store. We account for that by refusing the +	 * write if permitting it would reduce the available space to under +	 * 50%. However, some firmware won't reclaim variable space until +	 * after the used (not merely the actively used) space drops below +	 * a threshold. We can approximate that case with the value calculated +	 * above. If both the firmware and our calculations indicate that the +	 * available space would drop below 50%, refuse the write. 
+	 */ + +	if (!storage_size || size > remaining_size || +	    (max_size && size > max_size)) +		return EFI_OUT_OF_RESOURCES; + +	if (!efi_no_storage_paranoia && +	    ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && +	     (remaining_size - size < storage_size / 2))) +		return EFI_OUT_OF_RESOURCES; + +	return EFI_SUCCESS; +} +EXPORT_SYMBOL_GPL(efi_query_variable_store); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 6afbb2ca9a0..e006c18d288 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1748,14 +1748,18 @@ static void *m2v(phys_addr_t maddr)  }  /* Set the page permissions on an identity-mapped pages */ -static void set_page_prot(void *addr, pgprot_t prot) +static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)  {  	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;  	pte_t pte = pfn_pte(pfn, prot); -	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) +	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))  		BUG();  } +static void set_page_prot(void *addr, pgprot_t prot) +{ +	return set_page_prot_flags(addr, prot, UVMF_NONE); +}  #ifdef CONFIG_X86_32  static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)  { @@ -1839,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,  				 unsigned long addr)  {  	if (*pt_base == PFN_DOWN(__pa(addr))) { -		set_page_prot((void *)addr, PAGE_KERNEL); +		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);  		clear_page((void *)addr);  		(*pt_base)++;  	}  	if (*pt_end == PFN_DOWN(__pa(addr))) { -		set_page_prot((void *)addr, PAGE_KERNEL); +		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);  		clear_page((void *)addr);  		(*pt_end)--;  	} @@ -2196,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {  	.lazy_mode = {  		.enter = paravirt_enter_lazy_mmu,  		.leave = xen_leave_lazy_mmu, +		.flush = paravirt_flush_lazy_mmu,  	},  	.set_fixmap = 
xen_set_fixmap, diff --git a/block/blk-core.c b/block/blk-core.c index 074b758efc4..7c288358a74 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -39,6 +39,7 @@  EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);  EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);  EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);  DEFINE_IDA(blk_queue_ida); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 6206a934eb8..5efc5a64718 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -229,6 +229,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \  	unsigned long val;						\  	ssize_t ret;							\  	ret = queue_var_store(&val, page, count);			\ +	if (ret < 0)							\ +		 return ret;						\  	if (neg)							\  		val = !val;						\  									\ diff --git a/block/partition-generic.c b/block/partition-generic.c index ae95ee6a58a..789cdea0589 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -257,7 +257,6 @@ void delete_partition(struct gendisk *disk, int partno)  	hd_struct_put(part);  } -EXPORT_SYMBOL(delete_partition);  static ssize_t whole_disk_show(struct device *dev,  			       struct device_attribute *attr, char *buf) diff --git a/crypto/gcm.c b/crypto/gcm.c index 137ad1ec543..13ccbda34ff 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx {  struct crypto_rfc4543_req_ctx {  	u8 auth_tag[16]; +	u8 assocbuf[32];  	struct scatterlist cipher[1];  	struct scatterlist payload[2];  	struct scatterlist assoc[2]; @@ -1133,9 +1134,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,  	scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);  	assoclen += 8 + req->cryptlen - (enc ? 
0 : authsize); -	sg_init_table(assoc, 2); -	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, -		    req->assoc->offset); +	if (req->assoc->length == req->assoclen) { +		sg_init_table(assoc, 2); +		sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, +			    req->assoc->offset); +	} else { +		BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); + +		scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, +					 req->assoclen, 0); + +		sg_init_table(assoc, 2); +		sg_set_buf(assoc, rctx->assocbuf, req->assoclen); +	}  	scatterwalk_crypto_chain(assoc, payload, 0, 2);  	aead_request_set_tfm(subreq, ctx->child); diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 92ed9692c47..4bf68c8d479 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD  config ACPI_BGRT  	bool "Boottime Graphics Resource Table support" -	depends on EFI +	depends on EFI && X86          help  	  This driver adds support for exposing the ACPI Boottime Graphics  	  Resource Table, which allows the operating system to obtain diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c index 82045e3f5ca..a82c7626aa9 100644 --- a/drivers/acpi/acpi_i2c.c +++ b/drivers/acpi/acpi_i2c.c @@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter)  	acpi_handle handle;  	acpi_status status; -	handle = ACPI_HANDLE(&adapter->dev); +	handle = ACPI_HANDLE(adapter->dev.parent);  	if (!handle)  		return; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 5ff17306612..6ae5e440436 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -415,7 +415,6 @@ static int acpi_pci_root_add(struct acpi_device *device,  	struct acpi_pci_root *root;  	struct acpi_pci_driver *driver;  	u32 flags, base_flags; -	bool is_osc_granted = false;  	root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);  	if (!root) @@ -476,6 +475,30 @@ static int acpi_pci_root_add(struct acpi_device *device,  	flags = 
base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;  	acpi_pci_osc_support(root, flags); +	/* +	 * TBD: Need PCI interface for enumeration/configuration of roots. +	 */ + +	mutex_lock(&acpi_pci_root_lock); +	list_add_tail(&root->node, &acpi_pci_roots); +	mutex_unlock(&acpi_pci_root_lock); + +	/* +	 * Scan the Root Bridge +	 * -------------------- +	 * Must do this prior to any attempt to bind the root device, as the +	 * PCI namespace does not get created until this call is made (and +	 * thus the root bridge's pci_dev does not exist). +	 */ +	root->bus = pci_acpi_scan_root(root); +	if (!root->bus) { +		printk(KERN_ERR PREFIX +			    "Bus %04x:%02x not present in PCI namespace\n", +			    root->segment, (unsigned int)root->secondary.start); +		result = -ENODEV; +		goto out_del_root; +	} +  	/* Indicate support for various _OSC capabilities. */  	if (pci_ext_cfg_avail())  		flags |= OSC_EXT_PCI_CONFIG_SUPPORT; @@ -494,6 +517,7 @@ static int acpi_pci_root_add(struct acpi_device *device,  			flags = base_flags;  		}  	} +  	if (!pcie_ports_disabled  	    && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {  		flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL @@ -514,54 +538,28 @@ static int acpi_pci_root_add(struct acpi_device *device,  		status = acpi_pci_osc_control_set(device->handle, &flags,  				       OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);  		if (ACPI_SUCCESS(status)) { -			is_osc_granted = true;  			dev_info(&device->dev,  				"ACPI _OSC control (0x%02x) granted\n", flags); +			if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { +				/* +				 * We have ASPM control, but the FADT indicates +				 * that it's unsupported. Clear it. 
+				 */ +				pcie_clear_aspm(root->bus); +			}  		} else { -			is_osc_granted = false;  			dev_info(&device->dev,  				"ACPI _OSC request failed (%s), "  				"returned control mask: 0x%02x\n",  				acpi_format_exception(status), flags); +			pr_info("ACPI _OSC control for PCIe not granted, " +				"disabling ASPM\n"); +			pcie_no_aspm();  		}  	} else {  		dev_info(&device->dev, -			"Unable to request _OSC control " -			"(_OSC support mask: 0x%02x)\n", flags); -	} - -	/* -	 * TBD: Need PCI interface for enumeration/configuration of roots. -	 */ - -	mutex_lock(&acpi_pci_root_lock); -	list_add_tail(&root->node, &acpi_pci_roots); -	mutex_unlock(&acpi_pci_root_lock); - -	/* -	 * Scan the Root Bridge -	 * -------------------- -	 * Must do this prior to any attempt to bind the root device, as the -	 * PCI namespace does not get created until this call is made (and  -	 * thus the root bridge's pci_dev does not exist). -	 */ -	root->bus = pci_acpi_scan_root(root); -	if (!root->bus) { -		printk(KERN_ERR PREFIX -			    "Bus %04x:%02x not present in PCI namespace\n", -			    root->segment, (unsigned int)root->secondary.start); -		result = -ENODEV; -		goto out_del_root; -	} - -	/* ASPM setting */ -	if (is_osc_granted) { -		if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) -			pcie_clear_aspm(root->bus); -	} else { -		pr_info("ACPI _OSC control for PCIe not granted, " -			"disabling ASPM\n"); -		pcie_no_aspm(); +			 "Unable to request _OSC control " +			 "(_OSC support mask: 0x%02x)\n", flags);  	}  	pci_acpi_add_bus_pm_notifier(device, root->bus); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index fc95308e9a1..ee255c60bda 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644);  static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); -static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX]; +static DEFINE_PER_CPU(struct acpi_processor_cx * 
[CPUIDLE_STATE_MAX], +								acpi_cstate);  static int disabled_by_idle_boot_param(void)  { @@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,  		struct cpuidle_driver *drv, int index)  {  	struct acpi_processor *pr; -	struct acpi_processor_cx *cx = acpi_cstate[index]; +	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);  	pr = __this_cpu_read(processors); @@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,   */  static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)  { -	struct acpi_processor_cx *cx = acpi_cstate[index]; +	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);  	ACPI_FLUSH_CPU_CACHE(); @@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,  		struct cpuidle_driver *drv, int index)  {  	struct acpi_processor *pr; -	struct acpi_processor_cx *cx = acpi_cstate[index]; +	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);  	pr = __this_cpu_read(processors); @@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,  		struct cpuidle_driver *drv, int index)  {  	struct acpi_processor *pr; -	struct acpi_processor_cx *cx = acpi_cstate[index]; +	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);  	pr = __this_cpu_read(processors); @@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,  		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))  			continue;  #endif -		acpi_cstate[count] = cx; +		per_cpu(acpi_cstate[count], dev->cpu) = cx;  		count++;  		if (count == CPUIDLE_STATE_MAX) diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index ffdd32d2260..2f48123d74c 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -150,6 +150,7 @@ enum piix_controller_ids {  	tolapai_sata,  	piix_pata_vmw,			/* PIIX4 for VMware, spurious DMA_ERR */  	ich8_sata_snb, +	ich8_2port_sata_snb,  };  struct piix_map_db { @@ -304,7 +305,7 
@@ static const struct pci_device_id piix_pci_tbl[] = {  	/* SATA Controller IDE (Lynx Point) */  	{ 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },  	/* SATA Controller IDE (Lynx Point) */ -	{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, +	{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },  	/* SATA Controller IDE (Lynx Point) */  	{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },  	/* SATA Controller IDE (Lynx Point-LP) */ @@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = {  	[ich8m_apple_sata]	= &ich8m_apple_map_db,  	[tolapai_sata]		= &tolapai_map_db,  	[ich8_sata_snb]		= &ich8_map_db, +	[ich8_2port_sata_snb]	= &ich8_2port_map_db,  };  static struct pci_bits piix_enable_bits[] = { @@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = {  		.udma_mask	= ATA_UDMA6,  		.port_ops	= &piix_sata_ops,  	}, + +	[ich8_2port_sata_snb] = +	{ +		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR +					| PIIX_FLAG_PIO16, +		.pio_mask	= ATA_PIO4, +		.mwdma_mask	= ATA_MWDMA2, +		.udma_mask	= ATA_UDMA6, +		.port_ops	= &piix_sata_ops, +	},  };  #define AHCI_PCI_BAR 5 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 497adea1f0d..63c743baf92 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev)  		 * from SATA Settings page of Identify Device Data Log.  		 
*/  		if (ata_id_has_devslp(dev->id)) { -			u8 sata_setting[ATA_SECT_SIZE]; +			u8 *sata_setting = ap->sector_buf;  			int i, j;  			dev->flags |= ATA_DFLAG_DEVSLP; @@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev)  		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,  					 dev->max_sectors); +	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) +		dev->max_sectors = ATA_MAX_SECTORS_LBA48; +  	if (ap->ops->dev_config)  		ap->ops->dev_config(dev); @@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {  	/* Weird ATAPI devices */  	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },  	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA }, +	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },  	/* Devices we expect to fail diagnostics */ diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 318b4135818..ff44787e5a4 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)  			struct scsi_sense_hdr sshdr;  			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,  					     &sshdr); -			if (sshdr.sense_key == 0 && -			    sshdr.asc == 0 && sshdr.ascq == 0) +			if (sshdr.sense_key == RECOVERED_ERROR && +			    sshdr.asc == 0 && sshdr.ascq == 0x1d)  				cmd_result &= ~SAM_STAT_CHECK_CONDITION;  		} @@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)  			struct scsi_sense_hdr sshdr;  			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,  						&sshdr); -			if (sshdr.sense_key == 0 && -				sshdr.asc == 0 && sshdr.ascq == 0) +			if (sshdr.sense_key == RECOVERED_ERROR && +			    sshdr.asc == 0 && sshdr.ascq == 0x1d)  				cmd_result &= ~SAM_STAT_CHECK_CONDITION;  		} diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5f74587ef25..71671c42ef4 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ 
-46,6 +46,7 @@  #include "power.h"  static DEFINE_MUTEX(dev_pm_qos_mtx); +static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);  static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); @@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  	struct pm_qos_constraints *c;  	struct pm_qos_flags *f; -	mutex_lock(&dev_pm_qos_mtx); +	mutex_lock(&dev_pm_qos_sysfs_mtx);  	/*  	 * If the device's PM QoS resume latency limit or PM QoS flags have been  	 * exposed to user space, they have to be hidden at this point.  	 */ +	pm_qos_sysfs_remove_latency(dev); +	pm_qos_sysfs_remove_flags(dev); + +	mutex_lock(&dev_pm_qos_mtx); +  	__dev_pm_qos_hide_latency_limit(dev);  	__dev_pm_qos_hide_flags(dev); @@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)   out:  	mutex_unlock(&dev_pm_qos_mtx); + +	mutex_unlock(&dev_pm_qos_sysfs_mtx);  }  /** @@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,  	kfree(req);  } +static void dev_pm_qos_drop_user_request(struct device *dev, +					 enum dev_pm_qos_req_type type) +{ +	mutex_lock(&dev_pm_qos_mtx); +	__dev_pm_qos_drop_user_request(dev, type); +	mutex_unlock(&dev_pm_qos_mtx); +} +  /**   * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.   * @dev: Device whose PM QoS latency limit is to be exposed to user space. 
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)  		return ret;  	} +	mutex_lock(&dev_pm_qos_sysfs_mtx); +  	mutex_lock(&dev_pm_qos_mtx);  	if (IS_ERR_OR_NULL(dev->power.qos)) @@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)  	if (ret < 0) {  		__dev_pm_qos_remove_request(req);  		kfree(req); +		mutex_unlock(&dev_pm_qos_mtx);  		goto out;  	} -  	dev->power.qos->latency_req = req; + +	mutex_unlock(&dev_pm_qos_mtx); +  	ret = pm_qos_sysfs_add_latency(dev);  	if (ret) -		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); +		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);   out: -	mutex_unlock(&dev_pm_qos_mtx); +	mutex_unlock(&dev_pm_qos_sysfs_mtx);  	return ret;  }  EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);  static void __dev_pm_qos_hide_latency_limit(struct device *dev)  { -	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { -		pm_qos_sysfs_remove_latency(dev); +	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)  		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); -	}  }  /** @@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev)   */  void dev_pm_qos_hide_latency_limit(struct device *dev)  { +	mutex_lock(&dev_pm_qos_sysfs_mtx); + +	pm_qos_sysfs_remove_latency(dev); +  	mutex_lock(&dev_pm_qos_mtx);  	__dev_pm_qos_hide_latency_limit(dev);  	mutex_unlock(&dev_pm_qos_mtx); + +	mutex_unlock(&dev_pm_qos_sysfs_mtx);  }  EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); @@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)  	}  	pm_runtime_get_sync(dev); +	mutex_lock(&dev_pm_qos_sysfs_mtx); +  	mutex_lock(&dev_pm_qos_mtx);  	if (IS_ERR_OR_NULL(dev->power.qos)) @@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)  	if (ret < 0) {  		__dev_pm_qos_remove_request(req);  		kfree(req); +		mutex_unlock(&dev_pm_qos_mtx);  		goto out;  	} -  	
dev->power.qos->flags_req = req; + +	mutex_unlock(&dev_pm_qos_mtx); +  	ret = pm_qos_sysfs_add_flags(dev);  	if (ret) -		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); +		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);   out: -	mutex_unlock(&dev_pm_qos_mtx); +	mutex_unlock(&dev_pm_qos_sysfs_mtx);  	pm_runtime_put(dev);  	return ret;  } @@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);  static void __dev_pm_qos_hide_flags(struct device *dev)  { -	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { -		pm_qos_sysfs_remove_flags(dev); +	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)  		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); -	}  }  /** @@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev)  void dev_pm_qos_hide_flags(struct device *dev)  {  	pm_runtime_get_sync(dev); +	mutex_lock(&dev_pm_qos_sysfs_mtx); + +	pm_qos_sysfs_remove_flags(dev); +  	mutex_lock(&dev_pm_qos_mtx);  	__dev_pm_qos_hide_flags(dev);  	mutex_unlock(&dev_pm_qos_mtx); + +	mutex_unlock(&dev_pm_qos_sysfs_mtx);  	pm_runtime_put(dev);  }  EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e6732cf7c06..79f4fca9877 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,  			base = 0;  		if (max < rbnode->base_reg + rbnode->blklen) -			end = rbnode->base_reg + rbnode->blklen - max; +			end = max - rbnode->base_reg + 1;  		else  			end = rbnode->blklen; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 3d2367501fd..58cfb323242 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -710,12 +710,12 @@ skip_format_initialization:  		}  	} +	regmap_debugfs_init(map, config->name); +  	ret = regcache_init(map, config);  	if (ret != 0)  		goto 
err_range; -	regmap_debugfs_init(map, config->name); -  	/* Add a devres resource for dev_get_regmap() */  	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);  	if (!m) { @@ -1036,6 +1036,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,  			kfree(async->work_buf);  			kfree(async);  		} + +		return ret;  	}  	trace_regmap_hw_write_start(map->dev, reg, diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 25ef5c014fc..92b6d7c51e3 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -51,8 +51,9 @@ new_skb(ulong len)  {  	struct sk_buff *skb; -	skb = alloc_skb(len, GFP_ATOMIC); +	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);  	if (skb) { +		skb_reserve(skb, MAX_HEADER);  		skb_reset_mac_header(skb);  		skb_reset_network_header(skb);  		skb->protocol = __constant_htons(ETH_P_AOE); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index fe5f6403417..dfe758382ea 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,  		lo->lo_flags |= LO_FLAGS_PARTSCAN;  	if (lo->lo_flags & LO_FLAGS_PARTSCAN)  		ioctl_by_bdev(bdev, BLKRRPART, 0); + +	/* Grab the block_device to prevent its destruction after we +	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). +	 */ +	bdgrab(bdev);  	return 0;  out_clr: @@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo)  	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);  	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);  	memset(lo->lo_file_name, 0, LO_NAME_SIZE); -	if (bdev) +	if (bdev) { +		bdput(bdev);  		invalidate_bdev(bdev); +	}  	set_capacity(lo->lo_disk, 0);  	loop_sysfs_exit(lo);  	if (bdev) { @@ -1044,29 +1051,12 @@ static int loop_clr_fd(struct loop_device *lo)  	lo->lo_state = Lo_unbound;  	/* This is safe: open() is still holding a reference. 
*/  	module_put(THIS_MODULE); +	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) +		ioctl_by_bdev(bdev, BLKRRPART, 0);  	lo->lo_flags = 0;  	if (!part_shift)  		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;  	mutex_unlock(&lo->lo_ctl_mutex); - -	/* -	 * Remove all partitions, since BLKRRPART won't remove user -	 * added partitions when max_part=0 -	 */ -	if (bdev) { -		struct disk_part_iter piter; -		struct hd_struct *part; - -		mutex_lock_nested(&bdev->bd_mutex, 1); -		invalidate_partition(bdev->bd_disk, 0); -		disk_part_iter_init(&piter, bdev->bd_disk, -					DISK_PITER_INCL_EMPTY); -		while ((part = disk_part_iter_next(&piter))) -			delete_partition(bdev->bd_disk, part->partno); -		disk_part_iter_exit(&piter); -		mutex_unlock(&bdev->bd_mutex); -	} -  	/*  	 * Need not hold lo_ctl_mutex to fput backing file.  	 * Calling fput holding lo_ctl_mutex triggers a circular diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 92250af84e7..32c678028e5 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -81,12 +81,17 @@  /* Device instance number, incremented each time a device is probed. */  static int instance; +struct list_head online_list; +struct list_head removing_list; +spinlock_t dev_lock; +  /*   * Global variable used to hold the major block device number   * allocated in mtip_init().   */  static int mtip_major;  static struct dentry *dfs_parent; +static struct dentry *dfs_device_status;  static u32 cpu_use[NR_CPUS]; @@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag)  /*   * Reset the HBA (without sleeping)   * - * Just like hba_reset, except does not call sleep, so can be - * run from interrupt/tasklet context. - *   * @dd Pointer to the driver data structure.   *   * return value   *	0	The reset was successful.   *	-1	The HBA Reset bit did not clear.   
*/ -static int hba_reset_nosleep(struct driver_data *dd) +static int mtip_hba_reset(struct driver_data *dd)  {  	unsigned long timeout; -	/* Chip quirk: quiesce any chip function */ -	mdelay(10); -  	/* Set the reset bit */  	writel(HOST_RESET, dd->mmio + HOST_CTL);  	/* Flush */  	readl(dd->mmio + HOST_CTL); -	/* -	 * Wait 10ms then spin for up to 1 second -	 * waiting for reset acknowledgement -	 */ -	timeout = jiffies + msecs_to_jiffies(1000); -	mdelay(10); -	while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) -		 && time_before(jiffies, timeout)) -		mdelay(1); +	/* Spin for up to 2 seconds, waiting for reset acknowledgement */ +	timeout = jiffies + msecs_to_jiffies(2000); +	do { +		mdelay(10); +		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) +			return -1; -	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) -		return -1; +	} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) +		 && time_before(jiffies, timeout));  	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)  		return -1; @@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port)  		dev_warn(&port->dd->pdev->dev,  			"PxCMD.CR not clear, escalating reset\n"); -		if (hba_reset_nosleep(port->dd)) +		if (mtip_hba_reset(port->dd))  			dev_err(&port->dd->pdev->dev,  				"HBA reset escalation failed.\n"); @@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port)  } +static int mtip_device_reset(struct driver_data *dd) +{ +	int rv = 0; + +	if (mtip_check_surprise_removal(dd->pdev)) +		return 0; + +	if (mtip_hba_reset(dd) < 0) +		rv = -EFAULT; + +	mdelay(1); +	mtip_init_port(dd->port); +	mtip_start_port(dd->port); + +	/* Enable interrupts on the HBA. 
*/ +	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, +					dd->mmio + HOST_CTL); +	return rv; +} +  /*   * Helper function for tag logging   */ @@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data)  	if (cmdto_cnt) {  		print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);  		if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { -			mtip_restart_port(port); +			mtip_device_reset(port->dd);  			wake_up_interruptible(&port->svc_wait);  		}  		clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); @@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port,  	int rv = 0, ready2go = 1;  	struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];  	unsigned long to; +	struct driver_data *dd = port->dd;  	/* Make sure the buffer is 8 byte aligned. This is asic specific. */  	if (buffer & 0x00000007) { -		dev_err(&port->dd->pdev->dev, -			"SG buffer is not 8 byte aligned\n"); +		dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");  		return -EFAULT;  	} @@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port,  		mdelay(100);  	} while (time_before(jiffies, to));  	if (!ready2go) { -		dev_warn(&port->dd->pdev->dev, +		dev_warn(&dd->pdev->dev,  			"Internal cmd active. 
new cmd [%02X]\n", fis->command);  		return -EBUSY;  	}  	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);  	port->ic_pause_timer = 0; -	if (fis->command == ATA_CMD_SEC_ERASE_UNIT) -		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); -	else if (fis->command == ATA_CMD_DOWNLOAD_MICRO) -		clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); +	clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); +	clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);  	if (atomic == GFP_KERNEL) {  		if (fis->command != ATA_CMD_STANDBYNOW1) {  			/* wait for io to complete if non atomic */  			if (mtip_quiesce_io(port, 5000) < 0) { -				dev_warn(&port->dd->pdev->dev, +				dev_warn(&dd->pdev->dev,  					"Failed to quiesce IO\n");  				release_slot(port, MTIP_TAG_INTERNAL);  				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); @@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port,  	/* Issue the command to the hardware */  	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); -	/* Poll if atomic, wait_for_completion otherwise */  	if (atomic == GFP_KERNEL) {  		/* Wait for the command to complete or timeout. 
*/ -		if (wait_for_completion_timeout( +		if (wait_for_completion_interruptible_timeout(  				&wait, -				msecs_to_jiffies(timeout)) == 0) { -			dev_err(&port->dd->pdev->dev, -				"Internal command did not complete [%d] " -				"within timeout of  %lu ms\n", -				atomic, timeout); -			if (mtip_check_surprise_removal(port->dd->pdev) || +				msecs_to_jiffies(timeout)) <= 0) { +			if (rv == -ERESTARTSYS) { /* interrupted */ +				dev_err(&dd->pdev->dev, +					"Internal command [%02X] was interrupted after %lu ms\n", +					fis->command, timeout); +				rv = -EINTR; +				goto exec_ic_exit; +			} else if (rv == 0) /* timeout */ +				dev_err(&dd->pdev->dev, +					"Internal command did not complete [%02X] within timeout of  %lu ms\n", +					fis->command, timeout); +			else +				dev_err(&dd->pdev->dev, +					"Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n", +					fis->command, rv, timeout); + +			if (mtip_check_surprise_removal(dd->pdev) ||  				test_bit(MTIP_DDF_REMOVE_PENDING_BIT, -						&port->dd->dd_flag)) { +						&dd->dd_flag)) { +				dev_err(&dd->pdev->dev, +					"Internal command [%02X] wait returned due to SR\n", +					fis->command);  				rv = -ENXIO;  				goto exec_ic_exit;  			} +			mtip_device_reset(dd); /* recover from timeout issue */  			rv = -EAGAIN; +			goto exec_ic_exit;  		}  	} else { +		u32 hba_stat, port_stat; +  		/* Spin for <timeout> checking if command still outstanding */  		timeout = jiffies + msecs_to_jiffies(timeout);  		while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])  				& (1 << MTIP_TAG_INTERNAL))  				&& time_before(jiffies, timeout)) { -			if (mtip_check_surprise_removal(port->dd->pdev)) { +			if (mtip_check_surprise_removal(dd->pdev)) {  				rv = -ENXIO;  				goto exec_ic_exit;  			}  			if ((fis->command != ATA_CMD_STANDBYNOW1) &&  				test_bit(MTIP_DDF_REMOVE_PENDING_BIT, -						&port->dd->dd_flag)) { +						&dd->dd_flag)) {  				rv = -ENXIO;  				goto exec_ic_exit;  			} -			if (readl(port->mmio + 
PORT_IRQ_STAT) & PORT_IRQ_ERR) { -				atomic_inc(&int_cmd->active); /* error */ -				break; +			port_stat = readl(port->mmio + PORT_IRQ_STAT); +			if (!port_stat) +				continue; + +			if (port_stat & PORT_IRQ_ERR) { +				dev_err(&dd->pdev->dev, +					"Internal command [%02X] failed\n", +					fis->command); +				mtip_device_reset(dd); +				rv = -EIO; +				goto exec_ic_exit; +			} else { +				writel(port_stat, port->mmio + PORT_IRQ_STAT); +				hba_stat = readl(dd->mmio + HOST_IRQ_STAT); +				if (hba_stat) +					writel(hba_stat, +						dd->mmio + HOST_IRQ_STAT);  			} +			break;  		}  	} -	if (atomic_read(&int_cmd->active) > 1) { -		dev_err(&port->dd->pdev->dev, -			"Internal command [%02X] failed\n", fis->command); -		rv = -EIO; -	}  	if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])  			& (1 << MTIP_TAG_INTERNAL)) {  		rv = -ENXIO; -		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, -					&port->dd->dd_flag)) { -			mtip_restart_port(port); +		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { +			mtip_device_reset(dd);  			rv = -EAGAIN;  		}  	} @@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,   *      -EINVAL		Invalid parameters passed in, trim not supported   *      -EIO		Error submitting trim request to hw   */ -static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len) +static int mtip_send_trim(struct driver_data *dd, unsigned int lba, +				unsigned int len)  {  	int i, rv = 0;  	u64 tlba, tlen, sect_left; @@ -1811,45 +1852,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)  }  /* - * Reset the HBA. - * - * Resets the HBA by setting the HBA Reset bit in the Global - * HBA Control register. After setting the HBA Reset bit the - * function waits for 1 second before reading the HBA Reset - * bit to make sure it has cleared. If HBA Reset is not clear - * an error is returned. Cannot be used in non-blockable - * context. 
- * - * @dd Pointer to the driver data structure. - * - * return value - *	0  The reset was successful. - *	-1 The HBA Reset bit did not clear. - */ -static int mtip_hba_reset(struct driver_data *dd) -{ -	mtip_deinit_port(dd->port); - -	/* Set the reset bit */ -	writel(HOST_RESET, dd->mmio + HOST_CTL); - -	/* Flush */ -	readl(dd->mmio + HOST_CTL); - -	/* Wait for reset to clear */ -	ssleep(1); - -	/* Check the bit has cleared */ -	if (readl(dd->mmio + HOST_CTL) & HOST_RESET) { -		dev_err(&dd->pdev->dev, -			"Reset bit did not clear.\n"); -		return -1; -	} - -	return 0; -} - -/*   * Display the identify command data.   *   * @port Pointer to the port data structure. @@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev,  static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); +/* debugsfs entries */ + +static ssize_t show_device_status(struct device_driver *drv, char *buf) +{ +	int size = 0; +	struct driver_data *dd, *tmp; +	unsigned long flags; +	char id_buf[42]; +	u16 status = 0; + +	spin_lock_irqsave(&dev_lock, flags); +	size += sprintf(&buf[size], "Devices Present:\n"); +	list_for_each_entry_safe(dd, tmp, &online_list, online_list) { +		if (dd->pdev) { +			if (dd->port && +			    dd->port->identify && +			    dd->port->identify_valid) { +				strlcpy(id_buf, +					(char *) (dd->port->identify + 10), 21); +				status = *(dd->port->identify + 141); +			} else { +				memset(id_buf, 0, 42); +				status = 0; +			} + +			if (dd->port && +			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { +				size += sprintf(&buf[size], +					" device %s %s (ftl rebuild %d %%)\n", +					dev_name(&dd->pdev->dev), +					id_buf, +					status); +			} else { +				size += sprintf(&buf[size], +					" device %s %s\n", +					dev_name(&dd->pdev->dev), +					id_buf); +			} +		} +	} + +	size += sprintf(&buf[size], "Devices Being Removed:\n"); +	list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { +		if (dd->pdev) { +			if (dd->port && +			    
dd->port->identify && +			    dd->port->identify_valid) { +				strlcpy(id_buf, +					(char *) (dd->port->identify+10), 21); +				status = *(dd->port->identify + 141); +			} else { +				memset(id_buf, 0, 42); +				status = 0; +			} + +			if (dd->port && +			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { +				size += sprintf(&buf[size], +					" device %s %s (ftl rebuild %d %%)\n", +					dev_name(&dd->pdev->dev), +					id_buf, +					status); +			} else { +				size += sprintf(&buf[size], +					" device %s %s\n", +					dev_name(&dd->pdev->dev), +					id_buf); +			} +		} +	} +	spin_unlock_irqrestore(&dev_lock, flags); + +	return size; +} + +static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, +						size_t len, loff_t *offset) +{ +	int size = *offset; +	char buf[MTIP_DFS_MAX_BUF_SIZE]; + +	if (!len || *offset) +		return 0; + +	size += show_device_status(NULL, buf); + +	*offset = size <= len ? size : len; +	size = copy_to_user(ubuf, buf, *offset); +	if (size) +		return -EFAULT; + +	return *offset; +} +  static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,  				  size_t len, loff_t *offset)  { @@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,  	return *offset;  } +static const struct file_operations mtip_device_status_fops = { +	.owner  = THIS_MODULE, +	.open   = simple_open, +	.read   = mtip_hw_read_device_status, +	.llseek = no_llseek, +}; +  static const struct file_operations mtip_regs_fops = {  	.owner  = THIS_MODULE,  	.open   = simple_open, @@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,  	const struct cpumask *node_mask;  	int cpu, i = 0, j = 0;  	int my_node = NUMA_NO_NODE; +	unsigned long flags;  	/* Allocate memory for this devices private data. 
*/  	my_node = pcibus_to_node(pdev->bus); @@ -4218,6 +4322,9 @@ static int mtip_pci_probe(struct pci_dev *pdev,  	dd->pdev	= pdev;  	dd->numa_node	= my_node; +	INIT_LIST_HEAD(&dd->online_list); +	INIT_LIST_HEAD(&dd->remove_list); +  	memset(dd->workq_name, 0, 32);  	snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); @@ -4305,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev,  	instance++;  	if (rv != MTIP_FTL_REBUILD_MAGIC)  		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); +	else +		rv = 0; /* device in rebuild state, return 0 from probe */ + +	/* Add to online list even if in ftl rebuild */ +	spin_lock_irqsave(&dev_lock, flags); +	list_add(&dd->online_list, &online_list); +	spin_unlock_irqrestore(&dev_lock, flags); +  	goto done;  block_initialize_err: @@ -4338,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev)  {  	struct driver_data *dd = pci_get_drvdata(pdev);  	int counter = 0; +	unsigned long flags;  	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); +	spin_lock_irqsave(&dev_lock, flags); +	list_del_init(&dd->online_list); +	list_add(&dd->remove_list, &removing_list); +	spin_unlock_irqrestore(&dev_lock, flags); +  	if (mtip_check_surprise_removal(pdev)) {  		while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {  			counter++; @@ -4366,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev)  	pci_disable_msi(pdev); +	spin_lock_irqsave(&dev_lock, flags); +	list_del_init(&dd->remove_list); +	spin_unlock_irqrestore(&dev_lock, flags); +  	kfree(dd);  	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);  } @@ -4513,6 +4638,11 @@ static int __init mtip_init(void)  	pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); +	spin_lock_init(&dev_lock); + +	INIT_LIST_HEAD(&online_list); +	INIT_LIST_HEAD(&removing_list); +  	/* Allocate a major block device number to use with this driver. 
*/  	error = register_blkdev(0, MTIP_DRV_NAME);  	if (error <= 0) { @@ -4522,11 +4652,18 @@ static int __init mtip_init(void)  	}  	mtip_major = error; -	if (!dfs_parent) { -		dfs_parent = debugfs_create_dir("rssd", NULL); -		if (IS_ERR_OR_NULL(dfs_parent)) { -			pr_warn("Error creating debugfs parent\n"); -			dfs_parent = NULL; +	dfs_parent = debugfs_create_dir("rssd", NULL); +	if (IS_ERR_OR_NULL(dfs_parent)) { +		pr_warn("Error creating debugfs parent\n"); +		dfs_parent = NULL; +	} +	if (dfs_parent) { +		dfs_device_status = debugfs_create_file("device_status", +					S_IRUGO, dfs_parent, NULL, +					&mtip_device_status_fops); +		if (IS_ERR_OR_NULL(dfs_device_status)) { +			pr_err("Error creating device_status node\n"); +			dfs_device_status = NULL;  		}  	} diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 3bffff5f670..8e8334c9dd0 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -129,9 +129,9 @@ enum {  	MTIP_PF_EH_ACTIVE_BIT       = 1, /* error handling */  	MTIP_PF_SE_ACTIVE_BIT       = 2, /* secure erase */  	MTIP_PF_DM_ACTIVE_BIT       = 3, /* download microcde */ -	MTIP_PF_PAUSE_IO      =	((1 << MTIP_PF_IC_ACTIVE_BIT) | \ -				(1 << MTIP_PF_EH_ACTIVE_BIT) | \ -				(1 << MTIP_PF_SE_ACTIVE_BIT) | \ +	MTIP_PF_PAUSE_IO      =	((1 << MTIP_PF_IC_ACTIVE_BIT) | +				(1 << MTIP_PF_EH_ACTIVE_BIT) | +				(1 << MTIP_PF_SE_ACTIVE_BIT) |  				(1 << MTIP_PF_DM_ACTIVE_BIT)),  	MTIP_PF_SVC_THD_ACTIVE_BIT  = 4, @@ -144,9 +144,9 @@ enum {  	MTIP_DDF_REMOVE_PENDING_BIT = 1,  	MTIP_DDF_OVER_TEMP_BIT      = 2,  	MTIP_DDF_WRITE_PROTECT_BIT  = 3, -	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ -				(1 << MTIP_DDF_SEC_LOCK_BIT) | \ -				(1 << MTIP_DDF_OVER_TEMP_BIT) | \ +	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | +				(1 << MTIP_DDF_SEC_LOCK_BIT) | +				(1 << MTIP_DDF_OVER_TEMP_BIT) |  				(1 << MTIP_DDF_WRITE_PROTECT_BIT)),  	MTIP_DDF_CLEANUP_BIT        = 5, @@ 
-180,7 +180,7 @@ struct mtip_work {  #define MTIP_TRIM_TIMEOUT_MS		240000  #define MTIP_MAX_TRIM_ENTRIES		8 -#define MTIP_MAX_TRIM_ENTRY_LEN 	0xfff8 +#define MTIP_MAX_TRIM_ENTRY_LEN		0xfff8  struct mtip_trim_entry {  	u32 lba;   /* starting lba of region */ @@ -501,6 +501,10 @@ struct driver_data {  	atomic_t irq_workers_active;  	int isr_binding; + +	struct list_head online_list; /* linkage for online list */ + +	struct list_head remove_list; /* linkage for removing list */  };  #endif diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f556f8a8b3f..b7b7a88d9f6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1742,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)  	struct rbd_device *rbd_dev = img_request->rbd_dev;  	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;  	struct rbd_obj_request *obj_request; +	struct rbd_obj_request *next_obj_request;  	dout("%s: img %p\n", __func__, img_request); -	for_each_obj_request(img_request, obj_request) { +	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {  		int ret;  		obj_request->callback = rbd_img_obj_callback; diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index e3f9a99b852..d784650d14f 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)  	struct hpet_dev *devp;  	unsigned long addr; -	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) -		return -EINVAL; -  	devp = file->private_data;  	addr = devp->hd_hpets->hp_hpet_phys;  	if (addr & (PAGE_SIZE - 1))  		return -ENOSYS; -	vma->vm_flags |= VM_IO;  	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - -	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, -					PAGE_SIZE, vma->vm_page_prot)) { -		printk(KERN_ERR "%s: io_remap_pfn_range failed\n", -			__func__); -		return -EAGAIN; -	} - -	return 0; +	return vm_iomap_memory(vma, addr, 
PAGE_SIZE);  #else  	return -ENOSYS;  #endif diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 69ae5972713..a0f7724852e 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng)  }  EXPORT_SYMBOL_GPL(hwrng_unregister); +static void __exit hwrng_exit(void) +{ +	mutex_lock(&rng_mutex); +	BUG_ON(current_rng); +	kfree(rng_buffer); +	mutex_unlock(&rng_mutex); +} + +module_exit(hwrng_exit);  MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");  MODULE_LICENSE("GPL"); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index e905d5f5305..ce5f3fc25d6 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -149,7 +149,8 @@ struct ports_device {  	spinlock_t ports_lock;  	/* To protect the vq operations for the control channel */ -	spinlock_t cvq_lock; +	spinlock_t c_ivq_lock; +	spinlock_t c_ovq_lock;  	/* The current config space is stored here */  	struct virtio_console_config config; @@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,  	vq = portdev->c_ovq;  	sg_init_one(sg, &cpkt, sizeof(cpkt)); + +	spin_lock(&portdev->c_ovq_lock);  	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {  		virtqueue_kick(vq);  		while (!virtqueue_get_buf(vq, &len))  			cpu_relax();  	} +	spin_unlock(&portdev->c_ovq_lock);  	return 0;  } @@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id)  		 * rproc_serial does not want the console port, only  		 * the generic port implementation.  		 
*/ -		port->host_connected = port->guest_connected = true; +		port->host_connected = true;  	else if (!use_multiport(port->portdev)) {  		/*  		 * If we're not using multiport support, @@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work)  	portdev = container_of(work, struct ports_device, control_work);  	vq = portdev->c_ivq; -	spin_lock(&portdev->cvq_lock); +	spin_lock(&portdev->c_ivq_lock);  	while ((buf = virtqueue_get_buf(vq, &len))) { -		spin_unlock(&portdev->cvq_lock); +		spin_unlock(&portdev->c_ivq_lock);  		buf->len = len;  		buf->offset = 0;  		handle_control_message(portdev, buf); -		spin_lock(&portdev->cvq_lock); +		spin_lock(&portdev->c_ivq_lock);  		if (add_inbuf(portdev->c_ivq, buf) < 0) {  			dev_warn(&portdev->vdev->dev,  				 "Error adding buffer to queue\n");  			free_buf(buf, false);  		}  	} -	spin_unlock(&portdev->cvq_lock); +	spin_unlock(&portdev->c_ivq_lock);  }  static void out_intr(struct virtqueue *vq) @@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq)  	port->inbuf = get_inbuf(port);  	/* -	 * Don't queue up data when port is closed.  This condition +	 * Normally the port should not accept data when the port is +	 * closed. For generic serial ports, the host won't (shouldn't) +	 * send data till the guest is connected. But this condition  	 * can be reached when a console port is not yet connected (no -	 * tty is spawned) and the host sends out data to console -	 * ports.  For generic serial ports, the host won't -	 * (shouldn't) send data till the guest is connected. +	 * tty is spawned) and the other side sends out data over the +	 * vring, or when a remote devices start sending data before +	 * the ports are opened. +	 * +	 * A generic serial port will discard data if not connected, +	 * while console ports and rproc-serial ports accepts data at +	 * any time. rproc-serial is initiated with guest_connected to +	 * false because port_fops_open expects this. 
Console ports are +	 * hooked up with an HVC console and is initialized with +	 * guest_connected to true.  	 */ -	if (!port->guest_connected) + +	if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))  		discard_port_data(port);  	spin_unlock_irqrestore(&port->inbuf_lock, flags); @@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev)  	if (multiport) {  		unsigned int nr_added_bufs; -		spin_lock_init(&portdev->cvq_lock); +		spin_lock_init(&portdev->c_ivq_lock); +		spin_lock_init(&portdev->c_ovq_lock);  		INIT_WORK(&portdev->control_work, &control_work_handler); -		nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); +		nr_added_bufs = fill_queue(portdev->c_ivq, +					   &portdev->c_ivq_lock);  		if (!nr_added_bufs) {  			dev_err(&vdev->dev,  				"Error allocating buffers for control queue\n"); @@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev)  		return ret;  	if (use_multiport(portdev)) -		fill_queue(portdev->c_ivq, &portdev->cvq_lock); +		fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);  	list_for_each_entry(port, &portdev->ports, list) {  		port->in_vq = portdev->in_vqs[port->id]; diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 1e2de730536..f873dcefe0d 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -703,7 +703,7 @@ static void tegra20_pll_init(void)  	clks[pll_a_out0] = clk;  	/* PLLE */ -	clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL, +	clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base,  			     0, 100000000, &pll_e_params,  			     0, pll_e_freq_table, NULL);  	clk_register_clkdev(clk, "pll_e", NULL); diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 4e5b7fb8927..37d23a0f8c5 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {  static int 
cpu0_cpufreq_probe(struct platform_device *pdev)  { -	struct device_node *np; +	struct device_node *np, *parent;  	int ret; -	for_each_child_of_node(of_find_node_by_path("/cpus"), np) { +	parent = of_find_node_by_path("/cpus"); +	if (!parent) { +		pr_err("failed to find OF /cpus\n"); +		return -ENOENT; +	} + +	for_each_child_of_node(parent, np) {  		if (of_get_property(np, "operating-points", NULL))  			break;  	} diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 46bde01eee6..cc4bd2f6838 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -14,8 +14,8 @@   * published by the Free Software Foundation.   */ -#ifndef _CPUFREQ_GOVERNER_H -#define _CPUFREQ_GOVERNER_H +#ifndef _CPUFREQ_GOVERNOR_H +#define _CPUFREQ_GOVERNOR_H  #include <linux/cpufreq.h>  #include <linux/kobject.h> @@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,  		unsigned int sampling_rate);  int cpufreq_governor_dbs(struct dbs_data *dbs_data,  		struct cpufreq_policy *policy, unsigned int event); -#endif /* _CPUFREQ_GOVERNER_H */ +#endif /* _CPUFREQ_GOVERNOR_H */ diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ad72922919e..6133ef5cf67 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)  	sample_time = cpu->pstate_policy->sample_rate_ms;  	delay = msecs_to_jiffies(sample_time); -	delay -= jiffies % delay;  	mod_timer_pinned(&cpu->timer, jiffies + delay);  } diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 8bc5fef07e7..22c9063e012 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = {  	.shutdown = ux500_cryp_shutdown,  	.driver = {  		.owner = THIS_MODULE, -		.name  = "cryp1" +		.name  = "cryp1",  
		.pm    = &ux500_cryp_pm,  	}  }; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 80b69971cf2..aeaea32bcfd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -83,6 +83,7 @@ config INTEL_IOP_ADMA  config DW_DMAC  	tristate "Synopsys DesignWare AHB DMA support" +	depends on GENERIC_HARDIRQS  	select DMA_ENGINE  	default y if CPU_AT32AP7000  	help diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6e13f262139..88cfc61329d 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan)  	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); -	BUG_ON(atc_chan_is_enabled(atchan)); -  	/*  	 * Submit queued descriptors ASAP, i.e. before we go through  	 * the completed ones. @@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan)  {  	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); +	if (atc_chan_is_enabled(atchan)) +		return; +  	if (list_empty(&atchan->active_list) ||  	    list_is_singular(&atchan->active_list)) {  		atc_complete_all(atchan); @@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan)  		return;  	spin_lock_irqsave(&atchan->lock, flags); -	if (!atc_chan_is_enabled(atchan)) { -		atc_advance_work(atchan); -	} +	atc_advance_work(atchan);  	spin_unlock_irqrestore(&atchan->lock, flags);  } diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c4b4fd2acc4..08b43bf3715 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan)  	spin_lock_irqsave(&c->vc.lock, flags);  	if (vchan_issue_pending(&c->vc) && !c->desc) { -		struct omap_dmadev *d = to_omap_dma_dev(chan->device); -		spin_lock(&d->lock); -		if (list_empty(&c->node)) -			list_add_tail(&c->node, &d->pending); -		spin_unlock(&d->lock); -		tasklet_schedule(&d->task); +		/* +		 * c->cyclic is used only by audio and in this case the DMA 
need +		 * to be started without delay. +		 */ +		if (!c->cyclic) { +			struct omap_dmadev *d = to_omap_dma_dev(chan->device); +			spin_lock(&d->lock); +			if (list_empty(&c->node)) +				list_add_tail(&c->node, &d->pending); +			spin_unlock(&d->lock); +			tasklet_schedule(&d->task); +		} else { +			omap_dma_start_desc(c); +		}  	}  	spin_unlock_irqrestore(&c->vc.lock, flags);  } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 71815312275..5dbc5946c4c 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)  {  	struct dma_pl330_platdata *pdat;  	struct dma_pl330_dmac *pdmac; -	struct dma_pl330_chan *pch; +	struct dma_pl330_chan *pch, *_p;  	struct pl330_info *pi;  	struct dma_device *pd;  	struct resource *res; @@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)  	ret = dma_async_device_register(pd);  	if (ret) {  		dev_err(&adev->dev, "unable to register DMAC\n"); -		goto probe_err2; +		goto probe_err3; +	} + +	if (adev->dev.of_node) { +		ret = of_dma_controller_register(adev->dev.of_node, +					 of_dma_pl330_xlate, pdmac); +		if (ret) { +			dev_err(&adev->dev, +			"unable to register DMA to the generic DT DMA helpers\n"); +		}  	}  	dev_info(&adev->dev, @@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)  		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,  		pi->pcfg.num_peri, pi->pcfg.num_events); -	ret = of_dma_controller_register(adev->dev.of_node, -					 of_dma_pl330_xlate, pdmac); -	if (ret) { -		dev_err(&adev->dev, -		"unable to register DMA to the generic DT DMA helpers\n"); -		goto probe_err2; -	} -  	return 0; +probe_err3: +	amba_set_drvdata(adev, NULL); +	/* Idle the DMAC */ +	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, +			chan.device_node) { + +		/* Remove the channel */ +		list_del(&pch->chan.device_node); + +		/* Flush the channel */ +		pl330_control(&pch->chan, 
DMA_TERMINATE_ALL, 0); +		pl330_free_chan_resources(&pch->chan); +	}  probe_err2:  	pl330_del(pi);  probe_err1: @@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev)  	if (!pdmac)  		return 0; -	of_dma_controller_free(adev->dev.of_node); +	if (adev->dev.of_node) +		of_dma_controller_free(adev->dev.of_node); +	dma_async_device_unregister(&pdmac->ddma);  	amba_set_drvdata(adev, NULL);  	/* Idle the DMAC */ diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c index cdae207028a..6c3fca97d34 100644 --- a/drivers/eisa/pci_eisa.c +++ b/drivers/eisa/pci_eisa.c @@ -19,10 +19,10 @@  /* There is only *one* pci_eisa device per machine, right ? */  static struct eisa_root_device pci_eisa_root; -static int __init pci_eisa_init(struct pci_dev *pdev, -				const struct pci_device_id *ent) +static int __init pci_eisa_init(struct pci_dev *pdev)  { -	int rc; +	int rc, i; +	struct resource *res, *bus_res = NULL;  	if ((rc = pci_enable_device (pdev))) {  		printk (KERN_ERR "pci_eisa : Could not enable device %s\n", @@ -30,9 +30,30 @@ static int __init pci_eisa_init(struct pci_dev *pdev,  		return rc;  	} +	/* +	 * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI +	 * device, so the resources available on EISA are the same as those +	 * available on the 82375 bus.  This works the same as a PCI-PCI +	 * bridge in subtractive-decode mode (see pci_read_bridge_bases()). +	 * We assume other PCI-EISA bridges are similar. +	 * +	 * eisa_root_register() can only deal with a single io port resource, +	*  so we use the first valid io port resource. 
+	 */ +	pci_bus_for_each_resource(pdev->bus, res, i) +		if (res && (res->flags & IORESOURCE_IO)) { +			bus_res = res; +			break; +		} + +	if (!bus_res) { +		dev_err(&pdev->dev, "No resources available\n"); +		return -1; +	} +  	pci_eisa_root.dev              = &pdev->dev; -	pci_eisa_root.res	       = pdev->bus->resource[0]; -	pci_eisa_root.bus_base_addr    = pdev->bus->resource[0]->start; +	pci_eisa_root.res	       = bus_res; +	pci_eisa_root.bus_base_addr    = bus_res->start;  	pci_eisa_root.slots	       = EISA_MAX_SLOTS;  	pci_eisa_root.dma_mask         = pdev->dma_mask;  	dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); @@ -45,22 +66,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev,  	return 0;  } -static struct pci_device_id pci_eisa_pci_tbl[] = { -	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, -	  PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, -	{ 0, } -}; +/* + * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init(). + *   Otherwise pnp resource will get enabled early and could prevent eisa + *   to be initialized. + * Also need to make sure pci_eisa_init_early() is called after + * x86/pci_subsys_init(). + * So need to use subsys_initcall_sync with it. 
+ */ +static int __init pci_eisa_init_early(void) +{ +	struct pci_dev *dev = NULL; +	int ret; -static struct pci_driver __refdata pci_eisa_driver = { -	.name		= "pci_eisa", -	.id_table	= pci_eisa_pci_tbl, -	.probe		= pci_eisa_init, -}; +	for_each_pci_dev(dev) +		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) { +			ret = pci_eisa_init(dev); +			if (ret) +				return ret; +		} -static int __init pci_eisa_init_module (void) -{ -	return pci_register_driver (&pci_eisa_driver); +	return 0;  } - -device_initcall(pci_eisa_init_module); -MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl); +subsys_initcall_sync(pci_eisa_init_early); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 42c759a4d04..3e532002e4d 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -39,6 +39,7 @@ config FIRMWARE_MEMMAP  config EFI_VARS  	tristate "EFI Variable Support via sysfs"  	depends on EFI +	select UCS2_STRING  	default n  	help  	  If you say Y here, you are able to get EFI (Extensible Firmware diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 7acafb80fd4..182ce947117 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -80,6 +80,7 @@  #include <linux/slab.h>  #include <linux/pstore.h>  #include <linux/ctype.h> +#include <linux/ucs2_string.h>  #include <linux/fs.h>  #include <linux/ramfs.h> @@ -172,51 +173,6 @@ static void efivar_update_sysfs_entries(struct work_struct *);  static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);  static bool efivar_wq_enabled = true; -/* Return the number of unicode characters in data */ -static unsigned long -utf16_strnlen(efi_char16_t *s, size_t maxlength) -{ -	unsigned long length = 0; - -	while (*s++ != 0 && length < maxlength) -		length++; -	return length; -} - -static inline unsigned long -utf16_strlen(efi_char16_t *s) -{ -	return utf16_strnlen(s, ~0UL); -} - -/* - * Return the number of bytes is the length of this string - * Note: this is NOT the same as the number 
of unicode characters - */ -static inline unsigned long -utf16_strsize(efi_char16_t *data, unsigned long maxlength) -{ -	return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); -} - -static inline int -utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len) -{ -	while (1) { -		if (len == 0) -			return 0; -		if (*a < *b) -			return -1; -		if (*a > *b) -			return 1; -		if (*a == 0) /* implies *b == 0 */ -			return 0; -		a++; -		b++; -		len--; -	} -} -  static bool  validate_device_path(struct efi_variable *var, int match, u8 *buffer,  		     unsigned long len) @@ -268,7 +224,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,  	u16 filepathlength;  	int i, desclength = 0, namelen; -	namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName)); +	namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));  	/* Either "Boot" or "Driver" followed by four digits of hex */  	for (i = match; i < match+4; i++) { @@ -291,7 +247,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,  	 * There's no stored length for the description, so it has to be  	 * found by hand  	 */ -	desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; +	desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;  	/* Each boot entry must have a descriptor */  	if (!desclength) @@ -436,24 +392,12 @@ static efi_status_t  check_var_size_locked(struct efivars *efivars, u32 attributes,  			unsigned long size)  { -	u64 storage_size, remaining_size, max_size; -	efi_status_t status;  	const struct efivar_operations *fops = efivars->ops; -	if (!efivars->ops->query_variable_info) +	if (!efivars->ops->query_variable_store)  		return EFI_UNSUPPORTED; -	status = fops->query_variable_info(attributes, &storage_size, -					   &remaining_size, &max_size); - -	if (status != EFI_SUCCESS) -		return status; - -	if (!storage_size || size > remaining_size || size > max_size || -	    
(remaining_size - size) < (storage_size / 2)) -		return EFI_OUT_OF_RESOURCES; - -	return status; +	return fops->query_variable_store(attributes, size);  } @@ -593,7 +537,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)  	spin_lock_irq(&efivars->lock);  	status = check_var_size_locked(efivars, new_var->Attributes, -	       new_var->DataSize + utf16_strsize(new_var->VariableName, 1024)); +	       new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));  	if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)  		status = efivars->ops->set_variable(new_var->VariableName, @@ -771,7 +715,7 @@ static ssize_t efivarfs_file_write(struct file *file,  	 * QueryVariableInfo() isn't supported by the firmware.  	 */ -	varsize = datasize + utf16_strsize(var->var.VariableName, 1024); +	varsize = datasize + ucs2_strsize(var->var.VariableName, 1024);  	status = check_var_size(efivars, attributes, varsize);  	if (status != EFI_SUCCESS) { @@ -1223,7 +1167,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)  		inode = NULL; -		len = utf16_strlen(entry->var.VariableName); +		len = ucs2_strlen(entry->var.VariableName);  		/* name, plus '-', plus GUID, plus NUL*/  		name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC); @@ -1481,8 +1425,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,  		if (efi_guidcmp(entry->var.VendorGuid, vendor))  			continue; -		if (utf16_strncmp(entry->var.VariableName, efi_name, -				  utf16_strlen(efi_name))) { +		if (ucs2_strncmp(entry->var.VariableName, efi_name, +				  ucs2_strlen(efi_name))) {  			/*  			 * Check if an old format,  			 * which doesn't support holding @@ -1494,8 +1438,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,  			for (i = 0; i < DUMP_NAME_LEN; i++)  				efi_name_old[i] = name_old[i]; -			if (utf16_strncmp(entry->var.VariableName, efi_name_old, -					  utf16_strlen(efi_name_old))) +			if 
(ucs2_strncmp(entry->var.VariableName, efi_name_old, +					  ucs2_strlen(efi_name_old)))  				continue;  		} @@ -1573,8 +1517,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,  	 * Does this variable already exist?  	 */  	list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { -		strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); -		strsize2 = utf16_strsize(new_var->VariableName, 1024); +		strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024); +		strsize2 = ucs2_strsize(new_var->VariableName, 1024);  		if (strsize1 == strsize2 &&  			!memcmp(&(search_efivar->var.VariableName),  				new_var->VariableName, strsize1) && @@ -1590,7 +1534,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,  	}  	status = check_var_size_locked(efivars, new_var->Attributes, -	       new_var->DataSize + utf16_strsize(new_var->VariableName, 1024)); +	       new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));  	if (status && status != EFI_UNSUPPORTED) {  		spin_unlock_irq(&efivars->lock); @@ -1614,7 +1558,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,  	/* Create the entry in sysfs.  Locking is not required here */  	status = efivar_create_sysfs_entry(efivars, -					   utf16_strsize(new_var->VariableName, +					   ucs2_strsize(new_var->VariableName,  							 1024),  					   new_var->VariableName,  					   &new_var->VendorGuid); @@ -1644,8 +1588,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,  	 * Does this variable already exist?  	 
*/  	list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { -		strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); -		strsize2 = utf16_strsize(del_var->VariableName, 1024); +		strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024); +		strsize2 = ucs2_strsize(del_var->VariableName, 1024);  		if (strsize1 == strsize2 &&  			!memcmp(&(search_efivar->var.VariableName),  				del_var->VariableName, strsize1) && @@ -1691,9 +1635,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)  	unsigned long strsize1, strsize2;  	bool found = false; -	strsize1 = utf16_strsize(variable_name, 1024); +	strsize1 = ucs2_strsize(variable_name, 1024);  	list_for_each_entry_safe(entry, n, &efivars->list, list) { -		strsize2 = utf16_strsize(entry->var.VariableName, 1024); +		strsize2 = ucs2_strsize(entry->var.VariableName, 1024);  		if (strsize1 == strsize2 &&  			!memcmp(variable_name, &(entry->var.VariableName),  				strsize2) && @@ -2131,7 +2075,7 @@ efivars_init(void)  	ops.get_variable = efi.get_variable;  	ops.set_variable = efi.set_variable;  	ops.get_next_variable = efi.get_next_variable; -	ops.query_variable_info = efi.query_variable_info; +	ops.query_variable_store = efi_query_variable_store;  	error = register_efivars(&__efivars, &ops, efi_kobj);  	if (error) diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index f9dbd503fc4..de3c317bd3e 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c @@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)  	 * If it can't be trusted, assume that the pin can be used as a GPIO.  	 */  	if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) -		return 1; +		return 0;  	return ichx_read_bit(GPIO_USE_SEL, nr) ? 
0 : -ENODEV;  } diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 24059462c87..9391cf16e99 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,  						chip->gpio_chip.ngpio,  						irq_base,  						&pca953x_irq_simple_ops, -						NULL); +						chip);  		if (!chip->domain)  			return -ENODEV; diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 770476a9da8..3ce5bc38ac3 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {  	.xlate = irq_domain_xlate_twocell,  }; -static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) +static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, +		struct device_node *np)  { -	int base = stmpe_gpio->irq_base; +	int base = 0; -	stmpe_gpio->domain = irq_domain_add_simple(NULL, +	if (!np) +		base = stmpe_gpio->irq_base; + +	stmpe_gpio->domain = irq_domain_add_simple(np,  				stmpe_gpio->chip.ngpio, base,  				&stmpe_gpio_irq_simple_ops, stmpe_gpio);  	if (!stmpe_gpio->domain) { @@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)  	stmpe_gpio->chip = template_chip;  	stmpe_gpio->chip.ngpio = stmpe->num_gpios;  	stmpe_gpio->chip.dev = &pdev->dev; +#ifdef CONFIG_OF +	stmpe_gpio->chip.of_node = np; +#endif  	stmpe_gpio->chip.base = pdata ? 
pdata->gpio_base : -1;  	if (pdata) @@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)  		goto out_free;  	if (irq >= 0) { -		ret = stmpe_gpio_irq_init(stmpe_gpio); +		ret = stmpe_gpio_irq_init(stmpe_gpio, np);  		if (ret)  			goto out_disable; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 792c3e3795c..dd64a06dc5b 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2326,7 +2326,6 @@ int drm_mode_addfb(struct drm_device *dev,  	fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);  	if (IS_ERR(fb)) {  		DRM_DEBUG_KMS("could not create framebuffer\n"); -		drm_modeset_unlock_all(dev);  		return PTR_ERR(fb);  	} @@ -2506,7 +2505,6 @@ int drm_mode_addfb2(struct drm_device *dev,  	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);  	if (IS_ERR(fb)) {  		DRM_DEBUG_KMS("could not create framebuffer\n"); -		drm_modeset_unlock_all(dev);  		return PTR_ERR(fb);  	} diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 59d6b9bf204..892ff9f9597 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)  	if (!fb_helper->fb)  		return 0; -	drm_modeset_lock_all(dev); +	mutex_lock(&fb_helper->dev->mode_config.mutex);  	if (!drm_fb_helper_is_bound(fb_helper)) {  		fb_helper->delayed_hotplug = true; -		drm_modeset_unlock_all(dev); +		mutex_unlock(&fb_helper->dev->mode_config.mutex);  		return 0;  	}  	DRM_DEBUG_KMS("\n"); @@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)  	count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,  						    max_height); +	mutex_unlock(&fb_helper->dev->mode_config.mutex); + +	drm_modeset_lock_all(dev);  	drm_setup_crtcs(fb_helper);  	drm_modeset_unlock_all(dev); -  	drm_fb_helper_set_par(fb_helper->fbdev);  	return 0; diff --git a/drivers/gpu/drm/drm_fops.c 
b/drivers/gpu/drm/drm_fops.c index 13fdcd10a60..429e07d0b0f 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp)  	int retcode = 0;  	int need_setup = 0;  	struct address_space *old_mapping; +	struct address_space *old_imapping;  	minor = idr_find(&drm_minors_idr, minor_id);  	if (!minor) @@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp)  	if (!dev->open_count++)  		need_setup = 1;  	mutex_lock(&dev->struct_mutex); +	old_imapping = inode->i_mapping;  	old_mapping = dev->dev_mapping;  	if (old_mapping == NULL)  		dev->dev_mapping = &inode->i_data; @@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp)  err_undo:  	mutex_lock(&dev->struct_mutex); -	filp->f_mapping = old_mapping; -	inode->i_mapping = old_mapping; +	filp->f_mapping = old_imapping; +	inode->i_mapping = old_imapping;  	iput(container_of(dev->dev_mapping, struct inode, i_data));  	dev->dev_mapping = old_mapping;  	mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3b11ab0fbc9..9a48e1a2d41 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)  	if (eb == NULL) {  		int size = args->buffer_count;  		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; -		BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); +		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));  		while (count > 2*size)  			count >>= 1;  		eb = kzalloc(count*sizeof(struct hlist_head) + diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 32a3693905e..1ce45a0a2d3 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -45,6 +45,9 @@  struct intel_crt {  	struct intel_encoder base; +	/* DPMS state is stored in 
the connector, which we need in the +	 * encoder's enable/disable callbacks */ +	struct intel_connector *connector;  	bool force_hotplug_required;  	u32 adpa_reg;  }; @@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,  	return true;  } -static void intel_disable_crt(struct intel_encoder *encoder) -{ -	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; -	struct intel_crt *crt = intel_encoder_to_crt(encoder); -	u32 temp; - -	temp = I915_READ(crt->adpa_reg); -	temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; -	temp &= ~ADPA_DAC_ENABLE; -	I915_WRITE(crt->adpa_reg, temp); -} - -static void intel_enable_crt(struct intel_encoder *encoder) -{ -	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; -	struct intel_crt *crt = intel_encoder_to_crt(encoder); -	u32 temp; - -	temp = I915_READ(crt->adpa_reg); -	temp |= ADPA_DAC_ENABLE; -	I915_WRITE(crt->adpa_reg, temp); -} -  /* Note: The caller is required to filter out dpms modes not supported by the   * platform. 
*/  static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) @@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)  	I915_WRITE(crt->adpa_reg, temp);  } +static void intel_disable_crt(struct intel_encoder *encoder) +{ +	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void intel_enable_crt(struct intel_encoder *encoder) +{ +	struct intel_crt *crt = intel_encoder_to_crt(encoder); + +	intel_crt_set_dpms(encoder, crt->connector->base.dpms); +} + +  static void intel_crt_dpms(struct drm_connector *connector, int mode)  {  	struct drm_device *dev = connector->dev; @@ -746,6 +739,7 @@ void intel_crt_init(struct drm_device *dev)  	}  	connector = &intel_connector->base; +	crt->connector = intel_connector;  	drm_connector_init(dev, &intel_connector->base,  			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d7d4afe0134..8fc93f90a7c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2559,12 +2559,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)  {  	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);  	struct intel_dp *intel_dp = &intel_dig_port->dp; +	struct drm_device *dev = intel_dp_to_dev(intel_dp);  	i2c_del_adapter(&intel_dp->adapter);  	drm_encoder_cleanup(encoder);  	if (is_edp(intel_dp)) {  		cancel_delayed_work_sync(&intel_dp->panel_vdd_work); +		mutex_lock(&dev->mode_config.mutex);  		ironlake_panel_vdd_off_sync(intel_dp); +		mutex_unlock(&dev->mode_config.mutex);  	}  	kfree(intel_dig_port);  } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fe22bb780e1..78d8e919509 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,  	int i;  	unsigned char misc = 0;  	unsigned char 
ext_vga[6]; -	unsigned char ext_vga_index24; -	unsigned char dac_index90 = 0;  	u8 bppshift;  	static unsigned char dacvalue[] = { @@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,  		option2 = 0x0000b000;  		break;  	case G200_ER: -		dac_index90 = 0;  		break;  	} @@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,  		WREG_DAC(i, dacvalue[i]);  	} -	if (mdev->type == G200_ER) { -		WREG_DAC(0x90, dac_index90); -	} - +	if (mdev->type == G200_ER) +		WREG_DAC(0x90, 0);  	if (option)  		pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); @@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,  	if (mdev->type == G200_WB)  		ext_vga[1] |= 0x88; -	ext_vga_index24 = 0x05; -  	/* Set pixel clocks */  	misc = 0x2d;  	WREG8(MGA_MISC_OUT, misc); @@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,  	}  	if (mdev->type == G200_ER) -		WREG_ECRT(24, ext_vga_index24); +		WREG_ECRT(0x24, 0x5);  	if (mdev->type == G200_EV) {  		WREG_ECRT(6, 0); diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index e816f06637a..0e2c1a4f165 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -248,6 +248,22 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios)  	}  } +static void +nouveau_bios_shadow_platform(struct nouveau_bios *bios) +{ +	struct pci_dev *pdev = nv_device(bios)->pdev; +	size_t size; + +	void __iomem *rom = pci_platform_rom(pdev, &size); +	if (rom && size) { +		bios->data = kmalloc(size, GFP_KERNEL); +		if (bios->data) { +			memcpy_fromio(bios->data, rom, size); +			bios->size = size; +		} +	} +} +  static int  nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)  { @@ -288,6 +304,7 @@ nouveau_bios_shadow(struct nouveau_bios *bios)  		{ "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },  		{ "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },  		{ 
"PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, +		{ "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },  		{}  	};  	struct methods *mthd, *best; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 3b6dc883e15..5eb3e0da7c6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -391,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)  	struct nouveau_drm *drm = nouveau_drm(dev);  	struct nouveau_device *device = nv_device(drm->device);  	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); -	struct nouveau_abi16_chan *chan, *temp; +	struct nouveau_abi16_chan *chan = NULL, *temp;  	struct nouveau_abi16_ntfy *ntfy;  	struct nouveau_object *object;  	struct nv_dma_class args = {}; @@ -404,10 +404,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)  	if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))  		return nouveau_abi16_put(abi16, -EINVAL); -	list_for_each_entry_safe(chan, temp, &abi16->channels, head) { -		if (chan->chan->handle == (NVDRM_CHAN | info->channel)) +	list_for_each_entry(temp, &abi16->channels, head) { +		if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { +			chan = temp;  			break; -		chan = NULL; +		}  	}  	if (!chan) @@ -459,17 +460,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)  {  	struct drm_nouveau_gpuobj_free *fini = data;  	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); -	struct nouveau_abi16_chan *chan, *temp; +	struct nouveau_abi16_chan *chan = NULL, *temp;  	struct nouveau_abi16_ntfy *ntfy;  	int ret;  	if (unlikely(!abi16))  		return -ENOMEM; -	list_for_each_entry_safe(chan, temp, &abi16->channels, head) { -		if (chan->chan->handle == (NVDRM_CHAN | fini->channel)) +	list_for_each_entry(temp, &abi16->channels, head) { +		if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { +			chan = temp;  			break; -		chan = NULL; +		}  	}  	if (!chan) diff 
--git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index d1099365bfc..c95decf543e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -72,11 +72,25 @@ module_param_named(modeset, nouveau_modeset, int, 0400);  static struct drm_driver driver;  static int +nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) +{ +	struct nouveau_drm *drm = +		container_of(event, struct nouveau_drm, vblank[head]); +	drm_handle_vblank(drm->dev, head); +	return NVKM_EVENT_KEEP; +} + +static int  nouveau_drm_vblank_enable(struct drm_device *dev, int head)  {  	struct nouveau_drm *drm = nouveau_drm(dev);  	struct nouveau_disp *pdisp = nouveau_disp(drm->device); -	nouveau_event_get(pdisp->vblank, head, &drm->vblank); + +	if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank))) +		return -EIO; +	WARN_ON_ONCE(drm->vblank[head].func); +	drm->vblank[head].func = nouveau_drm_vblank_handler; +	nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);  	return 0;  } @@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head)  {  	struct nouveau_drm *drm = nouveau_drm(dev);  	struct nouveau_disp *pdisp = nouveau_disp(drm->device); -	nouveau_event_put(pdisp->vblank, head, &drm->vblank); -} - -static int -nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) -{ -	struct nouveau_drm *drm = -		container_of(event, struct nouveau_drm, vblank); -	drm_handle_vblank(drm->dev, head); -	return NVKM_EVENT_KEEP; +	if (drm->vblank[head].func) +		nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]); +	else +		WARN_ON_ONCE(1); +	drm->vblank[head].func = NULL;  }  static u64 @@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)  	dev->dev_private = drm;  	drm->dev = dev; -	drm->vblank.func = nouveau_drm_vblank_handler;  	INIT_LIST_HEAD(&drm->clients);  	spin_lock_init(&drm->tile.lock); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h 
b/drivers/gpu/drm/nouveau/nouveau_drm.h index b25df374c90..9c39bafbef2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -113,7 +113,7 @@ struct nouveau_drm {  	struct nvbios vbios;  	struct nouveau_display *display;  	struct backlight_device *backlight; -	struct nouveau_eventh vblank; +	struct nouveau_eventh vblank[4];  	/* power management */  	struct nouveau_pm *pm; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 7f0e6c3f37d..1ddc03e51bf 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data)  {  	struct nv50_display_flip *flip = data;  	if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == -					      flip->chan->data); +					      flip->chan->data)  		return true;  	usleep_range(1, 2);  	return false; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b8015913d38..fa3c56fba29 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -99,6 +99,29 @@ static bool radeon_read_bios(struct radeon_device *rdev)  	return true;  } +static bool radeon_read_platform_bios(struct radeon_device *rdev) +{ +	uint8_t __iomem *bios; +	size_t size; + +	rdev->bios = NULL; + +	bios = pci_platform_rom(rdev->pdev, &size); +	if (!bios) { +		return false; +	} + +	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { +		return false; +	} +	rdev->bios = kmemdup(bios, size, GFP_KERNEL); +	if (rdev->bios == NULL) { +		return false; +	} + +	return true; +} +  #ifdef CONFIG_ACPI  /* ATRM is used to get the BIOS on the discrete cards in   * dual-gpu systems. 
@@ -620,6 +643,9 @@ bool radeon_get_bios(struct radeon_device *rdev)  	if (r == false) {  		r = radeon_read_disabled_bios(rdev);  	} +	if (r == false) { +		r = radeon_read_platform_bios(rdev); +	}  	if (r == false || rdev->bios == NULL) {  		DRM_ERROR("Unable to locate a BIOS ROM\n");  		rdev->bios = NULL; diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index fe5cdbcf263..b44d548c56f 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector)  	int ret;  	edid = (struct edid *)udl_get_edid(udl); +	if (!edid) { +		drm_mode_connector_update_edid_property(connector, NULL); +		return 0; +	}  	/*  	 * We only read the main block, but if the monitor reports extension diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 512b01c04ea..aa341d13586 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, -	{ HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, @@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev)  		     hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))  			return true;  		break; +	case USB_VENDOR_ID_ATMEL_V_USB: +		/* Masterkit MA901 usb radio based on Atmel tiny85 chip and +		 * it has the same USB ID as many Atmel V-USB devices. This +		 * usb radio is handled by radio-ma901.c driver so we want +		 * ignore the hid. 
Check the name, bus, product and ignore +		 * if we have MA901 usb radio. +		 */ +		if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB && +			hdev->bus == BUS_USB && +			strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) +			return true; +		break;  	}  	if (hdev->type == HID_TYPE_USBMOUSE && diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index c4388776f4e..5309fd5eb0e 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -158,6 +158,8 @@  #define USB_VENDOR_ID_ATMEL		0x03eb  #define USB_DEVICE_ID_ATMEL_MULTITOUCH	0x211c  #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER	0x2118 +#define USB_VENDOR_ID_ATMEL_V_USB	0x16c0 +#define USB_DEVICE_ID_ATMEL_V_USB	0x05df  #define USB_VENDOR_ID_AUREAL		0x0755  #define USB_DEVICE_ID_AUREAL_W01RN	0x2626 @@ -557,9 +559,6 @@  #define USB_VENDOR_ID_MADCATZ		0x0738  #define USB_DEVICE_ID_MADCATZ_BEATPAD	0x4540 -#define USB_VENDOR_ID_MASTERKIT			0x16c0 -#define USB_DEVICE_ID_MASTERKIT_MA901RADIO	0x05df -  #define USB_VENDOR_ID_MCC		0x09db  #define USB_DEVICE_ID_MCC_PMD1024LS	0x0076  #define USB_DEVICE_ID_MCC_PMD1208LS	0x007a diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index f7f113ba083..a8ce44296cf 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev,  	return 0;  } +static void magicmouse_input_configured(struct hid_device *hdev, +		struct hid_input *hi) + +{ +	struct magicmouse_sc *msc = hid_get_drvdata(hdev); + +	int ret = magicmouse_setup_input(msc->input, hdev); +	if (ret) { +		hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); +		/* clean msc->input to notify probe() of the failure */ +		msc->input = NULL; +	} +} + +  static int magicmouse_probe(struct hid_device *hdev,  	const struct hid_device_id *id)  { @@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev,  		goto err_free;  	} -	/* We do this after hid-input is done parsing reports so that -	 * 
hid-input uses the most natural button and axis IDs. -	 */ -	if (msc->input) { -		ret = magicmouse_setup_input(msc->input, hdev); -		if (ret) { -			hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); -			goto err_stop_hw; -		} +	if (!msc->input) { +		hid_err(hdev, "magicmouse input not registered\n"); +		ret = -ENOMEM; +		goto err_stop_hw;  	}  	if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) @@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = {  	.remove = magicmouse_remove,  	.raw_event = magicmouse_raw_event,  	.input_mapping = magicmouse_input_mapping, +	.input_configured = magicmouse_input_configured,  };  module_hid_driver(magicmouse_driver); diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index db713c0dfba..461a0d739d7 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)  	ret = pm_runtime_get_sync(dev);  	if (ret < 0) {  		dev_err(dev, "%s: can't power on device\n", __func__); +		pm_runtime_put_noidle(dev); +		module_put(dev->driver->owner);  		return ret;  	} diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0ceb6e1b0f6..e3085c487ac 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev)  	adap->algo = &i2c_dw_algo;  	adap->dev.parent = &pdev->dev;  	adap->dev.of_node = pdev->dev.of_node; -	ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev));  	r = i2c_add_numbered_adapter(adap);  	if (r) { diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 5d667501386..1a38dd7dfe4 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {  	ICPU(0x3c, idle_cpu_hsw),  	ICPU(0x3f, idle_cpu_hsw),  	
ICPU(0x45, idle_cpu_hsw), +	ICPU(0x46, idle_cpu_hsw),  	{}  };  MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 08a6c6d39e5..911205d3d5a 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -44,7 +44,7 @@  #include "qib.h"  #include "qib_7220.h" -#define SD7220_FW_NAME "intel/sd7220.fw" +#define SD7220_FW_NAME "qlogic/sd7220.fw"  MODULE_FIRMWARE(SD7220_FW_NAME);  /* diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 1daa97913b7..0bfd8cf2520 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)  		case 0x802: /* Intuos4 General Pen */  		case 0x804: /* Intuos4 Marker Pen */  		case 0x40802: /* Intuos4 Classic Pen */ -		case 0x18803: /* DTH2242 Grip Pen */ +		case 0x18802: /* DTH2242 Grip Pen */  		case 0x022:  			wacom->tool[idx] = BTN_TOOL_PEN;  			break; @@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =  	{ "Wacom Intuos4 12x19",  WACOM_PKGLEN_INTUOS,    97536, 60960, 2047,  	  63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };  static const struct wacom_features wacom_features_0xBC = -	{ "Wacom Intuos4 WL",     WACOM_PKGLEN_INTUOS,    40840, 25400, 2047, +	{ "Wacom Intuos4 WL",     WACOM_PKGLEN_INTUOS,    40640, 25400, 2047,  	  63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };  static const struct wacom_features wacom_features_0x26 =  	{ "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS,  31496, 19685, 2047, @@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = {  	{ USB_DEVICE_WACOM(0x44) },  	{ USB_DEVICE_WACOM(0x45) },  	{ USB_DEVICE_WACOM(0x59) }, -	{ USB_DEVICE_WACOM(0x5D) }, +	{ USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },  	{ USB_DEVICE_WACOM(0xB0) },  	{ USB_DEVICE_WACOM(0xB1) },  	{ USB_DEVICE_WACOM(0xB2) }, @@ -2209,7 +2209,7 @@ 
const struct usb_device_id wacom_ids[] = {  	{ USB_DEVICE_WACOM(0x47) },  	{ USB_DEVICE_WACOM(0xF4) },  	{ USB_DEVICE_WACOM(0xF8) }, -	{ USB_DEVICE_WACOM(0xF6) }, +	{ USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },  	{ USB_DEVICE_WACOM(0xFA) },  	{ USB_DEVICE_LENOVO(0x6004) },  	{ } diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index a32e0d5aa45..fc6aebf1e4b 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)  	if (gic_arch_extn.irq_retrigger)  		return gic_arch_extn.irq_retrigger(d); -	return -ENXIO; +	/* the genirq layer expects 0 if we can't retrigger in hardware */ +	return 0;  }  #ifdef CONFIG_SMP diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 66120bd46d1..10744091e6c 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -6,6 +6,7 @@  #include "dm.h"  #include "dm-bio-prison.h" +#include "dm-bio-record.h"  #include "dm-cache-metadata.h"  #include <linux/dm-io.h> @@ -201,10 +202,15 @@ struct per_bio_data {  	unsigned req_nr:2;  	struct dm_deferred_entry *all_io_entry; -	/* writethrough fields */ +	/* +	 * writethrough fields.  These MUST remain at the end of this +	 * structure and the 'cache' member must be the first as it +	 * is used to determine the offsetof the writethrough fields. +	 */  	struct cache *cache;  	dm_cblock_t cblock;  	bio_end_io_t *saved_bi_end_io; +	struct dm_bio_details bio_details;  };  struct dm_cache_migration { @@ -513,16 +519,28 @@ static void save_stats(struct cache *cache)  /*----------------------------------------------------------------   * Per bio data   *--------------------------------------------------------------*/ -static struct per_bio_data *get_per_bio_data(struct bio *bio) + +/* + * If using writeback, leave out struct per_bio_data's writethrough fields. 
+ */ +#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) +#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) + +static size_t get_per_bio_data_size(struct cache *cache) +{ +	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; +} + +static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)  { -	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); +	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);  	BUG_ON(!pb);  	return pb;  } -static struct per_bio_data *init_per_bio_data(struct bio *bio) +static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)  { -	struct per_bio_data *pb = get_per_bio_data(bio); +	struct per_bio_data *pb = get_per_bio_data(bio, data_size);  	pb->tick = false;  	pb->req_nr = dm_bio_get_target_bio_nr(bio); @@ -556,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,  static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)  {  	unsigned long flags; -	struct per_bio_data *pb = get_per_bio_data(bio); +	size_t pb_data_size = get_per_bio_data_size(cache); +	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);  	spin_lock_irqsave(&cache->lock, flags);  	if (cache->need_tick_bio && @@ -635,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)  static void writethrough_endio(struct bio *bio, int err)  { -	struct per_bio_data *pb = get_per_bio_data(bio); +	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);  	bio->bi_end_io = pb->saved_bi_end_io;  	if (err) { @@ -643,6 +662,7 @@ static void writethrough_endio(struct bio *bio, int err)  		return;  	} +	dm_bio_restore(&pb->bio_details, bio);  	remap_to_cache(pb->cache, bio, pb->cblock);  	/* @@ -662,11 +682,12 @@ static void writethrough_endio(struct bio *bio, int err)  static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,  				       dm_oblock_t oblock, dm_cblock_t cblock)  
{ -	struct per_bio_data *pb = get_per_bio_data(bio); +	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);  	pb->cache = cache;  	pb->cblock = cblock;  	pb->saved_bi_end_io = bio->bi_end_io; +	dm_bio_record(&pb->bio_details, bio);  	bio->bi_end_io = writethrough_endio;  	remap_to_origin_clear_discard(pb->cache, bio, oblock); @@ -1035,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio)  static void process_flush_bio(struct cache *cache, struct bio *bio)  { -	struct per_bio_data *pb = get_per_bio_data(bio); +	size_t pb_data_size = get_per_bio_data_size(cache); +	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);  	BUG_ON(bio->bi_size);  	if (!pb->req_nr) @@ -1107,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,  	dm_oblock_t block = get_bio_block(cache, bio);  	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;  	struct policy_result lookup_result; -	struct per_bio_data *pb = get_per_bio_data(bio); +	size_t pb_data_size = get_per_bio_data_size(cache); +	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);  	bool discarded_block = is_discarded_oblock(cache, block);  	bool can_migrate = discarded_block || spare_migration_bandwidth(cache); @@ -1881,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)  	cache->ti = ca->ti;  	ti->private = cache; -	ti->per_bio_data_size = sizeof(struct per_bio_data);  	ti->num_flush_bios = 2;  	ti->flush_supported = true; @@ -1890,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)  	ti->discard_zeroes_data_unsupported = true;  	memcpy(&cache->features, &ca->features, sizeof(cache->features)); +	ti->per_bio_data_size = get_per_bio_data_size(cache);  	cache->callbacks.congested_fn = cache_is_congested;  	dm_table_add_target_callbacks(ti->table, &cache->callbacks); @@ -2092,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)  	int r;  	dm_oblock_t block = 
get_bio_block(cache, bio); +	size_t pb_data_size = get_per_bio_data_size(cache);  	bool can_migrate = false;  	bool discarded_block;  	struct dm_bio_prison_cell *cell; @@ -2108,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)  		return DM_MAPIO_REMAPPED;  	} -	pb = init_per_bio_data(bio); +	pb = init_per_bio_data(bio, pb_data_size);  	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {  		defer_bio(cache, bio); @@ -2193,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)  {  	struct cache *cache = ti->private;  	unsigned long flags; -	struct per_bio_data *pb = get_per_bio_data(bio); +	size_t pb_data_size = get_per_bio_data_size(cache); +	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);  	if (pb->tick) {  		policy_tick(cache->policy); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7e469260fe5..9a0bdad9ad8 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error)  			queue_io(md, bio);  		} else {  			/* done with normal IO or empty flush */ +			trace_block_bio_complete(md->queue, bio, io_error);  			bio_endio(bio, io_error);  		}  	} diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 24909eb13fe..f4e87bfc756 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi)  		return_bi = bi->bi_next;  		bi->bi_next = NULL;  		bi->bi_size = 0; +		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), +					 bi, 0);  		bio_endio(bi, 0);  		bi = return_bi;  	} @@ -3914,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error)  	rdev_dec_pending(rdev, conf->mddev);  	if (!error && uptodate) { +		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), +					 raid_bi, 0);  		bio_endio(raid_bi, 0);  		if (atomic_dec_and_test(&conf->active_aligned_reads))  			wake_up(&conf->wait_for_stripe); @@ -4382,6 +4386,8 @@ static void make_request(struct mddev 
*mddev, struct bio * bi)  		if ( rw == WRITE )  			md_write_end(mddev); +		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), +					 bi, 0);  		bio_endio(bi, 0);  	}  } @@ -4758,8 +4764,11 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)  		handled++;  	}  	remaining = raid5_dec_bi_active_stripes(raid_bio); -	if (remaining == 0) +	if (remaining == 0) { +		trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), +					 raid_bio, 0);  		bio_endio(raid_bio, 0); +	}  	if (atomic_dec_and_test(&conf->active_aligned_reads))  		wake_up(&conf->wait_for_stripe);  	return handled; diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 05d7b633346..a0639e77997 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC  config VIDEO_SH_VEU  	tristate "SuperH VEU mem2mem video processing driver" -	depends on VIDEO_DEV && VIDEO_V4L2 +	depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS  	select VIDEOBUF2_DMA_CONTIG  	select V4L2_MEM2MEM_DEV  	help diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c index c61f590029a..348dafc0318 100644 --- a/drivers/media/radio/radio-ma901.c +++ b/drivers/media/radio/radio-ma901.c @@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev)  static int usb_ma901radio_probe(struct usb_interface *intf,  				const struct usb_device_id *id)  { +	struct usb_device *dev = interface_to_usbdev(intf);  	struct ma901radio_device *radio;  	int retval = 0; +	/* Masterkit MA901 usb radio has the same USB ID as many others +	 * Atmel V-USB devices. Let's make additional checks to be sure +	 * that this is our device. 
+	 */ + +	if (dev->product && dev->manufacturer && +		(strncmp(dev->product, "MA901", 5) != 0 +		|| strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0)) +		return -ENODEV; +  	radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL);  	if (!radio) {  		dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig index 39c2ecadb27..ea98f7e9ccd 100644 --- a/drivers/misc/vmw_vmci/Kconfig +++ b/drivers/misc/vmw_vmci/Kconfig @@ -4,7 +4,7 @@  config VMWARE_VMCI  	tristate "VMware VMCI Driver" -	depends on X86 && PCI +	depends on X86 && PCI && NET  	help  	  This is VMware's Virtual Machine Communication Interface.  It enables  	  high-speed communication between host and guest in a virtual diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 92ab30ab00d..dc571ebc1aa 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,  }  #endif -static inline unsigned long get_vm_size(struct vm_area_struct *vma) -{ -	return vma->vm_end - vma->vm_start; -} - -static inline resource_size_t get_vm_offset(struct vm_area_struct *vma) -{ -	return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT; -} - -/* - * Set a new vm offset. - * - * Verify that the incoming offset really works as a page offset, - * and that the offset and size fit in a resource_size_t. 
- */ -static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off) -{ -	pgoff_t pgoff = off >> PAGE_SHIFT; -	if (off != (resource_size_t) pgoff << PAGE_SHIFT) -		return -EINVAL; -	if (off + get_vm_size(vma) - 1 < off) -		return -EINVAL; -	vma->vm_pgoff = pgoff; -	return 0; -} -  /*   * set up a mapping for shared memory segments   */ @@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)  	struct mtd_file_info *mfi = file->private_data;  	struct mtd_info *mtd = mfi->mtd;  	struct map_info *map = mtd->priv; -	resource_size_t start, off; -	unsigned long len, vma_len;          /* This is broken because it assumes the MTD device is map-based  	   and that mtd->priv is a valid struct map_info.  It should be  	   replaced with something that uses the mtd_get_unmapped_area()  	   operation properly. */  	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { -		off = get_vm_offset(vma); -		start = map->phys; -		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); -		start &= PAGE_MASK; -		vma_len = get_vm_size(vma); - -		/* Overflow in off+len? */ -		if (vma_len + off < off) -			return -EINVAL; -		/* Does it fit in the mapping? */ -		if (vma_len + off > len) -			return -EINVAL; - -		off += start; -		/* Did that overflow? 
*/ -		if (off < start) -			return -EINVAL; -		if (set_vm_offset(vma, off) < 0) -			return -EINVAL; -		vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; -  #ifdef pgprot_noncached -		if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) +		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))  			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);  #endif -		if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, -				       vma->vm_end - vma->vm_start, -				       vma->vm_page_prot)) -			return -EAGAIN; - -		return 0; +		return vm_iomap_memory(vma, map->phys, map->size);  	}  	return -ENOSYS;  #else diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6bbd90e1123..dbbea0eec13 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,  		if (bond->dev->flags & IFF_ALLMULTI)  			dev_set_allmulti(old_active->dev, -1); +		netif_addr_lock_bh(bond->dev);  		netdev_for_each_mc_addr(ha, bond->dev)  			dev_mc_del(old_active->dev, ha->addr); +		netif_addr_unlock_bh(bond->dev);  	}  	if (new_active) { @@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,  		if (bond->dev->flags & IFF_ALLMULTI)  			dev_set_allmulti(new_active->dev, 1); +		netif_addr_lock_bh(bond->dev);  		netdev_for_each_mc_addr(ha, bond->dev)  			dev_mc_add(new_active->dev, ha->addr); +		netif_addr_unlock_bh(bond->dev);  	}  } @@ -1901,11 +1905,29 @@ err_dest_symlinks:  	bond_destroy_slave_symlinks(bond_dev, slave_dev);  err_detach: +	if (!USES_PRIMARY(bond->params.mode)) { +		netif_addr_lock_bh(bond_dev); +		bond_mc_list_flush(bond_dev, slave_dev); +		netif_addr_unlock_bh(bond_dev); +	} +	bond_del_vlans_from_slave(bond, slave_dev);  	write_lock_bh(&bond->lock);  	bond_detach_slave(bond, new_slave); +	if (bond->primary_slave == new_slave) +		bond->primary_slave = NULL;  	
write_unlock_bh(&bond->lock); +	if (bond->curr_active_slave == new_slave) { +		read_lock(&bond->lock); +		write_lock_bh(&bond->curr_slave_lock); +		bond_change_active_slave(bond, NULL); +		bond_select_active_slave(bond); +		write_unlock_bh(&bond->curr_slave_lock); +		read_unlock(&bond->lock); +	} +	slave_disable_netpoll(new_slave);  err_close: +	slave_dev->priv_flags &= ~IFF_BONDING;  	dev_close(slave_dev);  err_unset_master: @@ -1976,12 +1998,11 @@ static int __bond_release_one(struct net_device *bond_dev,  		return -EINVAL;  	} +	write_unlock_bh(&bond->lock);  	/* unregister rx_handler early so bond_handle_frame wouldn't be called  	 * for this slave anymore.  	 */  	netdev_rx_handler_unregister(slave_dev); -	write_unlock_bh(&bond->lock); -	synchronize_net();  	write_lock_bh(&bond->lock);  	if (!all && !bond->params.fail_over_mac) { @@ -3169,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event,  				   struct net_device *slave_dev)  {  	struct slave *slave = bond_slave_get_rtnl(slave_dev); -	struct bonding *bond = slave->bond; -	struct net_device *bond_dev = slave->bond->dev; +	struct bonding *bond; +	struct net_device *bond_dev;  	u32 old_speed;  	u8 old_duplex; +	/* A netdev event can be generated while enslaving a device +	 * before netdev_rx_handler_register is called in which case +	 * slave will be NULL +	 */ +	if (!slave) +		return NOTIFY_DONE; +	bond_dev = slave->bond->dev; +	bond = slave->bond; +  	switch (event) {  	case NETDEV_UNREGISTER:  		if (bond->setup_by_slave) @@ -3287,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)   */  static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)  { -	struct ethhdr *data = (struct ethhdr *)skb->data; -	struct iphdr *iph; -	struct ipv6hdr *ipv6h; +	const struct ethhdr *data; +	const struct iphdr *iph; +	const struct ipv6hdr *ipv6h;  	u32 v6hash; -	__be32 *s, *d; +	const __be32 *s, *d;  	if (skb->protocol == htons(ETH_P_IP) && -	    
skb_network_header_len(skb) >= sizeof(*iph)) { +	    pskb_network_may_pull(skb, sizeof(*iph))) {  		iph = ip_hdr(skb); +		data = (struct ethhdr *)skb->data;  		return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^  			(data->h_dest[5] ^ data->h_source[5])) % count;  	} else if (skb->protocol == htons(ETH_P_IPV6) && -		   skb_network_header_len(skb) >= sizeof(*ipv6h)) { +		   pskb_network_may_pull(skb, sizeof(*ipv6h))) {  		ipv6h = ipv6_hdr(skb); +		data = (struct ethhdr *)skb->data;  		s = &ipv6h->saddr.s6_addr32[0];  		d = &ipv6h->daddr.s6_addr32[0];  		v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); @@ -3319,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)  static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)  {  	u32 layer4_xor = 0; -	struct iphdr *iph; -	struct ipv6hdr *ipv6h; -	__be32 *s, *d; -	__be16 *layer4hdr; +	const struct iphdr *iph; +	const struct ipv6hdr *ipv6h; +	const __be32 *s, *d; +	const __be16 *l4 = NULL; +	__be16 _l4[2]; +	int noff = skb_network_offset(skb); +	int poff;  	if (skb->protocol == htons(ETH_P_IP) && -	    skb_network_header_len(skb) >= sizeof(*iph)) { +	    pskb_may_pull(skb, noff + sizeof(*iph))) {  		iph = ip_hdr(skb); -		if (!ip_is_fragment(iph) && -		    (iph->protocol == IPPROTO_TCP || -		     iph->protocol == IPPROTO_UDP) && -		    (skb_headlen(skb) - skb_network_offset(skb) >= -		     iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { -			layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); -			layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); +		poff = proto_ports_offset(iph->protocol); + +		if (!ip_is_fragment(iph) && poff >= 0) { +			l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff, +						sizeof(_l4), &_l4); +			if (l4) +				layer4_xor = ntohs(l4[0] ^ l4[1]);  		}  		return (layer4_xor ^  			((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;  	} else if (skb->protocol == htons(ETH_P_IPV6) && -		   skb_network_header_len(skb) >= sizeof(*ipv6h)) { +		   
pskb_may_pull(skb, noff + sizeof(*ipv6h))) {  		ipv6h = ipv6_hdr(skb); -		if ((ipv6h->nexthdr == IPPROTO_TCP || -		     ipv6h->nexthdr == IPPROTO_UDP) && -		    (skb_headlen(skb) - skb_network_offset(skb) >= -		     sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { -			layer4hdr = (__be16 *)(ipv6h + 1); -			layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); +		poff = proto_ports_offset(ipv6h->nexthdr); +		if (poff >= 0) { +			l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff, +						sizeof(_l4), &_l4); +			if (l4) +				layer4_xor = ntohs(l4[0] ^ l4[1]);  		}  		s = &ipv6h->saddr.s6_addr32[0];  		d = &ipv6h->daddr.s6_addr32[0]; @@ -4847,9 +4882,18 @@ static int __net_init bond_net_init(struct net *net)  static void __net_exit bond_net_exit(struct net *net)  {  	struct bond_net *bn = net_generic(net, bond_net_id); +	struct bonding *bond, *tmp_bond; +	LIST_HEAD(list);  	bond_destroy_sysfs(bn);  	bond_destroy_proc_dir(bn); + +	/* Kill off any bonds created after unregistering bond rtnl ops */ +	rtnl_lock(); +	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) +		unregister_netdevice_queue(bond->dev, &list); +	unregister_netdevice_many(&list); +	rtnl_unlock();  }  static struct pernet_operations bond_net_ops = { diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index db103e03ba0..ea7a388f484 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -527,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,  		goto out;  	}  	if (new_value < 0) { -		pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n", +		pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",  		       bond->dev->name, new_value, INT_MAX);  		ret = -EINVAL;  		goto out; @@ -542,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,  	pr_info("%s: Setting ARP monitoring interval to %d.\n",  		bond->dev->name, new_value);  	bond->params.arp_interval = 
new_value; -	if (bond->params.miimon) { -		pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", -			bond->dev->name, bond->dev->name); -		bond->params.miimon = 0; -	} -	if (!bond->params.arp_targets[0]) { -		pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", -			bond->dev->name); +	if (new_value) { +		if (bond->params.miimon) { +			pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", +				bond->dev->name, bond->dev->name); +			bond->params.miimon = 0; +		} +		if (!bond->params.arp_targets[0]) +			pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", +				bond->dev->name);  	}  	if (bond->dev->flags & IFF_UP) {  		/* If the interface is up, we may need to fire off @@ -557,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,  		 * timer will get fired off when the open function  		 * is called.  		 */ -		cancel_delayed_work_sync(&bond->mii_work); -		queue_delayed_work(bond->wq, &bond->arp_work, 0); +		if (!new_value) { +			cancel_delayed_work_sync(&bond->arp_work); +		} else { +			cancel_delayed_work_sync(&bond->mii_work); +			queue_delayed_work(bond->wq, &bond->arp_work, 0); +		}  	} -  out:  	rtnl_unlock();  	return ret; @@ -702,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d,  	}  	if (new_value < 0) {  		pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", -		       bond->dev->name, new_value, 1, INT_MAX); +		       bond->dev->name, new_value, 0, INT_MAX);  		ret = -EINVAL;  		goto out;  	} else { @@ -757,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d,  		goto out;  	}  	if (new_value < 0) { -		pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", -		       bond->dev->name, new_value, 1, INT_MAX); +		pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n", +		       bond->dev->name, 
new_value, 0, INT_MAX);  		ret = -EINVAL;  		goto out;  	} else { @@ -968,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d,  	}  	if (new_value < 0) {  		pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", -		       bond->dev->name, new_value, 1, INT_MAX); +		       bond->dev->name, new_value, 0, INT_MAX);  		ret = -EINVAL;  		goto out; -	} else { -		pr_info("%s: Setting MII monitoring interval to %d.\n", -			bond->dev->name, new_value); -		bond->params.miimon = new_value; -		if (bond->params.updelay) -			pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", -				bond->dev->name, -				bond->params.updelay * bond->params.miimon); -		if (bond->params.downdelay) -			pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", -				bond->dev->name, -				bond->params.downdelay * bond->params.miimon); -		if (bond->params.arp_interval) { -			pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", -				bond->dev->name); -			bond->params.arp_interval = 0; -			if (bond->params.arp_validate) { -				bond->params.arp_validate = -					BOND_ARP_VALIDATE_NONE; -			} -		} - -		if (bond->dev->flags & IFF_UP) { -			/* If the interface is up, we may need to fire off -			 * the MII timer. If the interface is down, the -			 * timer will get fired off when the open function -			 * is called. 
-			 */ +	} +	pr_info("%s: Setting MII monitoring interval to %d.\n", +		bond->dev->name, new_value); +	bond->params.miimon = new_value; +	if (bond->params.updelay) +		pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", +			bond->dev->name, +			bond->params.updelay * bond->params.miimon); +	if (bond->params.downdelay) +		pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", +			bond->dev->name, +			bond->params.downdelay * bond->params.miimon); +	if (new_value && bond->params.arp_interval) { +		pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", +			bond->dev->name); +		bond->params.arp_interval = 0; +		if (bond->params.arp_validate) +			bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; +	} +	if (bond->dev->flags & IFF_UP) { +		/* If the interface is up, we may need to fire off +		 * the MII timer. If the interface is down, the +		 * timer will get fired off when the open function +		 * is called. +		 */ +		if (!new_value) { +			cancel_delayed_work_sync(&bond->mii_work); +		} else {  			cancel_delayed_work_sync(&bond->arp_work);  			queue_delayed_work(bond->wq, &bond->mii_work, 0);  		} diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index f32b9fc6a98..9aa0c64c33c 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -929,6 +929,7 @@ static int mcp251x_open(struct net_device *net)  	struct mcp251x_priv *priv = netdev_priv(net);  	struct spi_device *spi = priv->spi;  	struct mcp251x_platform_data *pdata = spi->dev.platform_data; +	unsigned long flags;  	int ret;  	ret = open_candev(net); @@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net)  	priv->tx_skb = NULL;  	priv->tx_len = 0; +	flags = IRQF_ONESHOT; +	if (pdata->irq_flags) +		flags |= pdata->irq_flags; +	else +		flags |= IRQF_TRIGGER_FALLING; +  	ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, -		  pdata->irq_flags ? 
pdata->irq_flags : IRQF_TRIGGER_FALLING, -		  DEVICE_NAME, priv); +				   flags, DEVICE_NAME, priv);  	if (ret) {  		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);  		if (pdata->transceiver_enable) diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index b39ca5b3ea7..ff2ba86cd4a 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -46,6 +46,7 @@ config CAN_EMS_PCI  config CAN_PEAK_PCMCIA  	tristate "PEAK PCAN-PC Card"  	depends on PCMCIA +	depends on HAS_IOPORT  	---help---  	  This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)  	  from PEAK-System (http://www.peak-system.com). To compile this diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index a042cdc260d..3c18d7d000e 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)  	 */  	if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==  	    REG_CR_BASICCAN_INITIAL && -	    (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) && +	    (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&  	    (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))  		flag = 1; @@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)  	 * See states on p. 23 of the Datasheet.  	 
*/  	if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && -	    priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL && +	    priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&  	    priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)  		return flag; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index daf4013a8fc..e4df307eaa9 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)  	 */  	spin_lock_irqsave(&priv->cmdreg_lock, flags);  	priv->write_reg(priv, REG_CMR, val); -	priv->read_reg(priv, REG_SR); +	priv->read_reg(priv, SJA1000_REG_SR);  	spin_unlock_irqrestore(&priv->cmdreg_lock, flags);  } @@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)  	while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {  		n++; -		status = priv->read_reg(priv, REG_SR); +		status = priv->read_reg(priv, SJA1000_REG_SR);  		/* check for absent controller due to hw unplug */  		if (status == 0xFF && sja1000_is_absent(priv))  			return IRQ_NONE; @@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)  			/* receive interrupt */  			while (status & SR_RBS) {  				sja1000_rx(dev); -				status = priv->read_reg(priv, REG_SR); +				status = priv->read_reg(priv, SJA1000_REG_SR);  				/* check for absent controller */  				if (status == 0xFF && sja1000_is_absent(priv))  					return IRQ_NONE; diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index afa99847a51..aa48e053da2 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h @@ -56,7 +56,7 @@  /* SJA1000 registers - manual section 6.4 (Pelican Mode) */  #define REG_MOD		0x00  #define REG_CMR		0x01 -#define REG_SR		0x02 +#define SJA1000_REG_SR		0x02  #define REG_IR		0x03  #define REG_IER		0x04  #define REG_ALC		0x0B diff --git 
a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 6433b81256c..8e0c4a00193 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)  	struct net_device *dev;  	struct sja1000_priv *priv;  	struct resource res; -	const u32 *prop; -	int err, irq, res_size, prop_size; +	u32 prop; +	int err, irq, res_size;  	void __iomem *base;  	err = of_address_to_resource(np, 0, &res); @@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)  	priv->read_reg = sja1000_ofp_read_reg;  	priv->write_reg = sja1000_ofp_write_reg; -	prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size); -	if (prop && (prop_size ==  sizeof(u32))) -		priv->can.clock.freq = *prop / 2; +	err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop); +	if (!err) +		priv->can.clock.freq = prop / 2;  	else  		priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */ -	prop = of_get_property(np, "nxp,tx-output-mode", &prop_size); -	if (prop && (prop_size == sizeof(u32))) -		priv->ocr |= *prop & OCR_MODE_MASK; +	err = of_property_read_u32(np, "nxp,tx-output-mode", &prop); +	if (!err) +		priv->ocr |= prop & OCR_MODE_MASK;  	else  		priv->ocr |= OCR_MODE_NORMAL; /* default */ -	prop = of_get_property(np, "nxp,tx-output-config", &prop_size); -	if (prop && (prop_size == sizeof(u32))) -		priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK; +	err = of_property_read_u32(np, "nxp,tx-output-config", &prop); +	if (!err) +		priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;  	else  		priv->ocr |= OCR_TX0_PULLDOWN; /* default */ -	prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size); -	if (prop && (prop_size == sizeof(u32)) && *prop) { -		u32 divider = priv->can.clock.freq * 2 / *prop; +	err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop); +	if (!err && prop) { +		u32 
divider = priv->can.clock.freq * 2 / prop;  		if (divider > 1)  			priv->cdr |= divider / 2 - 1; @@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)  		priv->cdr |= CDR_CLK_OFF; /* default */  	} -	prop = of_get_property(np, "nxp,no-comparator-bypass", NULL); -	if (!prop) +	if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))  		priv->cdr |= CDR_CBP; /* default */  	priv->irq_flags = IRQF_SHARED; diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index cab306a9888..e1d26433d61 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)  	struct ei_device *ei_local;  	struct ax_device *ax;  	struct resource *irq, *mem, *mem2; -	resource_size_t mem_size, mem2_size = 0; +	unsigned long mem_size, mem2_size = 0;  	int ret = 0;  	dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h index 829b5ad71d0..b5fd934585e 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h @@ -186,7 +186,7 @@ struct atl1e_tpd_desc {  /* how about 0x2000 */  #define MAX_TX_BUF_LEN      0x2000  #define MAX_TX_BUF_SHIFT    13 -/*#define MAX_TX_BUF_LEN  0x3000 */ +#define MAX_TSO_SEG_SIZE    0x3c00  /* rrs word 1 bit 0:31 */  #define RRS_RX_CSUM_MASK	0xFFFF @@ -438,7 +438,6 @@ struct atl1e_adapter {  	struct atl1e_hw        hw;  	struct atl1e_hw_stats  hw_stats; -	bool have_msi;  	u32 wol;  	u16 link_speed;  	u16 link_duplex; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 92f4734f860..ac25f05ff68 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)  	struct net_device 
*netdev = adapter->netdev;  	free_irq(adapter->pdev->irq, netdev); - -	if (adapter->have_msi) -		pci_disable_msi(adapter->pdev);  }  static int atl1e_request_irq(struct atl1e_adapter *adapter)  {  	struct pci_dev    *pdev   = adapter->pdev;  	struct net_device *netdev = adapter->netdev; -	int flags = 0;  	int err = 0; -	adapter->have_msi = true; -	err = pci_enable_msi(pdev); -	if (err) { -		netdev_dbg(netdev, -			   "Unable to allocate MSI interrupt Error: %d\n", err); -		adapter->have_msi = false; -	} - -	if (!adapter->have_msi) -		flags |= IRQF_SHARED; -	err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev); +	err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, +			  netdev);  	if (err) {  		netdev_dbg(adapter->netdev,  			   "Unable to allocate interrupt Error: %d\n", err); -		if (adapter->have_msi) -			pci_disable_msi(pdev);  		return err;  	}  	netdev_dbg(netdev, "atl1e_request_irq OK\n"); @@ -2344,6 +2329,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	INIT_WORK(&adapter->reset_task, atl1e_reset_task);  	INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); +	netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);  	err = register_netdev(netdev);  	if (err) {  		netdev_err(netdev, "register netdevice failed\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4046f97378c..57619dd4a92 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)  			}  		} +		/* initialize FW coalescing state machines in RAM */ +		bnx2x_update_coalesce(bp); +  		/* setup the leading queue */  		rc = bnx2x_setup_leading(bp);  		if (rc) { @@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,  	u32 enable_flag = disable ? 
0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);  	u32 addr = BAR_CSTRORM_INTMEM +  		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); -	u16 flags = REG_RD16(bp, addr); +	u8 flags = REG_RD8(bp, addr);  	/* clear and set */  	flags &= ~HC_INDEX_DATA_HC_ENABLED;  	flags |= enable_flag; -	REG_WR16(bp, addr, flags); +	REG_WR8(bp, addr, flags);  	DP(NETIF_MSG_IFUP,  	   "port %x fw_sb_id %d sb_index %d disable %d\n",  	   port, fw_sb_id, sb_index, disable); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 77ebae0ac64..0283f343b0d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,  {  	struct bnx2x *bp = params->bp;  	u16 base_page, next_page, not_kr2_device, lane; -	int sigdet = bnx2x_warpcore_get_sigdet(phy, params); - -	if (!sigdet) { -		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) -			bnx2x_kr2_recovery(params, vars, phy); -		return; -	} +	int sigdet;  	/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery  	 * since some switches tend to reinit the AN process and clear the @@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params,  		vars->check_kr2_recovery_cnt--;  		return;  	} + +	sigdet = bnx2x_warpcore_get_sigdet(phy, params); +	if (!sigdet) { +		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { +			bnx2x_kr2_recovery(params, vars, phy); +			DP(NETIF_MSG_LINK, "No sigdet\n"); +		} +		return; +	} +  	lane = bnx2x_get_warpcore_lane(phy, params);  	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,  			  MDIO_AER_BLOCK_AER_REG, lane); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e81a747ea8c..c50696b396f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)  				  q);  	} -	if (!NO_FCOE(bp)) { +	if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {  		fp = &bp->fp[FCOE_IDX(bp)];  		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; @@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)  				REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);  			}  		} +		if (!CHIP_IS_E1x(bp)) +			/* block FW from writing to host */ +			REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); +  		/* wait until BRB is empty */  		tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);  		while (timer_count) { @@ -13354,6 +13358,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)  	RCU_INIT_POINTER(bp->cnic_ops, NULL);  	mutex_unlock(&bp->cnic_mutex);  	synchronize_rcu(); +	bp->cnic_enabled = false;  	kfree(bp->cnic_kwq);  	bp->cnic_kwq = NULL; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 67d2663b397..17a972734ba 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -14604,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp)  		if (j + len > block_end)  			goto partno; -		memcpy(tp->fw_ver, &vpd_data[j], len); -		strncat(tp->fw_ver, " bc ", vpdlen - len - 1); +		if (len >= sizeof(tp->fw_ver)) +			len = sizeof(tp->fw_ver) - 1; +		memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); +		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, +			 &vpd_data[j]);  	}  partno: diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index a170065b597..b0ebc9f6d55 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -163,6 +163,7 @@  #define XGMAC_FLOW_CTRL_FCB_BPA	0x00000001	/* Flow Control Busy ... 
*/  /* XGMAC_INT_STAT reg */ +#define XGMAC_INT_STAT_PMTIM	0x00800000	/* PMT Interrupt Mask */  #define XGMAC_INT_STAT_PMT	0x0080		/* PMT Interrupt Status */  #define XGMAC_INT_STAT_LPI	0x0040		/* LPI Interrupt Status */ @@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)  	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);  	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); +	/* Mask power mgt interrupt */ +	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); +  	/* XGMAC requires AXI bus init. This is a 'magic number' for now */  	writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); @@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)  		struct sk_buff *skb;  		int frame_len; +		if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) +			break; +  		entry = priv->rx_tail;  		p = priv->dma_rx + entry;  		if (desc_get_owner(p)) @@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)  	unsigned int pmt = 0;  	if (mode & WAKE_MAGIC) -		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; +		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;  	if (mode & WAKE_UCAST)  		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 8cdf02503d1..9eada8e8607 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)  		tmp = readl(reg);  } +/* + * Sleep, either by using msleep() or if we are suspending, then + * use mdelay() to sleep. 
+ */ +static void dm9000_msleep(board_info_t *db, unsigned int ms) +{ +	if (db->in_suspend) +		mdelay(ms); +	else +		msleep(ms); +} + +/* Read a word from phyxcer */ +static int +dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) +{ +	board_info_t *db = netdev_priv(dev); +	unsigned long flags; +	unsigned int reg_save; +	int ret; + +	mutex_lock(&db->addr_lock); + +	spin_lock_irqsave(&db->lock, flags); + +	/* Save previous register address */ +	reg_save = readb(db->io_addr); + +	/* Fill the phyxcer register into REG_0C */ +	iow(db, DM9000_EPAR, DM9000_PHY | reg); + +	/* Issue phyxcer read command */ +	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); + +	writeb(reg_save, db->io_addr); +	spin_unlock_irqrestore(&db->lock, flags); + +	dm9000_msleep(db, 1);		/* Wait read complete */ + +	spin_lock_irqsave(&db->lock, flags); +	reg_save = readb(db->io_addr); + +	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */ + +	/* The read data keeps on REG_0D & REG_0E */ +	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); + +	/* restore the previous address */ +	writeb(reg_save, db->io_addr); +	spin_unlock_irqrestore(&db->lock, flags); + +	mutex_unlock(&db->addr_lock); + +	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); +	return ret; +} + +/* Write a word to phyxcer */ +static void +dm9000_phy_write(struct net_device *dev, +		 int phyaddr_unused, int reg, int value) +{ +	board_info_t *db = netdev_priv(dev); +	unsigned long flags; +	unsigned long reg_save; + +	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); +	mutex_lock(&db->addr_lock); + +	spin_lock_irqsave(&db->lock, flags); + +	/* Save previous register address */ +	reg_save = readb(db->io_addr); + +	/* Fill the phyxcer register into REG_0C */ +	iow(db, DM9000_EPAR, DM9000_PHY | reg); + +	/* Fill the written data into REG_0D & REG_0E */ +	iow(db, DM9000_EPDRL, value); +	iow(db, DM9000_EPDRH, value >> 8); + +	/* Issue phyxcer write command */ +	iow(db, DM9000_EPCR, EPCR_EPOS | 
EPCR_ERPRW); + +	writeb(reg_save, db->io_addr); +	spin_unlock_irqrestore(&db->lock, flags); + +	dm9000_msleep(db, 1);		/* Wait write complete */ + +	spin_lock_irqsave(&db->lock, flags); +	reg_save = readb(db->io_addr); + +	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */ + +	/* restore the previous address */ +	writeb(reg_save, db->io_addr); + +	spin_unlock_irqrestore(&db->lock, flags); +	mutex_unlock(&db->addr_lock); +} +  /* dm9000_set_io   *   * select the specified set of io routines to use with the @@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)  	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */ +	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ +	dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ +  	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;  	/* if wol is needed, then always set NCR_WAKEEN otherwise we end @@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)  	return 0;  } -/* - * Sleep, either by using msleep() or if we are suspending, then - * use mdelay() to sleep. 
- */ -static void dm9000_msleep(board_info_t *db, unsigned int ms) -{ -	if (db->in_suspend) -		mdelay(ms); -	else -		msleep(ms); -} - -/* - *   Read a word from phyxcer - */ -static int -dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) -{ -	board_info_t *db = netdev_priv(dev); -	unsigned long flags; -	unsigned int reg_save; -	int ret; - -	mutex_lock(&db->addr_lock); - -	spin_lock_irqsave(&db->lock,flags); - -	/* Save previous register address */ -	reg_save = readb(db->io_addr); - -	/* Fill the phyxcer register into REG_0C */ -	iow(db, DM9000_EPAR, DM9000_PHY | reg); - -	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);	/* Issue phyxcer read command */ - -	writeb(reg_save, db->io_addr); -	spin_unlock_irqrestore(&db->lock,flags); - -	dm9000_msleep(db, 1);		/* Wait read complete */ - -	spin_lock_irqsave(&db->lock,flags); -	reg_save = readb(db->io_addr); - -	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */ - -	/* The read data keeps on REG_0D & REG_0E */ -	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); - -	/* restore the previous address */ -	writeb(reg_save, db->io_addr); -	spin_unlock_irqrestore(&db->lock,flags); - -	mutex_unlock(&db->addr_lock); - -	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); -	return ret; -} - -/* - *   Write a word to phyxcer - */ -static void -dm9000_phy_write(struct net_device *dev, -		 int phyaddr_unused, int reg, int value) -{ -	board_info_t *db = netdev_priv(dev); -	unsigned long flags; -	unsigned long reg_save; - -	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); -	mutex_lock(&db->addr_lock); - -	spin_lock_irqsave(&db->lock,flags); - -	/* Save previous register address */ -	reg_save = readb(db->io_addr); - -	/* Fill the phyxcer register into REG_0C */ -	iow(db, DM9000_EPAR, DM9000_PHY | reg); - -	/* Fill the written data into REG_0D & REG_0E */ -	iow(db, DM9000_EPDRL, value); -	iow(db, DM9000_EPDRH, value >> 8); - -	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);	/* Issue phyxcer 
write command */ - -	writeb(reg_save, db->io_addr); -	spin_unlock_irqrestore(&db->lock, flags); - -	dm9000_msleep(db, 1);		/* Wait write complete */ - -	spin_lock_irqsave(&db->lock,flags); -	reg_save = readb(db->io_addr); - -	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */ - -	/* restore the previous address */ -	writeb(reg_save, db->io_addr); - -	spin_unlock_irqrestore(&db->lock, flags); -	mutex_unlock(&db->addr_lock); -} -  static void  dm9000_shutdown(struct net_device *dev)  { @@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)  	db->flags |= DM9000_PLATF_SIMPLE_PHY;  #endif -	dm9000_reset(db); +	/* Fixing bug on dm9000_probe, takeover dm9000_reset(db), +	 * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo +	 * while probe stage. +	 */ + +	iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);  	/* try multiple times, DM9000 sometimes gets the read wrong */  	for (i = 0; i < 8; i++) { diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h index 55688bd1a3e..9ce058adaba 100644 --- a/drivers/net/ethernet/davicom/dm9000.h +++ b/drivers/net/ethernet/davicom/dm9000.h @@ -69,7 +69,9 @@  #define NCR_WAKEEN          (1<<6)  #define NCR_FCOL            (1<<4)  #define NCR_FDX             (1<<3) -#define NCR_LBK             (3<<1) + +#define NCR_RESERVED        (3<<1) +#define NCR_MAC_LBK         (1<<1)  #define NCR_RST	            (1<<0)  #define NSR_SPEED           (1<<7) @@ -167,5 +169,12 @@  #define ISR_LNKCHNG		(1<<5)  #define ISR_UNDERRUN		(1<<4) +/* Davicom MII registers. 
+ */ + +#define MII_DM_DSPCR		0x1b    /* DSP Control Register */ + +#define DSPCR_INIT_PARAM	0xE100	/* DSP init parameter */ +  #endif /* _DM9000X_H_ */ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08e54f3d288..2886c9b63f9 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,  	if (vlan_tx_tag_present(skb)) {  		vlan_tag = be_get_tx_vlan_tag(adapter, skb); -		__vlan_put_tag(skb, vlan_tag); -		skb->vlan_tci = 0; +		skb = __vlan_put_tag(skb, vlan_tag); +		if (skb) +			skb->vlan_tci = 0;  	}  	return skb; diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 911d0253dbb..73195f643c9 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)  	return NETDEV_TX_OK;  } +/* Init RX & TX buffer descriptors + */ +static void fec_enet_bd_init(struct net_device *dev) +{ +	struct fec_enet_private *fep = netdev_priv(dev); +	struct bufdesc *bdp; +	unsigned int i; + +	/* Initialize the receive buffer descriptors. */ +	bdp = fep->rx_bd_base; +	for (i = 0; i < RX_RING_SIZE; i++) { + +		/* Initialize the BD for every fragment in the page. */ +		if (bdp->cbd_bufaddr) +			bdp->cbd_sc = BD_ENET_RX_EMPTY; +		else +			bdp->cbd_sc = 0; +		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); +	} + +	/* Set the last buffer to wrap */ +	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); +	bdp->cbd_sc |= BD_SC_WRAP; + +	fep->cur_rx = fep->rx_bd_base; + +	/* ...and the same for transmit */ +	bdp = fep->tx_bd_base; +	fep->cur_tx = bdp; +	for (i = 0; i < TX_RING_SIZE; i++) { + +		/* Initialize the BD for every fragment in the page. 
*/ +		bdp->cbd_sc = 0; +		if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { +			dev_kfree_skb_any(fep->tx_skbuff[i]); +			fep->tx_skbuff[i] = NULL; +		} +		bdp->cbd_bufaddr = 0; +		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); +	} + +	/* Set the last buffer to wrap */ +	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); +	bdp->cbd_sc |= BD_SC_WRAP; +	fep->dirty_tx = bdp; +} +  /* This function is called to start or restart the FEC during a link   * change.  This only happens when switching between half and full   * duplex. @@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)  	/* Set maximum receive buffer size. */  	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); +	fec_enet_bd_init(ndev); +  	/* Set receive and transmit descriptor base. */  	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);  	if (fep->bufdesc_ex) @@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex)  		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)  			* RX_RING_SIZE,	fep->hwp + FEC_X_DES_START); -	fep->cur_rx = fep->rx_bd_base;  	for (i = 0; i <= TX_RING_MOD_MASK; i++) {  		if (fep->tx_skbuff[i]) { @@ -954,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)  	} else {  		if (fep->link) {  			fec_stop(ndev); +			fep->link = phy_dev->link;  			status_change = 1;  		}  	} @@ -1597,8 +1646,6 @@ static int fec_enet_init(struct net_device *ndev)  {  	struct fec_enet_private *fep = netdev_priv(ndev);  	struct bufdesc *cbd_base; -	struct bufdesc *bdp; -	unsigned int i;  	/* Allocate memory for buffer descriptors. 
*/  	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, @@ -1608,6 +1655,7 @@ static int fec_enet_init(struct net_device *ndev)  		return -ENOMEM;  	} +	memset(cbd_base, 0, PAGE_SIZE);  	spin_lock_init(&fep->hw_lock);  	fep->netdev = ndev; @@ -1631,35 +1679,6 @@ static int fec_enet_init(struct net_device *ndev)  	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);  	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); -	/* Initialize the receive buffer descriptors. */ -	bdp = fep->rx_bd_base; -	for (i = 0; i < RX_RING_SIZE; i++) { - -		/* Initialize the BD for every fragment in the page. */ -		bdp->cbd_sc = 0; -		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); -	} - -	/* Set the last buffer to wrap */ -	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); -	bdp->cbd_sc |= BD_SC_WRAP; - -	/* ...and the same for transmit */ -	bdp = fep->tx_bd_base; -	fep->cur_tx = bdp; -	for (i = 0; i < TX_RING_SIZE; i++) { - -		/* Initialize the BD for every fragment in the page. 
*/ -		bdp->cbd_sc = 0; -		bdp->cbd_bufaddr = 0; -		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); -	} - -	/* Set the last buffer to wrap */ -	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); -	bdp->cbd_sc |= BD_SC_WRAP; -	fep->dirty_tx = bdp; -  	fec_restart(ndev, 0);  	return 0; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index ec800b093e7..d2bea3f07c7 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -870,7 +870,7 @@ err_unlock:  }  static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, -	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +	int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))  {  	struct cb *cb;  	unsigned long flags; @@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,  	nic->cbs_avail--;  	cb->skb = skb; +	err = cb_prepare(nic, cb, skb); +	if (err) +		goto err_unlock; +  	if (unlikely(!nic->cbs_avail))  		err = -ENOSPC; -	cb_prepare(nic, cb, skb);  	/* Order is important otherwise we'll be in a race with h/w:  	 * set S-bit in current first, then clear S-bit in previous. 
*/ @@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)  	nic->mii.mdio_write = mdio_write;  } -static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)  {  	struct config *config = &cb->u.config;  	u8 *c = (u8 *)config; @@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)  	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,  		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",  		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); +	return 0;  }  /************************************************************************* @@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)  	return fw;  } -static void e100_setup_ucode(struct nic *nic, struct cb *cb, +static int e100_setup_ucode(struct nic *nic, struct cb *cb,  			     struct sk_buff *skb)  {  	const struct firmware *fw = (void *)skb; @@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,  	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 
0xFFFF : 0xFF80);  	cb->command = cpu_to_le16(cb_ucode | cb_el); +	return 0;  }  static inline int e100_load_ucode_wait(struct nic *nic) @@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)  	return err;  } -static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,  	struct sk_buff *skb)  {  	cb->command = cpu_to_le16(cb_iaaddr);  	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); +	return 0;  } -static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)  {  	cb->command = cpu_to_le16(cb_dump);  	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +  		offsetof(struct mem, dump_buf)); +	return 0;  }  static int e100_phy_check_without_mii(struct nic *nic) @@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)  	return 0;  } -static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)  {  	struct net_device *netdev = nic->netdev;  	struct netdev_hw_addr *ha; @@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)  		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,  			ETH_ALEN);  	} +	return 0;  }  static void e100_set_multicast_list(struct net_device *netdev) @@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)  		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));  } -static void e100_xmit_prepare(struct nic *nic, struct cb *cb, +static int e100_xmit_prepare(struct nic *nic, struct cb *cb,  	struct sk_buff *skb)  { +	dma_addr_t dma_addr;  	cb->command = nic->tx_command; +	dma_addr = pci_map_single(nic->pdev, +				  skb->data, skb->len, PCI_DMA_TODEVICE); +	/* If we can't map the skb, have the upper layer try later */ +	if (pci_dma_mapping_error(nic->pdev, dma_addr)) +		return -ENOMEM; +  	/*  	 * Use the last 4 bytes of 
the SKB payload packet as the CRC, used for  	 * testing, ie sending frames with bad CRC. @@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,  	cb->u.tcb.tcb_byte_count = 0;  	cb->u.tcb.threshold = nic->tx_threshold;  	cb->u.tcb.tbd_count = 1; -	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, -		skb->data, skb->len, PCI_DMA_TODEVICE)); -	/* check for mapping failure? */ +	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);  	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);  	skb_tx_timestamp(skb); +	return 0;  }  static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 43462d596a4..ffd287196bf 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)  		txdr->buffer_info[i].dma =  			dma_map_single(&pdev->dev, skb->data, skb->len,  				       DMA_TO_DEVICE); +		if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { +			ret_val = 4; +			goto err_nomem; +		}  		tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);  		tx_desc->lower.data = cpu_to_le32(skb->len);  		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | @@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)  	rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),  				    GFP_KERNEL);  	if (!rxdr->buffer_info) { -		ret_val = 4; +		ret_val = 5;  		goto err_nomem;  	} @@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)  	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,  					GFP_KERNEL);  	if (!rxdr->desc) { -		ret_val = 5; +		ret_val = 6;  		goto err_nomem;  	}  	memset(rxdr->desc, 0, rxdr->size); @@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)  		skb 
= alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);  		if (!skb) { -			ret_val = 6; +			ret_val = 7;  			goto err_nomem;  		}  		skb_reserve(skb, NET_IP_ALIGN); @@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)  		rxdr->buffer_info[i].dma =  			dma_map_single(&pdev->dev, skb->data,  				       E1000_RXBUFFER_2048, DMA_FROM_DEVICE); +		if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { +			ret_val = 8; +			goto err_nomem; +		}  		rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);  		memset(skb->data, 0x00, skb->len);  	} diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 948b86ffa4f..7e615e2bf7e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -848,11 +848,16 @@ check_page:  			}  		} -		if (!buffer_info->dma) +		if (!buffer_info->dma) {  			buffer_info->dma = dma_map_page(&pdev->dev,  			                                buffer_info->page, 0,  			                                PAGE_SIZE,  							DMA_FROM_DEVICE); +			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { +				adapter->alloc_rx_buff_failed++; +				break; +			} +		}  		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);  		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 25151401c2a..ab577a763a2 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -284,18 +284,10 @@ struct igb_q_vector {  enum e1000_ring_flags_t {  	IGB_RING_FLAG_RX_SCTP_CSUM,  	IGB_RING_FLAG_RX_LB_VLAN_BSWAP, -	IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,  	IGB_RING_FLAG_TX_CTX_IDX,  	IGB_RING_FLAG_TX_DETECT_HANG  }; -#define ring_uses_build_skb(ring) \ -	test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) -#define set_ring_build_skb_enabled(ring) \ -	set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) 
-#define clear_ring_build_skb_enabled(ring) \ -	clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) -  #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)  #define IGB_RX_DESC(R, i)	    \ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8496adfc6a6..64f75291e3a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,  	wr32(E1000_RXDCTL(reg_idx), rxdctl);  } -static void igb_set_rx_buffer_len(struct igb_adapter *adapter, -				  struct igb_ring *rx_ring) -{ -#define IGB_MAX_BUILD_SKB_SIZE \ -	(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \ -	 (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN)) - -	/* set build_skb flag */ -	if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE) -		set_ring_build_skb_enabled(rx_ring); -	else -		clear_ring_build_skb_enabled(rx_ring); -} -  /**   * igb_configure_rx - Configure receive Unit after Reset   * @adapter: board private structure @@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)  	/* Setup the HW Rx Head and Tail Descriptor Pointers and  	 * the Base and Length of the Rx Descriptor Ring */ -	for (i = 0; i < adapter->num_rx_queues; i++) { -		struct igb_ring *rx_ring = adapter->rx_ring[i]; -		igb_set_rx_buffer_len(adapter, rx_ring); -		igb_configure_rx_ring(adapter, rx_ring); -	} +	for (i = 0; i < adapter->num_rx_queues; i++) +		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);  }  /** @@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,  	return igb_can_reuse_rx_page(rx_buffer, page, truesize);  } -static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, -					   union e1000_adv_rx_desc *rx_desc) -{ -	struct igb_rx_buffer *rx_buffer; -	struct sk_buff *skb; -	struct page *page; -	void *page_addr; -	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); -#if (PAGE_SIZE 
< 8192) -	unsigned int truesize = IGB_RX_BUFSZ; -#else -	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + -				SKB_DATA_ALIGN(NET_SKB_PAD + -					       NET_IP_ALIGN + -					       size); -#endif - -	/* If we spanned a buffer we have a huge mess so test for it */ -	BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); - -	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; -	page = rx_buffer->page; -	prefetchw(page); - -	page_addr = page_address(page) + rx_buffer->page_offset; - -	/* prefetch first cache line of first page */ -	prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN); -#if L1_CACHE_BYTES < 128 -	prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN); -#endif - -	/* build an skb to around the page buffer */ -	skb = build_skb(page_addr, truesize); -	if (unlikely(!skb)) { -		rx_ring->rx_stats.alloc_failed++; -		return NULL; -	} - -	/* we are reusing so sync this buffer for CPU use */ -	dma_sync_single_range_for_cpu(rx_ring->dev, -				      rx_buffer->dma, -				      rx_buffer->page_offset, -				      IGB_RX_BUFSZ, -				      DMA_FROM_DEVICE); - -	/* update pointers within the skb to store the data */ -	skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); -	__skb_put(skb, size); - -	/* pull timestamp out of packet data */ -	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { -		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); -		__skb_pull(skb, IGB_TS_HDR_LEN); -	} - -	if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) { -		/* hand second half of page back to the ring */ -		igb_reuse_rx_page(rx_ring, rx_buffer); -	} else { -		/* we are not reusing the buffer so unmap it */ -		dma_unmap_page(rx_ring->dev, rx_buffer->dma, -			       PAGE_SIZE, DMA_FROM_DEVICE); -	} - -	/* clear contents of buffer_info */ -	rx_buffer->dma = 0; -	rx_buffer->page = NULL; - -	return skb; -} -  static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,  					   union e1000_adv_rx_desc *rx_desc,  					   struct 
sk_buff *skb) @@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)  		rmb();  		/* retrieve a buffer from the ring */ -		if (ring_uses_build_skb(rx_ring)) -			skb = igb_build_rx_buffer(rx_ring, rx_desc); -		else -			skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); +		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);  		/* exit if we failed to retrieve a buffer */  		if (!skb) @@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,  	return true;  } -static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) -{ -	if (ring_uses_build_skb(rx_ring)) -		return NET_SKB_PAD + NET_IP_ALIGN; -	else -		return 0; -} -  /**   * igb_alloc_rx_buffers - Replace used receive buffers; packet split   * @adapter: address of board private structure @@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)  		 * Refresh the desc even if buffer_addrs didn't change  		 * because each write-back erases this info.  		 
*/ -		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + -						     bi->page_offset + -						     igb_rx_offset(rx_ring)); +		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);  		rx_desc++;  		bi++; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index ea480837343..b5f94abe3cf 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -2159,6 +2159,10 @@ map_skb:  		                                  skb->data,  		                                  adapter->rx_buffer_len,  						  DMA_FROM_DEVICE); +		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { +			adapter->alloc_rx_buff_failed++; +			break; +		}  		rx_desc = IXGB_RX_DESC(*rx_ring, i);  		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); @@ -2168,7 +2172,8 @@ map_skb:  		rx_desc->status = 0; -		if (++i == rx_ring->count) i = 0; +		if (++i == rx_ring->count) +			i = 0;  		buffer_info = &rx_ring->buffer_info[i];  	} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index db5611ae407..79f4a26ea6c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7922,12 +7922,19 @@ static int __init ixgbe_init_module(void)  	ixgbe_dbg_init();  #endif /* CONFIG_DEBUG_FS */ +	ret = pci_register_driver(&ixgbe_driver); +	if (ret) { +#ifdef CONFIG_DEBUG_FS +		ixgbe_dbg_exit(); +#endif /* CONFIG_DEBUG_FS */ +		return ret; +	} +  #ifdef CONFIG_IXGBE_DCA  	dca_register_notify(&dca_notifier);  #endif -	ret = pci_register_driver(&ixgbe_driver); -	return ret; +	return 0;  }  module_init(ixgbe_init_module); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d44b4d21268..97e33669c0b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1049,6 +1049,12 @@ int 
ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)  	if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))  		return -EINVAL;  	if (vlan || qos) { +		if (adapter->vfinfo[vf].pf_vlan) +			err = ixgbe_set_vf_vlan(adapter, false, +						adapter->vfinfo[vf].pf_vlan, +						vf); +		if (err) +			goto out;  		err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);  		if (err)  			goto out; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index edfba937092..434e33c527d 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -33,6 +33,7 @@ config MV643XX_ETH  config MVMDIO  	tristate "Marvell MDIO interface support" +	select PHYLIB  	---help---  	  This driver supports the MDIO interface found in the network  	  interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, @@ -45,7 +46,6 @@ config MVMDIO  config MVNETA  	tristate "Marvell Armada 370/XP network interface support"  	depends on MACH_ARMADA_370_XP -	select PHYLIB  	select MVMDIO  	---help---  	  This driver supports the network interface units in the diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd345b8969b..a47a097c21e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -374,7 +374,6 @@ static int rxq_number = 8;  static int txq_number = 8;  static int rxq_def; -static int txq_def;  #define MVNETA_DRIVER_NAME "mvneta"  #define MVNETA_DRIVER_VERSION "1.0" @@ -1475,7 +1474,8 @@ error:  static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)  {  	struct mvneta_port *pp = netdev_priv(dev); -	struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; +	u16 txq_id = skb_get_queue_mapping(skb); +	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];  	struct mvneta_tx_desc *tx_desc;  	struct netdev_queue *nq;  	int frags = 0; @@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)  		goto out;  
	frags = skb_shinfo(skb)->nr_frags + 1; -	nq    = netdev_get_tx_queue(dev, txq_def); +	nq    = netdev_get_tx_queue(dev, txq_id);  	/* Get a descriptor for the first part of the packet */  	tx_desc = mvneta_txq_next_desc_get(txq); @@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev)  		return -EINVAL;  	} -	dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); +	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);  	if (!dev)  		return -ENOMEM; @@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev)  	netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); +	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; +	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; +	dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; +	dev->priv_flags |= IFF_UNICAST_FLT; +  	err = register_netdev(dev);  	if (err < 0) {  		dev_err(&pdev->dev, "failed to register\n");  		goto err_deinit;  	} -	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; -	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM; -	dev->priv_flags |= IFF_UNICAST_FLT; -  	netdev_info(dev, "mac: %pM\n", dev->dev_addr);  	platform_set_drvdata(pdev, pp->dev); @@ -2843,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO);  module_param(txq_number, int, S_IRUGO);  module_param(rxq_def, int, S_IRUGO); -module_param(txq_def, int, S_IRUGO); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index fc07ca35721..6a0e671fcec 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)  		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);  		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); -		tp = space - 2048/8; +		tp = space - 8192/8;  		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);  		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);  	} else { diff --git a/drivers/net/ethernet/marvell/sky2.h 
b/drivers/net/ethernet/marvell/sky2.h index 615ac63ea86..ec6dcd80152 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2074,7 +2074,7 @@ enum {  	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */  	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */ -#define GMAC_DEF_MSK     GM_IS_TX_FF_UR +#define GMAC_DEF_MSK     (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)  };  /*	GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f278b10ef71..30d78f806dc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)  static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)  { -	unsigned int i; -	for (i = ETH_ALEN - 1; i; --i) { +	int i; +	for (i = ETH_ALEN - 1; i >= 0; --i) {  		dst_mac[i] = src_mac & 0xff;  		src_mac >>= 8;  	} diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 33bcb63d56a..8fb481252e2 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)  	for (; rxfc != 0; rxfc--) {  		rxh = ks8851_rdreg32(ks, KS_RXFHSR);  		rxstat = rxh & 0xffff; -		rxlen = rxh >> 16; +		rxlen = (rxh >> 16) & 0xfff;  		netif_dbg(ks, rx_status, ks->netdev,  			  "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index cd5ae8813cb..edd63f1230f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)  		}  	} while ((adapter->ahw->linkup && 
ahw->has_link_events) != 1); +	/* Make sure carrier is off and queue is stopped during loopback */ +	if (netif_running(netdev)) { +		netif_carrier_off(netdev); +		netif_stop_queue(netdev); +	} +  	ret = qlcnic_do_lb_test(adapter, mode);  	qlcnic_83xx_clear_lb_mode(adapter, mode); @@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,  void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)  {  	struct qlcnic_cmd_args cmd; +	struct net_device *netdev = adapter->netdev;  	int ret = 0;  	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); @@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)  	data = qlcnic_83xx_fill_stats(adapter, &cmd, data,  				      QLC_83XX_STAT_TX, &ret);  	if (ret) { -		dev_info(&adapter->pdev->dev, "Error getting MAC stats\n"); +		netdev_err(netdev, "Error getting Tx stats\n");  		goto out;  	}  	/* Get MAC stats */ @@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)  	data = qlcnic_83xx_fill_stats(adapter, &cmd, data,  				      QLC_83XX_STAT_MAC, &ret);  	if (ret) { -		dev_info(&adapter->pdev->dev, -			 "Error getting Rx stats\n"); +		netdev_err(netdev, "Error getting MAC stats\n");  		goto out;  	}  	/* Get Rx stats */ @@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)  	data = qlcnic_83xx_fill_stats(adapter, &cmd, data,  				      QLC_83XX_STAT_RX, &ret);  	if (ret) -		dev_info(&adapter->pdev->dev, -			 "Error getting Tx stats\n"); +		netdev_err(netdev, "Error getting Rx stats\n");  out:  	qlcnic_free_mbx_args(&cmd);  } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0e630061bff..5fa847fe388 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -358,8 +358,7 @@ set_flags:  		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);  	}  	
opcode = TX_ETHER_PKT; -	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && -	    skb_shinfo(skb)->gso_size > 0) { +	if (skb_is_gso(skb)) {  		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);  		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);  		first_desc->total_hdr_length = hdr_len; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 987fb6f8adc..5ef328af61d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -200,10 +200,10 @@ beacon_err:  	}  	err = qlcnic_config_led(adapter, b_state, b_rate); -	if (!err) +	if (!err) {  		err = len; -	else  		ahw->beacon_state = b_state; +	}  	if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))  		qlcnic_diag_free_res(adapter->netdev, max_sds_rings); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index a131d7b5d2f..7e8d6826396 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -18,7 +18,7 @@   */  #define DRV_NAME  	"qlge"  #define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION	"v1.00.00.31" +#define DRV_VERSION	"v1.00.00.32"  #define WQ_ADDR_ALIGN	0x3	/* 4 byte alignment */ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 6f316ab2325..0780e039b27 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev,  	ecmd->supported = SUPPORTED_10000baseT_Full;  	ecmd->advertising = ADVERTISED_10000baseT_Full; -	ecmd->autoneg = AUTONEG_ENABLE;  	ecmd->transceiver = XCVR_EXTERNAL;  	if ((qdev->link_status & STS_LINK_TYPE_MASK) ==  				STS_LINK_TYPE_10GBASET) {  		ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);  		ecmd->advertising 
|= (ADVERTISED_TP | ADVERTISED_Autoneg);  		ecmd->port = PORT_TP; +		ecmd->autoneg = AUTONEG_ENABLE;  	} else {  		ecmd->supported |= SUPPORTED_FIBRE;  		ecmd->advertising |= ADVERTISED_FIBRE; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b13ab544a7e..8033555e53c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1434,11 +1434,13 @@ map_error:  }  /* Categorizing receive firmware frame errors */ -static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err) +static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, +				 struct rx_ring *rx_ring)  {  	struct nic_stats *stats = &qdev->nic_stats;  	stats->rx_err_count++; +	rx_ring->rx_errors++;  	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {  	case IB_MAC_IOCB_RSP_ERR_CODE_ERR: @@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,  	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);  	struct napi_struct *napi = &rx_ring->napi; +	/* Frame error, so drop the packet. */ +	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { +		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); +		put_page(lbq_desc->p.pg_chunk.page); +		return; +	}  	napi->dev = qdev->ndev;  	skb = napi_get_frags(napi); @@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,  	addr = lbq_desc->p.pg_chunk.va;  	prefetch(addr); +	/* Frame error, so drop the packet. */ +	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { +		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); +		goto err_out; +	} +  	/* The max framesize filter on this chip is set higher than  	 * MTU since FCoE uses 2k frames.  	 */ @@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,  	memcpy(skb_put(new_skb, length), skb->data, length);  	skb = new_skb; +	/* Frame error, so drop the packet. 
*/ +	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { +		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); +		dev_kfree_skb_any(skb); +		return; +	} +  	/* loopback self test for ethtool */  	if (test_bit(QL_SELFTEST, &qdev->flags)) {  		ql_check_lb_frame(qdev, skb); @@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,  		return;  	} +	/* Frame error, so drop the packet. */ +	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { +		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); +		dev_kfree_skb_any(skb); +		return; +	} +  	/* The max framesize filter on this chip is set higher than  	 * MTU since FCoE uses 2k frames.  	 */ @@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,  	QL_DUMP_IB_MAC_RSP(ib_mac_rsp); -	/* Frame error, so drop the packet. */ -	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { -		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2); -		return (unsigned long)length; -	} -  	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {  		/* The data and headers are split into  		 * separate buffers. 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 28fb50a1e9c..4ecbe64a758 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3818,6 +3818,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)  	}  } +static void rtl_speed_down(struct rtl8169_private *tp) +{ +	u32 adv; +	int lpa; + +	rtl_writephy(tp, 0x1f, 0x0000); +	lpa = rtl_readphy(tp, MII_LPA); + +	if (lpa & (LPA_10HALF | LPA_10FULL)) +		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; +	else if (lpa & (LPA_100HALF | LPA_100FULL)) +		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | +		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; +	else +		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | +		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | +		      (tp->mii.supports_gmii ? +		       ADVERTISED_1000baseT_Half | +		       ADVERTISED_1000baseT_Full : 0); + +	rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, +			  adv); +} +  static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)  {  	void __iomem *ioaddr = tp->mmio_addr; @@ -3848,9 +3872,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)  	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))  		return false; -	rtl_writephy(tp, 0x1f, 0x0000); -	rtl_writephy(tp, MII_BMCR, 0x0000); - +	rtl_speed_down(tp);  	rtl_wol_suspend_quirk(tp);  	return true; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index bf5e3cf97c4..6ed333fe5c0 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)  		if (felic_stat & ECSR_LCHNG) {  			/* Link Changed */  			if (mdp->cd->no_psr || mdp->no_ether_link) { -				if (mdp->link == PHY_DOWN) -					link_stat = 0; -				else -					link_stat = PHY_ST_LINK; +				goto ignore_link;  			} else {  				
link_stat = (sh_eth_read(ndev, PSR));  				if (mdp->ether_link_active_low) @@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)  		}  	} +ignore_link:  	if (intr_status & EESR_TWB) {  		/* Write buck end. unused write back interrupt */  		if (intr_status & EESR_TABT)	/* Transmit Abort int */ @@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)  	struct sh_eth_private *mdp = netdev_priv(ndev);  	struct sh_eth_cpu_data *cd = mdp->cd;  	irqreturn_t ret = IRQ_NONE; -	u32 intr_status = 0; +	unsigned long intr_status;  	spin_lock(&mdp->lock); -	/* Get interrpt stat */ +	/* Get interrupt status */  	intr_status = sh_eth_read(ndev, EESR); +	/* Mask it with the interrupt mask, forcing ECI interrupt to be always +	 * enabled since it's the one that  comes thru regardless of the mask, +	 * and we need to fully handle it in sh_eth_error() in order to quench +	 * it as it doesn't get cleared by just writing 1 to the ECI bit... +	 */ +	intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;  	/* Clear interrupt */  	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |  			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | @@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)  	struct phy_device *phydev = mdp->phydev;  	int new_state = 0; -	if (phydev->link != PHY_DOWN) { +	if (phydev->link) {  		if (phydev->duplex != mdp->duplex) {  			new_state = 1;  			mdp->duplex = phydev->duplex; @@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)  			if (mdp->cd->set_rate)  				mdp->cd->set_rate(ndev);  		} -		if (mdp->link == PHY_DOWN) { +		if (!mdp->link) {  			sh_eth_write(ndev,  				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);  			new_state = 1;  			mdp->link = phydev->link; +			if (mdp->cd->no_psr || mdp->no_ether_link) +				sh_eth_rcv_snd_enable(ndev);  		}  	} else if (mdp->link) {  		new_state = 1; -		mdp->link = PHY_DOWN; +		mdp->link = 0;  		mdp->speed = 0;  		
mdp->duplex = -1; +		if (mdp->cd->no_psr || mdp->no_ether_link) +			sh_eth_rcv_snd_disable(ndev);  	}  	if (new_state && netif_msg_link(mdp)) @@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev)  	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,  		mdp->mii_bus->id , mdp->phy_id); -	mdp->link = PHY_DOWN; +	mdp->link = 0;  	mdp->speed = 0;  	mdp->duplex = -1; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index e6655678458..828be451500 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -723,7 +723,7 @@ struct sh_eth_private {  	u32 phy_id;					/* PHY ID */  	struct mii_bus *mii_bus;	/* MDIO bus control */  	struct phy_device *phydev;	/* PHY device control */ -	enum phy_state link; +	int link;  	phy_interface_t phy_interface;  	int msg_enable;  	int speed; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 0c74a702d46..50617c5a0bd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)  {  	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);  	writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); +	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);  }  /* This reads the MAC core counters (if actaully supported). 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index df32a090d08..4781d3d8e18 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status)  	 * queue is stopped then start the queue as we have free desc for tx  	 */  	if (unlikely(netif_queue_stopped(ndev))) -		netif_start_queue(ndev); +		netif_wake_queue(ndev);  	cpts_tx_timestamp(priv->cpts, skb);  	priv->stats.tx_packets++;  	priv->stats.tx_bytes += len; @@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,  			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);  		if (data->dual_emac) { -			if (of_property_read_u32(node, "dual_emac_res_vlan", +			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",  						 &prop)) {  				pr_err("Missing dual_emac_res_vlan in DT.\n");  				slave_data->dual_emac_res_vlan = i+1; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ae1b77aa199..72300bc9e37 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)  	 * queue is stopped then start the queue as we have free desc for tx  	 */  	if (unlikely(netif_queue_stopped(ndev))) -		netif_start_queue(ndev); +		netif_wake_queue(ndev);  	ndev->stats.tx_packets++;  	ndev->stats.tx_bytes += len;  	dev_kfree_skb_any(skb); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 1cd77483da5..f5f0f09e4cc 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device,  			packet->trans_id;  		/* Notify the layer above us */ -		nvsc_packet->completion.send.send_completion( -			nvsc_packet->completion.send.send_completion_ctx); +		if (nvsc_packet) +			nvsc_packet->completion.send.send_completion( +				
nvsc_packet->completion.send. +				send_completion_ctx);  		num_outstanding_sends =  			atomic_dec_return(&net_device->num_outstanding_sends); @@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device,  	int ret = 0;  	struct nvsp_message sendMessage;  	struct net_device *ndev; +	u64 req_id;  	net_device = get_outbound_net_device(device);  	if (!net_device) @@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device,  		0xFFFFFFFF;  	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; +	if (packet->completion.send.send_completion) +		req_id = (u64)packet; +	else +		req_id = 0; +  	if (packet->page_buf_cnt) {  		ret = vmbus_sendpacket_pagebuffer(device->channel,  						  packet->page_buf,  						  packet->page_buf_cnt,  						  &sendMessage,  						  sizeof(struct nvsp_message), -						  (unsigned long)packet); +						  req_id);  	} else {  		ret = vmbus_sendpacket(device->channel, &sendMessage,  				sizeof(struct nvsp_message), -				(unsigned long)packet, +				req_id,  				VM_PKT_DATA_INBAND,  				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); -  	}  	if (ret == 0) { diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5f85205cd12..8341b62e552 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,  	if (status == 1) {  		netif_carrier_on(net); -		netif_wake_queue(net);  		ndev_ctx = netdev_priv(net);  		schedule_delayed_work(&ndev_ctx->dwork, 0);  		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));  	} else {  		netif_carrier_off(net); -		netif_tx_disable(net);  	}  } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2b657d4d63a..0775f0aefd1 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -61,9 +61,6 @@ struct rndis_request {  static void rndis_filter_send_completion(void *ctx); -static void 
rndis_filter_send_request_completion(void *ctx); - -  static struct rndis_device *get_rndis_device(void)  { @@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,  			packet->page_buf[0].len;  	} -	packet->completion.send.send_completion_ctx = req;/* packet; */ -	packet->completion.send.send_completion = -		rndis_filter_send_request_completion; -	packet->completion.send.send_completion_tid = (unsigned long)dev; +	packet->completion.send.send_completion = NULL;  	ret = netvsc_send(dev->net_dev->dev, packet);  	return ret; @@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx)  	/* Pass it back to the original handler */  	filter_pkt->completion(filter_pkt->completion_ctx);  } - - -static void rndis_filter_send_request_completion(void *ctx) -{ -	/* Noop */ -} diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b7c457adc0d..729ed533bb3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)  		if (tun->flags & TUN_TAP_MQ &&  		    (tun->numqueues + tun->numdisabled > 1)) -			return err; +			return -EBUSY;  	}  	else {  		char *name; diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 16c84299729..6bd91676d2c 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb  		goto error;  	if (skb) { -		if (skb->len <= sizeof(ETH_HLEN)) +		if (skb->len <= ETH_HLEN)  			goto error;  		/* mapping VLANs to MBIM sessions: diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 968d5d50751..2a3579f6791 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -13,6 +13,7 @@  #include <linux/module.h>  #include <linux/netdevice.h>  #include <linux/ethtool.h> +#include <linux/etherdevice.h>  #include <linux/mii.h>  #include <linux/usb.h>  #include 
<linux/usb/cdc.h> @@ -52,6 +53,96 @@ struct qmi_wwan_state {  	struct usb_interface *data;  }; +/* default ethernet address used by the modem */ +static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; + +/* Make up an ethernet header if the packet doesn't have one. + * + * A firmware bug common among several devices cause them to send raw + * IP packets under some circumstances.  There is no way for the + * driver/host to know when this will happen.  And even when the bug + * hits, some packets will still arrive with an intact header. + * + * The supported devices are only capably of sending IPv4, IPv6 and + * ARP packets on a point-to-point link. Any packet with an ethernet + * header will have either our address or a broadcast/multicast + * address as destination.  ARP packets will always have a header. + * + * This means that this function will reliably add the appropriate + * header iff necessary, provided our hardware address does not start + * with 4 or 6. + * + * Another common firmware bug results in all packets being addressed + * to 00:a0:c6:00:00:00 despite the host address being different. + * This function will also fixup such packets. 
+ */ +static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ +	__be16 proto; + +	/* usbnet rx_complete guarantees that skb->len is at least +	 * hard_header_len, so we can inspect the dest address without +	 * checking skb->len +	 */ +	switch (skb->data[0] & 0xf0) { +	case 0x40: +		proto = htons(ETH_P_IP); +		break; +	case 0x60: +		proto = htons(ETH_P_IPV6); +		break; +	case 0x00: +		if (is_multicast_ether_addr(skb->data)) +			return 1; +		/* possibly bogus destination - rewrite just in case */ +		skb_reset_mac_header(skb); +		goto fix_dest; +	default: +		/* pass along other packets without modifications */ +		return 1; +	} +	if (skb_headroom(skb) < ETH_HLEN) +		return 0; +	skb_push(skb, ETH_HLEN); +	skb_reset_mac_header(skb); +	eth_hdr(skb)->h_proto = proto; +	memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); +fix_dest: +	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); +	return 1; +} + +/* very simplistic detection of IPv4 or IPv6 headers */ +static bool possibly_iphdr(const char *data) +{ +	return (data[0] & 0xd0) == 0x40; +} + +/* disallow addresses which may be confused with IP headers */ +static int qmi_wwan_mac_addr(struct net_device *dev, void *p) +{ +	int ret; +	struct sockaddr *addr = p; + +	ret = eth_prepare_mac_addr_change(dev, p); +	if (ret < 0) +		return ret; +	if (possibly_iphdr(addr->sa_data)) +		return -EADDRNOTAVAIL; +	eth_commit_mac_addr_change(dev, p); +	return 0; +} + +static const struct net_device_ops qmi_wwan_netdev_ops = { +	.ndo_open		= usbnet_open, +	.ndo_stop		= usbnet_stop, +	.ndo_start_xmit		= usbnet_start_xmit, +	.ndo_tx_timeout		= usbnet_tx_timeout, +	.ndo_change_mtu		= usbnet_change_mtu, +	.ndo_set_mac_address	= qmi_wwan_mac_addr, +	.ndo_validate_addr	= eth_validate_addr, +}; +  /* using a counter to merge subdriver requests with our own into a combined state */  static int qmi_wwan_manage_power(struct usbnet *dev, int on)  { @@ -229,6 +320,18 @@ next_desc:  		usb_driver_release_interface(driver, info->data); 
 	} +	/* Never use the same address on both ends of the link, even +	 * if the buggy firmware told us to. +	 */ +	if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr)) +		eth_hw_addr_random(dev->net); + +	/* make MAC addr easily distinguishable from an IP header */ +	if (possibly_iphdr(dev->net->dev_addr)) { +		dev->net->dev_addr[0] |= 0x02;	/* set local assignment bit */ +		dev->net->dev_addr[0] &= 0xbf;	/* clear "IP" bit */ +	} +	dev->net->netdev_ops = &qmi_wwan_netdev_ops;  err:  	return status;  } @@ -307,6 +410,7 @@ static const struct driver_info	qmi_wwan_info = {  	.bind		= qmi_wwan_bind,  	.unbind		= qmi_wwan_unbind,  	.manage_power	= qmi_wwan_manage_power, +	.rx_fixup       = qmi_wwan_rx_fixup,  };  #define HUAWEI_VENDOR_ID	0x12D1 diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 9abe51710f2..1a15ec14c38 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)  static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)  {  	struct usbnet *dev = netdev_priv(netdev); +	int ret; + +	if (new_mtu > MAX_SINGLE_PACKET_SIZE) +		return -EINVAL; -	int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); +	ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);  	if (ret < 0) {  		netdev_warn(dev->net, "Failed to set mac rx frame length\n");  		return ret; @@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev)  	netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); -	ret = smsc75xx_set_rx_max_frame_length(dev, 1514); +	ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);  	if (ret < 0) {  		netdev_warn(dev->net, "Failed to set max rx frame length\n");  		return ret; @@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)  			else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))  				dev->net->stats.rx_frame_errors++;  		} 
else { -			/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ -			if (unlikely(size > (ETH_FRAME_LEN + 12))) { +			/* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ +			if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {  				netif_dbg(dev, rx_err, dev->net,  					  "size err rx_cmd_a=0x%08x\n",  					  rx_cmd_a); diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index 28fd99203f6..bdee2ed6721 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h @@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {  	{0x00008258, 0x00000000},  	{0x0000825c, 0x40000000},  	{0x00008260, 0x00080922}, -	{0x00008264, 0x9bc00010}, +	{0x00008264, 0x9d400010},  	{0x00008268, 0xffffffff},  	{0x0000826c, 0x0000ffff},  	{0x00008270, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c index 467b60014b7..73fe8d6db56 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c @@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)  	u32 sz, i;  	struct channel_detector *cd; -	cd = kmalloc(sizeof(*cd), GFP_KERNEL); +	cd = kmalloc(sizeof(*cd), GFP_ATOMIC);  	if (cd == NULL)  		goto fail;  	INIT_LIST_HEAD(&cd->head);  	cd->freq = freq;  	sz = sizeof(cd->detectors) * dpd->num_radar_types; -	cd->detectors = kzalloc(sz, GFP_KERNEL); +	cd->detectors = kzalloc(sz, GFP_ATOMIC);  	if (cd->detectors == NULL)  		goto fail; diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c index 91b8dceeadb..5e48c5515b8 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c @@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector 
*pde, u64 ts)  {  	struct pulse_elem *p = pool_get_pulse_elem();  	if (p == NULL) { -		p = kmalloc(sizeof(*p), GFP_KERNEL); +		p = kmalloc(sizeof(*p), GFP_ATOMIC);  		if (p == NULL) {  			DFS_POOL_STAT_INC(pulse_alloc_error);  			return false; @@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,  		ps.deadline_ts = ps.first_ts + ps.dur;  		new_ps = pool_get_pseq_elem();  		if (new_ps == NULL) { -			new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL); +			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);  			if (new_ps == NULL) {  				DFS_POOL_STAT_INC(pseq_alloc_error);  				return false; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 716058b6755..a47f5e05fc0 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)  	 * required version.  	 */  	if (priv->fw_version_major != MAJOR_VERSION_REQ || -	    priv->fw_version_minor != MINOR_VERSION_REQ) { +	    priv->fw_version_minor < MINOR_VERSION_REQ) {  		dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",  			MAJOR_VERSION_REQ, MINOR_VERSION_REQ);  		return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 39c84ecf6a4..7fdac6c7b3e 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data)  {  	struct ath_softc *sc = (struct ath_softc *)data; -	ieee80211_queue_work(sc->hw, &sc->hw_check_work); +	if (!test_bit(SC_OP_INVALID, &sc->sc_flags)) +		ieee80211_queue_work(sc->hw, &sc->hw_check_work);  }  /* diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6e66f9c6782..988372d218a 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -280,6 
+280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)  	if (r) {  		ath_err(common,  			"Unable to reset channel, reset status %d\n", r); + +		ath9k_hw_enable_interrupts(ah); +		ath9k_queue_reset(sc, RESET_TYPE_BB_HANG); +  		goto out;  	} diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 38bc5a7997f..122146943bf 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,  	const struct b43_dma_ops *ops;  	struct b43_dmaring *ring;  	struct b43_dmadesc_meta *meta; +	static const struct b43_txstatus fake; /* filled with 0 */ +	const struct b43_txstatus *txstat;  	int slot, firstused;  	bool frame_succeed; +	int skip; +	static u8 err_out1, err_out2;  	ring = parse_cookie(dev, status->cookie, &slot);  	if (unlikely(!ring)) @@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,  	firstused = ring->current_slot - ring->used_slots + 1;  	if (firstused < 0)  		firstused = ring->nr_slots + firstused; + +	skip = 0;  	if (unlikely(slot != firstused)) {  		/* This possibly is a firmware bug and will result in -		 * malfunction, memory leaks and/or stall of DMA functionality. */ -		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " -		       "Expected %d, but got %d\n", -		       ring->index, firstused, slot); -		return; +		 * malfunction, memory leaks and/or stall of DMA functionality. +		 */ +		if (slot == next_slot(ring, next_slot(ring, firstused))) { +			/* If a single header/data pair was missed, skip over +			 * the first two slots in an attempt to recover. +			 */ +			slot = firstused; +			skip = 2; +			if (!err_out1) { +				/* Report the error once. */ +				b43dbg(dev->wl, +				       "Skip on DMA ring %d slot %d.\n", +				       ring->index, slot); +				err_out1 = 1; +			} +		} else { +			/* More than a single header/data pair were missed. +			 * Report this error once. 
+			 */ +			if (!err_out2) +				b43dbg(dev->wl, +				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", +				       ring->index, firstused, slot); +			err_out2 = 1; +			return; +		}  	}  	ops = ring->ops; @@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,  			       slot, firstused, ring->index);  			break;  		} +  		if (meta->skb) {  			struct b43_private_tx_info *priv_info = -				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); +			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); -			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); +			unmap_descbuffer(ring, meta->dmaaddr, +					 meta->skb->len, 1);  			kfree(priv_info->bouncebuffer);  			priv_info->bouncebuffer = NULL;  		} else { @@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,  			struct ieee80211_tx_info *info;  			if (unlikely(!meta->skb)) { -				/* This is a scatter-gather fragment of a frame, so -				 * the skb pointer must not be NULL. */ +				/* This is a scatter-gather fragment of a frame, +				 * so the skb pointer must not be NULL. +				 */  				b43dbg(dev->wl, "TX status unexpected NULL skb "  				       "at slot %d (first=%d) on ring %d\n",  				       slot, firstused, ring->index); @@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,  			/*  			 * Call back to inform the ieee80211 subsystem about -			 * the status of the transmission. +			 * the status of the transmission. 
When skipping over +			 * a missed TX status report, use a status structure +			 * filled with zeros to indicate that the frame was not +			 * sent (frame_count 0) and not acknowledged  			 */ -			frame_succeed = b43_fill_txstatus_report(dev, info, status); +			if (unlikely(skip)) +				txstat = &fake; +			else +				txstat = status; + +			frame_succeed = b43_fill_txstatus_report(dev, info, +								 txstat);  #ifdef CONFIG_B43_DEBUG  			if (frame_succeed)  				ring->nr_succeed_tx_packets++; @@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,  		/* Everything unmapped and free'd. So it's not used anymore. */  		ring->used_slots--; -		if (meta->is_last_fragment) { +		if (meta->is_last_fragment && !skip) {  			/* This is the last scatter-gather  			 * fragment of the frame. We are done. */  			break;  		}  		slot = next_slot(ring, slot); +		if (skip > 0) +			--skip;  	}  	if (ring->stopped) {  		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index 3c35382ee6c..b70f220bc4b 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)  	u16 clip_off[2] = { 0xFFFF, 0xFFFF };  	u8 vcm_final = 0; -	s8 offset[4]; +	s32 offset[4];  	s32 results[8][4] = { };  	s32 results_min[4] = { };  	s32 poll_results[4] = { }; @@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)  		}  		for (i = 0; i < 4; i += 2) {  			s32 curr; -			s32 mind = 40; +			s32 mind = 0x100000;  			s32 minpoll = 249;  			u8 minvcm = 0;  			if (2 * core != i) @@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)  	u8 regs_save_radio[2];  	u16 regs_save_phy[2]; -	s8 offset[4]; +	s32 offset[4];  	u8 core;  	u8 rail; @@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)  	}  	for (i = 0; i < 4; i++) { -		s32 
mind = 40; +		s32 mind = 0x100000;  		u8 minvcm = 0;  		s32 minpoll = 249;  		s32 curr; @@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)  #endif  #ifdef CONFIG_B43_SSB  	case B43_BUS_SSB: -		/* FIXME */ +		ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco, +					    avoid);  		break;  #endif  	} diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 4469321c0eb..35fc68be158 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)  		goto err;  	} -	/* External image takes precedence if specified */  	if (brcmf_sdbrcm_download_code_file(bus)) {  		brcmf_err("dongle image file download failed\n");  		goto err;  	} -	/* External nvram takes precedence if specified */ -	if (brcmf_sdbrcm_download_nvram(bus)) +	if (brcmf_sdbrcm_download_nvram(bus)) {  		brcmf_err("dongle nvram file download failed\n"); +		goto err; +	}  	/* Take arm out of reset */  	if (brcmf_sdbrcm_download_state(bus, false)) { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 2af9c0f0798..78da3eff75e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -1891,8 +1891,10 @@ static s32  brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,  	      u8 key_idx, const u8 *mac_addr, struct key_params *params)  { +	struct brcmf_if *ifp = netdev_priv(ndev);  	struct brcmf_wsec_key key;  	s32 err = 0; +	u8 keybuf[8];  	memset(&key, 0, sizeof(key));  	key.index = (u32) key_idx; @@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,  		brcmf_dbg(CONN, "Setting the key index %d\n", key.index);  		memcpy(key.data, params->key, key.len); -		if 
(params->cipher == WLAN_CIPHER_SUITE_TKIP) { -			u8 keybuf[8]; +		if ((ifp->vif->mode != WL_MODE_AP) && +		    (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { +			brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");  			memcpy(keybuf, &key.data[24], sizeof(keybuf));  			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));  			memcpy(&key.data[16], keybuf, sizeof(keybuf)); @@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,  		break;  	case WLAN_CIPHER_SUITE_TKIP:  		if (ifp->vif->mode != WL_MODE_AP) { -			brcmf_dbg(CONN, "Swapping key\n"); +			brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");  			memcpy(keybuf, &key.data[24], sizeof(keybuf));  			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));  			memcpy(&key.data[16], keybuf, sizeof(keybuf)); @@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,  		err = -EAGAIN;  		goto done;  	} -	switch (wsec & ~SES_OW_ENABLED) { -	case WEP_ENABLED: +	if (wsec & WEP_ENABLED) {  		sec = &profile->sec;  		if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {  			params.cipher = WLAN_CIPHER_SUITE_WEP40; @@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,  			params.cipher = WLAN_CIPHER_SUITE_WEP104;  			brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");  		} -		break; -	case TKIP_ENABLED: +	} else if (wsec & TKIP_ENABLED) {  		params.cipher = WLAN_CIPHER_SUITE_TKIP;  		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); -		break; -	case AES_ENABLED: +	} else if (wsec & AES_ENABLED) {  		params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;  		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); -		break; -	default: +	} else  {  		brcmf_err("Invalid algo (0x%x)\n", wsec);  		err = -EINVAL;  		goto done; @@ -3824,8 +3823,9 @@ exit:  static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)  {  	struct brcmf_if *ifp = netdev_priv(ndev); -	s32 err = -EPERM; +	s32 err;  	struct brcmf_fil_bss_enable_le bss_enable; +	struct 
brcmf_join_params join_params;  	brcmf_dbg(TRACE, "Enter\n"); @@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)  		/* Due to most likely deauths outstanding we sleep */  		/* first to make sure they get processed by fw. */  		msleep(400); -		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); -		if (err < 0) { -			brcmf_err("setting AP mode failed %d\n", err); -			goto exit; -		} + +		memset(&join_params, 0, sizeof(join_params)); +		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, +					     &join_params, sizeof(join_params)); +		if (err < 0) +			brcmf_err("SET SSID error (%d)\n", err);  		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); -		if (err < 0) { +		if (err < 0)  			brcmf_err("BRCMF_C_UP error %d\n", err); -			goto exit; -		} +		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); +		if (err < 0) +			brcmf_err("setting AP mode failed %d\n", err); +		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); +		if (err < 0) +			brcmf_err("setting INFRA mode failed %d\n", err);  	} else {  		bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);  		bss_enable.enable = cpu_to_le32(0); @@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)  	set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);  	clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); -exit:  	return err;  } @@ -4124,10 +4128,6 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = {  	},  	{  		.max = 1, -		.types = BIT(NL80211_IFTYPE_P2P_DEVICE) -	}, -	{ -		.max = 1,  		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |  			 BIT(NL80211_IFTYPE_P2P_GO)  	}, @@ -4183,8 +4183,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)  				 BIT(NL80211_IFTYPE_ADHOC) |  				 BIT(NL80211_IFTYPE_AP) |  				 BIT(NL80211_IFTYPE_P2P_CLIENT) | -				 BIT(NL80211_IFTYPE_P2P_GO) | -				 BIT(NL80211_IFTYPE_P2P_DEVICE); +				 BIT(NL80211_IFTYPE_P2P_GO);  	wiphy->iface_combinations = 
brcmf_iface_combos;  	wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);  	wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index c6451c61407..e2340b231aa 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -274,6 +274,130 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)  	}  } +/** + * This function frees the WL per-device resources. + * + * This function frees resources owned by the WL device pointed to + * by the wl parameter. + * + * precondition: can both be called locked and unlocked + * + */ +static void brcms_free(struct brcms_info *wl) +{ +	struct brcms_timer *t, *next; + +	/* free ucode data */ +	if (wl->fw.fw_cnt) +		brcms_ucode_data_free(&wl->ucode); +	if (wl->irq) +		free_irq(wl->irq, wl); + +	/* kill dpc */ +	tasklet_kill(&wl->tasklet); + +	if (wl->pub) { +		brcms_debugfs_detach(wl->pub); +		brcms_c_module_unregister(wl->pub, "linux", wl); +	} + +	/* free common resources */ +	if (wl->wlc) { +		brcms_c_detach(wl->wlc); +		wl->wlc = NULL; +		wl->pub = NULL; +	} + +	/* virtual interface deletion is deferred so we cannot spinwait */ + +	/* wait for all pending callbacks to complete */ +	while (atomic_read(&wl->callbacks) > 0) +		schedule(); + +	/* free timers */ +	for (t = wl->timers; t; t = next) { +		next = t->next; +#ifdef DEBUG +		kfree(t->name); +#endif +		kfree(t); +	} +} + +/* +* called from both kernel as from this kernel module (error flow on attach) +* precondition: perimeter lock is not acquired. 
+*/ +static void brcms_remove(struct bcma_device *pdev) +{ +	struct ieee80211_hw *hw = bcma_get_drvdata(pdev); +	struct brcms_info *wl = hw->priv; + +	if (wl->wlc) { +		wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); +		wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); +		ieee80211_unregister_hw(hw); +	} + +	brcms_free(wl); + +	bcma_set_drvdata(pdev, NULL); +	ieee80211_free_hw(hw); +} + +/* + * Precondition: Since this function is called in brcms_pci_probe() context, + * no locking is required. + */ +static void brcms_release_fw(struct brcms_info *wl) +{ +	int i; +	for (i = 0; i < MAX_FW_IMAGES; i++) { +		release_firmware(wl->fw.fw_bin[i]); +		release_firmware(wl->fw.fw_hdr[i]); +	} +} + +/* + * Precondition: Since this function is called in brcms_pci_probe() context, + * no locking is required. + */ +static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) +{ +	int status; +	struct device *device = &pdev->dev; +	char fw_name[100]; +	int i; + +	memset(&wl->fw, 0, sizeof(struct brcms_firmware)); +	for (i = 0; i < MAX_FW_IMAGES; i++) { +		if (brcms_firmwares[i] == NULL) +			break; +		sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], +			UCODE_LOADER_API_VER); +		status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); +		if (status) { +			wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", +				  KBUILD_MODNAME, fw_name); +			return status; +		} +		sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], +			UCODE_LOADER_API_VER); +		status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); +		if (status) { +			wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", +				  KBUILD_MODNAME, fw_name); +			return status; +		} +		wl->fw.hdr_num_entries[i] = +		    wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); +	} +	wl->fw.fw_cnt = i; +	status = brcms_ucode_data_init(wl, &wl->ucode); +	brcms_release_fw(wl); +	return status; +} +  static void brcms_ops_tx(struct ieee80211_hw *hw,  			 struct ieee80211_tx_control *control,  	
		 struct sk_buff *skb) @@ -306,6 +430,14 @@ static int brcms_ops_start(struct ieee80211_hw *hw)  	if (!blocked)  		wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); +	if (!wl->ucode.bcm43xx_bomminor) { +		err = brcms_request_fw(wl, wl->wlc->hw->d11core); +		if (err) { +			brcms_remove(wl->wlc->hw->d11core); +			return -ENOENT; +		} +	} +  	spin_lock_bh(&wl->lock);  	/* avoid acknowledging frames before a non-monitor device is added */  	wl->mute_tx = true; @@ -793,128 +925,6 @@ void brcms_dpc(unsigned long data)  	wake_up(&wl->tx_flush_wq);  } -/* - * Precondition: Since this function is called in brcms_pci_probe() context, - * no locking is required. - */ -static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) -{ -	int status; -	struct device *device = &pdev->dev; -	char fw_name[100]; -	int i; - -	memset(&wl->fw, 0, sizeof(struct brcms_firmware)); -	for (i = 0; i < MAX_FW_IMAGES; i++) { -		if (brcms_firmwares[i] == NULL) -			break; -		sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], -			UCODE_LOADER_API_VER); -		status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); -		if (status) { -			wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", -				  KBUILD_MODNAME, fw_name); -			return status; -		} -		sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], -			UCODE_LOADER_API_VER); -		status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); -		if (status) { -			wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", -				  KBUILD_MODNAME, fw_name); -			return status; -		} -		wl->fw.hdr_num_entries[i] = -		    wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); -	} -	wl->fw.fw_cnt = i; -	return brcms_ucode_data_init(wl, &wl->ucode); -} - -/* - * Precondition: Since this function is called in brcms_pci_probe() context, - * no locking is required. 
- */ -static void brcms_release_fw(struct brcms_info *wl) -{ -	int i; -	for (i = 0; i < MAX_FW_IMAGES; i++) { -		release_firmware(wl->fw.fw_bin[i]); -		release_firmware(wl->fw.fw_hdr[i]); -	} -} - -/** - * This function frees the WL per-device resources. - * - * This function frees resources owned by the WL device pointed to - * by the wl parameter. - * - * precondition: can both be called locked and unlocked - * - */ -static void brcms_free(struct brcms_info *wl) -{ -	struct brcms_timer *t, *next; - -	/* free ucode data */ -	if (wl->fw.fw_cnt) -		brcms_ucode_data_free(&wl->ucode); -	if (wl->irq) -		free_irq(wl->irq, wl); - -	/* kill dpc */ -	tasklet_kill(&wl->tasklet); - -	if (wl->pub) { -		brcms_debugfs_detach(wl->pub); -		brcms_c_module_unregister(wl->pub, "linux", wl); -	} - -	/* free common resources */ -	if (wl->wlc) { -		brcms_c_detach(wl->wlc); -		wl->wlc = NULL; -		wl->pub = NULL; -	} - -	/* virtual interface deletion is deferred so we cannot spinwait */ - -	/* wait for all pending callbacks to complete */ -	while (atomic_read(&wl->callbacks) > 0) -		schedule(); - -	/* free timers */ -	for (t = wl->timers; t; t = next) { -		next = t->next; -#ifdef DEBUG -		kfree(t->name); -#endif -		kfree(t); -	} -} - -/* -* called from both kernel as from this kernel module (error flow on attach) -* precondition: perimeter lock is not acquired. 
-*/ -static void brcms_remove(struct bcma_device *pdev) -{ -	struct ieee80211_hw *hw = bcma_get_drvdata(pdev); -	struct brcms_info *wl = hw->priv; - -	if (wl->wlc) { -		wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); -		wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); -		ieee80211_unregister_hw(hw); -	} - -	brcms_free(wl); - -	bcma_set_drvdata(pdev, NULL); -	ieee80211_free_hw(hw); -} -  static irqreturn_t brcms_isr(int irq, void *dev_id)  {  	struct brcms_info *wl; @@ -1047,18 +1057,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)  	spin_lock_init(&wl->lock);  	spin_lock_init(&wl->isr_lock); -	/* prepare ucode */ -	if (brcms_request_fw(wl, pdev) < 0) { -		wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in " -			  "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm"); -		brcms_release_fw(wl); -		brcms_remove(pdev); -		return NULL; -	} -  	/* common load-time initialization */  	wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); -	brcms_release_fw(wl);  	if (!wl->wlc) {  		wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",  			  KBUILD_MODNAME, err); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 21a82423247..18d37645e2c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,  	gain0_15 = ((biq1 & 0xf) << 12) |  		   ((tia & 0xf) << 8) |  		   ((lna2 & 0x3) << 6) | -		   ((lna2 & 0x3) << 4) | -		   ((lna1 & 0x3) << 2) | -		   ((lna1 & 0x3) << 0); +		   ((lna2 & +		     0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);  	mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);  	mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); @@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,  	}  	mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); -	
mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11); -	mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);  } @@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)  	return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;  } -static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain, -				      u16 tia_gain, u16 lna2_gain) -{ -	u32 i_thresh_l, q_thresh_l; -	u32 i_thresh_h, q_thresh_h; -	struct lcnphy_iq_est iq_est_h, iq_est_l; - -	wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain, -					       lna2_gain, 0); - -	wlc_lcnphy_rx_gain_override_enable(pi, true); -	wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0); -	udelay(500); -	write_radio_reg(pi, RADIO_2064_REG112, 0); -	if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l)) -		return false; - -	wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0); -	udelay(500); -	write_radio_reg(pi, RADIO_2064_REG112, 0); -	if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h)) -		return false; - -	i_thresh_l = (iq_est_l.i_pwr << 1); -	i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr; - -	q_thresh_l = (iq_est_l.q_pwr << 1); -	q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr; -	if ((iq_est_h.i_pwr > i_thresh_l) && -	    (iq_est_h.i_pwr < i_thresh_h) && -	    (iq_est_h.q_pwr > q_thresh_l) && -	    (iq_est_h.q_pwr < q_thresh_h)) -		return true; - -	return false; -} -  static bool  wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,  		     const struct lcnphy_rx_iqcomp *iqcomp, @@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,  	    RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,  	    rfoverride3_old, rfoverride3val_old, rfoverride4_old,  	    rfoverride4val_old, afectrlovr_old, afectrlovrval_old; -	int tia_gain, lna2_gain, biq1_gain; -	bool set_gain; +	int tia_gain; +	u32 received_power, rx_pwr_threshold;  	u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;  	u16 values_to_save[11];  	s16 *ptr; @@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy 
*pi,  		goto cal_done;  	} -	WARN_ON(module != 1); -	tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); -	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); - -	for (i = 0; i < 11; i++) -		values_to_save[i] = -			read_radio_reg(pi, rxiq_cal_rf_reg[i]); -	Core1TxControl_old = read_phy_reg(pi, 0x631); +	if (module == 1) { -	or_phy_reg(pi, 0x631, 0x0015); - -	RFOverride0_old = read_phy_reg(pi, 0x44c); -	RFOverrideVal0_old = read_phy_reg(pi, 0x44d); -	rfoverride2_old = read_phy_reg(pi, 0x4b0); -	rfoverride2val_old = read_phy_reg(pi, 0x4b1); -	rfoverride3_old = read_phy_reg(pi, 0x4f9); -	rfoverride3val_old = read_phy_reg(pi, 0x4fa); -	rfoverride4_old = read_phy_reg(pi, 0x938); -	rfoverride4val_old = read_phy_reg(pi, 0x939); -	afectrlovr_old = read_phy_reg(pi, 0x43b); -	afectrlovrval_old = read_phy_reg(pi, 0x43c); -	old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); -	old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); +		tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); +		wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); -	tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); -	if (tx_gain_override_old) { -		wlc_lcnphy_get_tx_gain(pi, &old_gains); -		tx_gain_index_old = pi_lcn->lcnphy_current_index; -	} +		for (i = 0; i < 11; i++) +			values_to_save[i] = +				read_radio_reg(pi, rxiq_cal_rf_reg[i]); +		Core1TxControl_old = read_phy_reg(pi, 0x631); -	wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); +		or_phy_reg(pi, 0x631, 0x0015); -	mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); -	mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); +		RFOverride0_old = read_phy_reg(pi, 0x44c); +		RFOverrideVal0_old = read_phy_reg(pi, 0x44d); +		rfoverride2_old = read_phy_reg(pi, 0x4b0); +		rfoverride2val_old = read_phy_reg(pi, 0x4b1); +		rfoverride3_old = read_phy_reg(pi, 0x4f9); +		rfoverride3val_old = read_phy_reg(pi, 0x4fa); +		rfoverride4_old = read_phy_reg(pi, 0x938); +		rfoverride4val_old = read_phy_reg(pi, 0x939); +		afectrlovr_old = read_phy_reg(pi, 0x43b); +		afectrlovrval_old = 
read_phy_reg(pi, 0x43c); +		old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); +		old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); -	mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); -	mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); +		tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); +		if (tx_gain_override_old) { +			wlc_lcnphy_get_tx_gain(pi, &old_gains); +			tx_gain_index_old = pi_lcn->lcnphy_current_index; +		} -	write_radio_reg(pi, RADIO_2064_REG116, 0x06); -	write_radio_reg(pi, RADIO_2064_REG12C, 0x07); -	write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); -	write_radio_reg(pi, RADIO_2064_REG098, 0x03); -	write_radio_reg(pi, RADIO_2064_REG00B, 0x7); -	mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); -	write_radio_reg(pi, RADIO_2064_REG01D, 0x01); -	write_radio_reg(pi, RADIO_2064_REG114, 0x01); -	write_radio_reg(pi, RADIO_2064_REG02E, 0x10); -	write_radio_reg(pi, RADIO_2064_REG12A, 0x08); +		wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); -	mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); -	mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); -	mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); -	mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); -	mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); -	mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); -	mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); -	mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); -	mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); -	mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); +		mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); +		mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); -	mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); -	mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); +		mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); +		mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); -	write_phy_reg(pi, 0x6da, 0xffff); -	or_phy_reg(pi, 0x6db, 0x3); +		write_radio_reg(pi, RADIO_2064_REG116, 0x06); +		write_radio_reg(pi, RADIO_2064_REG12C, 0x07); +		write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); +		write_radio_reg(pi, RADIO_2064_REG098, 0x03); +		write_radio_reg(pi, 
RADIO_2064_REG00B, 0x7); +		mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); +		write_radio_reg(pi, RADIO_2064_REG01D, 0x01); +		write_radio_reg(pi, RADIO_2064_REG114, 0x01); +		write_radio_reg(pi, RADIO_2064_REG02E, 0x10); +		write_radio_reg(pi, RADIO_2064_REG12A, 0x08); -	wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); -	set_gain = false; +		mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); +		mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); +		mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); +		mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); +		mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); +		mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); +		mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); +		mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); +		mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); +		mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); -	lna2_gain = 3; -	while ((lna2_gain >= 0) && !set_gain) { -		tia_gain = 4; +		mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); +		mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); -		while ((tia_gain >= 0) && !set_gain) { -			biq1_gain = 6; +		wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); +		write_phy_reg(pi, 0x6da, 0xffff); +		or_phy_reg(pi, 0x6db, 0x3); +		wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); +		wlc_lcnphy_rx_gain_override_enable(pi, true); -			while ((biq1_gain >= 0) && !set_gain) { -				set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, -								     (u16) -								     biq1_gain, -								     (u16) -								     tia_gain, -								     (u16) -								     lna2_gain); -				biq1_gain -= 1; -			} +		tia_gain = 8; +		rx_pwr_threshold = 950; +		while (tia_gain > 0) {  			tia_gain -= 1; -		} -		lna2_gain -= 1; -	} +			wlc_lcnphy_set_rx_gain_by_distribution(pi, +							       0, 0, 2, 2, +							       (u16) +							       tia_gain, 1, 0); +			udelay(500); -	if (set_gain) -		result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024); -	else -		result = false; +			received_power = +				wlc_lcnphy_measure_digital_power(pi, 2000); +			if (received_power < rx_pwr_threshold) +	
			break; +		} +		result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); -	wlc_lcnphy_stop_tx_tone(pi); +		wlc_lcnphy_stop_tx_tone(pi); -	write_phy_reg(pi, 0x631, Core1TxControl_old); +		write_phy_reg(pi, 0x631, Core1TxControl_old); -	write_phy_reg(pi, 0x44c, RFOverrideVal0_old); -	write_phy_reg(pi, 0x44d, RFOverrideVal0_old); -	write_phy_reg(pi, 0x4b0, rfoverride2_old); -	write_phy_reg(pi, 0x4b1, rfoverride2val_old); -	write_phy_reg(pi, 0x4f9, rfoverride3_old); -	write_phy_reg(pi, 0x4fa, rfoverride3val_old); -	write_phy_reg(pi, 0x938, rfoverride4_old); -	write_phy_reg(pi, 0x939, rfoverride4val_old); -	write_phy_reg(pi, 0x43b, afectrlovr_old); -	write_phy_reg(pi, 0x43c, afectrlovrval_old); -	write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); -	write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); +		write_phy_reg(pi, 0x44c, RFOverrideVal0_old); +		write_phy_reg(pi, 0x44d, RFOverrideVal0_old); +		write_phy_reg(pi, 0x4b0, rfoverride2_old); +		write_phy_reg(pi, 0x4b1, rfoverride2val_old); +		write_phy_reg(pi, 0x4f9, rfoverride3_old); +		write_phy_reg(pi, 0x4fa, rfoverride3val_old); +		write_phy_reg(pi, 0x938, rfoverride4_old); +		write_phy_reg(pi, 0x939, rfoverride4val_old); +		write_phy_reg(pi, 0x43b, afectrlovr_old); +		write_phy_reg(pi, 0x43c, afectrlovrval_old); +		write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); +		write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); -	wlc_lcnphy_clear_trsw_override(pi); +		wlc_lcnphy_clear_trsw_override(pi); -	mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); +		mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); -	for (i = 0; i < 11; i++) -		write_radio_reg(pi, rxiq_cal_rf_reg[i], -				values_to_save[i]); +		for (i = 0; i < 11; i++) +			write_radio_reg(pi, rxiq_cal_rf_reg[i], +					values_to_save[i]); -	if (tx_gain_override_old) -		wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); -	else -		wlc_lcnphy_disable_tx_gain_override(pi); +		if (tx_gain_override_old) +			wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); +		else +			
wlc_lcnphy_disable_tx_gain_override(pi); -	wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); -	wlc_lcnphy_rx_gain_override_enable(pi, false); +		wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); +		wlc_lcnphy_rx_gain_override_enable(pi, false); +	}  cal_done:  	kfree(ptr); @@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)  		write_radio_reg(pi, RADIO_2064_REG038, 3);  		write_radio_reg(pi, RADIO_2064_REG091, 7);  	} - -	if (!(pi->sh->boardflags & BFL_FEM)) { -		u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc, -			0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0}; - -		write_radio_reg(pi, RADIO_2064_REG02A, 0xf); -		write_radio_reg(pi, RADIO_2064_REG091, 0x3); -		write_radio_reg(pi, RADIO_2064_REG038, 0x3); - -		write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]); -	}  }  static int @@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)  		} else {  			mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);  			mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); -			mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0); -			mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2); -			mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0); -			mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4); -			mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); -			mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77); -			mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1); -			mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7); -			mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1); -			mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);  		}  	} else {  		mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); @@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)  		    (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));  	mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); -	mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));  }  static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)  {  	struct phytbl_info tab;  	
u32 rfseq, ind; -	u8 tssi_sel;  	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;  	tab.tbl_width = 32; @@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)  	mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); -	if (pi->sh->boardflags & BFL_FEM) { -		tssi_sel = 0x1; -		wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); -	} else { -		tssi_sel = 0xe; -		wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA); -	} +	wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);  	mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);  	mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); @@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)  	mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);  	if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { -		mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel); +		mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);  		mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);  	} else { -		mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);  		mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);  		mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);  	} @@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)  	mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); -	mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0); -	mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); -	mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); -  	wlc_lcnphy_pwrctrl_rssiparams(pi);  } @@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)  		read_radio_reg(pi, RADIO_2064_REG007) & 1;  	u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;  	u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; -	u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi); -  	idleTssi = read_phy_reg(pi, 0x4ab);  	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &  			 MCTL_EN_MAC)); @@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)  	mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);  	mod_radio_reg(pi, 
RADIO_2064_REG11F, 0x4, 1 << 2);  	wlc_lcnphy_tssi_setup(pi); - -	mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0)); -	mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6)); - -	wlc_lcnphy_set_bbmult(pi, 0x0); -  	wlc_phy_do_dummy_tx(pi, true, OFF);  	idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))  		    >> 0); @@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)  	mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); -	wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);  	wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);  	wlc_lcnphy_set_tx_gain(pi, &old_gains);  	wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); @@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)  			wlc_lcnphy_write_table(pi, &tab);  			tab.tbl_offset++;  		} -		mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0); -		mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0); -		mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8); -		mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4); -		mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);  		mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); @@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)  	target_gains.pad_gain = 21;  	target_gains.dac_gain = 0;  	wlc_lcnphy_set_tx_gain(pi, &target_gains); +	wlc_lcnphy_set_tx_pwr_by_index(pi, 16);  	if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { @@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)  					lcnphy_recal ? 
LCNPHY_CAL_RECAL :  					LCNPHY_CAL_FULL), false);  	} else { -		wlc_lcnphy_set_tx_pwr_by_index(pi, 16);  		wlc_lcnphy_tx_iqlo_soft_cal_full(pi);  	} @@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,  	if (CHSPEC_IS5G(pi->radio_chanspec))  		pa_gain = 0x70;  	else -		pa_gain = 0x60; +		pa_gain = 0x70;  	if (pi->sh->boardflags & BFL_FEM)  		pa_gain = 0x10; -  	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;  	tab.tbl_width = 32;  	tab.tbl_len = 1;  	tab.tbl_ptr = &val;  	for (j = 0; j < 128; j++) { -		if (pi->sh->boardflags & BFL_FEM) -			gm_gain = gain_table[j].gm; -		else -			gm_gain = 15; - +		gm_gain = gain_table[j].gm;  		val = (((u32) pa_gain << 24) |  		       (gain_table[j].pad << 16) |  		       (gain_table[j].pga << 8) | gm_gain); @@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)  	write_phy_reg(pi, 0x4ea, 0x4688); -	if (pi->sh->boardflags & BFL_FEM) -		mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); -	else -		mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0); +	mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);  	mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); @@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)  	wlc_lcnphy_rcal(pi);  	wlc_lcnphy_rc_cal(pi); - -	if (!(pi->sh->boardflags & BFL_FEM)) { -		write_radio_reg(pi, RADIO_2064_REG032, 0x6f); -		write_radio_reg(pi, RADIO_2064_REG033, 0x19); -		write_radio_reg(pi, RADIO_2064_REG039, 0xe); -	} -  }  static void wlc_lcnphy_radio_init(struct brcms_phy *pi) @@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)  		wlc_lcnphy_write_table(pi, &tab);  	} -	if (!(pi->sh->boardflags & BFL_FEM)) { -		tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; -		tab.tbl_width = 16; -		tab.tbl_ptr = &val; -		tab.tbl_len = 1; +	tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; +	tab.tbl_width = 16; +	tab.tbl_ptr = &val; +	tab.tbl_len = 1; -		val = 150; -		tab.tbl_offset = 0; -		wlc_lcnphy_write_table(pi, &tab); +	val = 114; +	tab.tbl_offset = 0; +	wlc_lcnphy_write_table(pi, &tab); -		
val = 220; -		tab.tbl_offset = 1; -		wlc_lcnphy_write_table(pi, &tab); -	} +	val = 130; +	tab.tbl_offset = 1; +	wlc_lcnphy_write_table(pi, &tab); + +	val = 6; +	tab.tbl_offset = 8; +	wlc_lcnphy_write_table(pi, &tab);  	if (CHSPEC_IS2G(pi->radio_chanspec)) {  		if (pi->sh->boardflags & BFL_FEM) @@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)  		wlc_lcnphy_load_tx_iir_filter(pi, true, 3);  	mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); -	wlc_lcnphy_tssi_setup(pi);  }  void wlc_phy_detach_lcnphy(struct brcms_phy *pi) @@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)  	if (!wlc_phy_txpwr_srom_read_lcnphy(pi))  		return false; -	if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { +	if ((pi->sh->boardflags & BFL_FEM) && +	    (LCNREV_IS(pi->pubpi.phy_rev, 1))) {  		if (pi_lcn->lcnphy_tempsense_option == 3) {  			pi->hwpwrctrl = true;  			pi->hwpwrctrl_capable = true; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index b7e95acc208..622c01ca72c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c @@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {  };  static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { -	0x0009,  	0x000a, -	0x0005, -	0x0006,  	0x0009, -	0x000a, -	0x0005,  	0x0006, -	0x0009, -	0x000a,  	0x0005, -	0x0006, -	0x0009,  	0x000a, -	0x0005, -	0x0006,  	0x0009, -	0x000a, -	0x0005,  	0x0006, -	0x0009, -	0x000a,  	0x0005, -	0x0006, -	0x0009,  	0x000a, -	0x0005, -	0x0006,  	0x0009, -	0x000a, -	0x0005,  	0x0006, -	0x0009, -	0x000a,  	0x0005, -	0x0006, -	0x0009,  	0x000a, -	0x0005, -	0x0006,  	0x0009, -	0x000a, -	0x0005,  	0x0006, -	0x0009, -	0x000a,  	0x0005, -	0x0006, +	0x000a,  	0x0009, +	0x0006, +	0x0005,  	0x000a, +	0x0009, +	0x0006,  	0x0005, +	0x000a, +	0x0009,  	0x0006, +	0x0005, +	0x000a,  	0x0009, +	0x0006, +	0x0005,  
	0x000a, +	0x0009, +	0x0006,  	0x0005, +	0x000a, +	0x0009,  	0x0006, +	0x0005, +	0x000a,  	0x0009, +	0x0006, +	0x0005,  	0x000a, +	0x0009, +	0x0006,  	0x0005, +	0x000a, +	0x0009,  	0x0006, +	0x0005, +	0x000a,  	0x0009, +	0x0006, +	0x0005,  	0x000a, +	0x0009, +	0x0006,  	0x0005, +	0x000a, +	0x0009,  	0x0006, +	0x0005,  };  static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index e8324b5e5bf..6c7493c2d69 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,  	int rate_idx;  	int i;  	u32 rate; -	u8 use_green = il4965_rs_use_green(il, sta); +	u8 use_green;  	u8 active_tbl = 0;  	u8 valid_tx_ant;  	struct il_station_priv *sta_priv; @@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,  	if (!sta || !lq_sta)  		return; +	use_green = il4965_rs_use_green(il, sta);  	sta_priv = (void *)sta->drv_priv;  	i = lq_sta->last_txrate_idx; diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 86ea5f4c393..44ca0e57f9f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -1262,6 +1262,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)  	}  	/* +	 * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag +	 * in iwl_down but cancel the workers only later. +	 */ +	if (!priv->ucode_loaded) { +		IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); +		return -EIO; +	} + +	/*  	 * Synchronous commands from this op-mode must hold  	 * the mutex, this ensures we don't try to send two  	 * (or more) synchronous commands at a time. 
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 23be948cf16..a82b6b39d4f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1419,6 +1419,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,  	mutex_lock(&priv->mutex); +	if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { +		/* +		 * If we go idle, then clearly no "passive-no-rx" +		 * workaround is needed any more, this is a reset. +		 */ +		iwlagn_lift_passive_no_rx(priv); +	} +  	if (unlikely(!iwl_is_ready(priv))) {  		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");  		mutex_unlock(&priv->mutex); @@ -1450,16 +1458,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,  			priv->timestamp = bss_conf->sync_tsf;  			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;  		} else { -			/* -			 * If we disassociate while there are pending -			 * frames, just wake up the queues and let the -			 * frames "escape" ... This shouldn't really -			 * be happening to start with, but we should -			 * not get stuck in this case either since it -			 * can happen if userspace gets confused. 
-			 */ -			iwlagn_lift_passive_no_rx(priv); -  			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;  			if (ctx->ctxid == IWL_RXON_CTX_BSS) diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 6aec2df3bb2..d1a670d7b10 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1192,7 +1192,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,  			memset(&info->status, 0, sizeof(info->status));  			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && -			    iwl_is_associated_ctx(ctx) && ctx->vif && +			    ctx->vif &&  			    ctx->vif->type == NL80211_IFTYPE_STATION) {  				/* block and stop all queues */  				priv->passive_no_rx = true; diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 736fe9bb140..1a4ac9236a4 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,  		return -EIO;  	} +	priv->ucode_loaded = true; +  	if (ucode_type != IWL_UCODE_WOWLAN) {  		/* delay a bit to give rfkill time to run */  		msleep(5); @@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,  		return ret;  	} -	priv->ucode_loaded = true; -  	return 0;  } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 17bedc50e75..12c4f31ca8f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,  	/* If platform's RF_KILL switch is NOT set to KILL */  	hw_rfkill = iwl_is_rfkill_set(trans); +	if (hw_rfkill) +		set_bit(STATUS_RFKILL, &trans_pcie->status); +	else +		clear_bit(STATUS_RFKILL, &trans_pcie->status);  	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);  	if (hw_rfkill && !run_in_rfkill)  		return -ERFKILL; @@ -641,6 
+645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,  static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)  { +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	bool hw_rfkill;  	int err; @@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)  	iwl_enable_rfkill_int(trans);  	hw_rfkill = iwl_is_rfkill_set(trans); +	if (hw_rfkill) +		set_bit(STATUS_RFKILL, &trans_pcie->status); +	else +		clear_bit(STATUS_RFKILL, &trans_pcie->status);  	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);  	return 0; @@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,  		 * op_mode.  		 */  		hw_rfkill = iwl_is_rfkill_set(trans); +		if (hw_rfkill) +			set_bit(STATUS_RFKILL, &trans_pcie->status); +		else +			clear_bit(STATUS_RFKILL, &trans_pcie->status);  		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);  	}  } diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 8595c16f74d..cb5c6792e3a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,  	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {  		int copy = 0; -		if (!cmd->len) +		if (!cmd->len[i])  			continue;  		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index a44023a7bd5..8aaf56ade4d 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1892,7 +1892,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,  		}  	} -	for (i = 0; i < request->n_channels; i++) { +	for (i = 0; i < min_t(u32, request->n_channels, +			      MWIFIEX_USER_SCAN_CHAN_MAX); i++) {  		chan = request->channels[i];  		priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;  		priv->user_scan_cfg->chan_list[i].radio_type = chan->band; diff --git 
a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 5c395e2e6a2..feb20461339 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)  		}  		memcpy(adapter->upld_buf, skb->data,  		       min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); +		skb_push(skb, INTF_HEADER_LEN);  		if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,  					   PCI_DMA_FROMDEVICE))  			return -1; diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index d215b4d3c51..e7f6deaf715 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1393,8 +1393,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,  			queue_work(adapter->workqueue, &adapter->main_work);  			/* Perform internal scan synchronously */ -			if (!priv->scan_request) +			if (!priv->scan_request) { +				dev_dbg(adapter->dev, "wait internal scan\n");  				mwifiex_wait_queue_complete(adapter, cmd_node); +			}  		} else {  			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,  					       flags); @@ -1793,7 +1795,12 @@ check_next_scan:  		/* Need to indicate IOCTL complete */  		if (adapter->curr_cmd->wait_q_enabled) {  			adapter->cmd_wait_q.status = 0; -			mwifiex_complete_cmd(adapter, adapter->curr_cmd); +			if (!priv->scan_request) { +				dev_dbg(adapter->dev, +					"complete internal scan\n"); +				mwifiex_complete_cmd(adapter, +						     adapter->curr_cmd); +			}  		}  		if (priv->report_scan_result)  			priv->report_scan_result = false; diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 2bf4efa3318..76cd47eb901 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -20,6 +20,7 @@ if RT2X00  config RT2400PCI  	tristate "Ralink rt2400 (PCI/PCMCIA) support"  	depends on PCI +	select 
RT2X00_LIB_MMIO  	select RT2X00_LIB_PCI  	select EEPROM_93CX6  	---help--- @@ -31,6 +32,7 @@ config RT2400PCI  config RT2500PCI  	tristate "Ralink rt2500 (PCI/PCMCIA) support"  	depends on PCI +	select RT2X00_LIB_MMIO  	select RT2X00_LIB_PCI  	select EEPROM_93CX6  	---help--- @@ -43,6 +45,7 @@ config RT61PCI  	tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"  	depends on PCI  	select RT2X00_LIB_PCI +	select RT2X00_LIB_MMIO  	select RT2X00_LIB_FIRMWARE  	select RT2X00_LIB_CRYPTO  	select CRC_ITU_T @@ -57,6 +60,7 @@ config RT2800PCI  	tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"  	depends on PCI || SOC_RT288X || SOC_RT305X  	select RT2800_LIB +	select RT2X00_LIB_MMIO  	select RT2X00_LIB_PCI if PCI  	select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X  	select RT2X00_LIB_FIRMWARE @@ -185,6 +189,9 @@ endif  config RT2800_LIB  	tristate +config RT2X00_LIB_MMIO +	tristate +  config RT2X00_LIB_PCI  	tristate  	select RT2X00_LIB diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile index 349d5b8284a..f069d8bc5b6 100644 --- a/drivers/net/wireless/rt2x00/Makefile +++ b/drivers/net/wireless/rt2x00/Makefile @@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE)	+= rt2x00firmware.o  rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS)	+= rt2x00leds.o  obj-$(CONFIG_RT2X00_LIB)		+= rt2x00lib.o +obj-$(CONFIG_RT2X00_LIB_MMIO)		+= rt2x00mmio.o  obj-$(CONFIG_RT2X00_LIB_PCI)		+= rt2x00pci.o  obj-$(CONFIG_RT2X00_LIB_SOC)		+= rt2x00soc.o  obj-$(CONFIG_RT2X00_LIB_USB)		+= rt2x00usb.o diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 221beaaa83f..dcfb54e0c51 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -34,6 +34,7 @@  #include <linux/slab.h>  #include "rt2x00.h" +#include "rt2x00mmio.h"  #include "rt2x00pci.h"  #include "rt2400pci.h" diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 
39edc59e8d0..e1d2dc9ed28 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -34,6 +34,7 @@  #include <linux/slab.h>  #include "rt2x00.h" +#include "rt2x00mmio.h"  #include "rt2x00pci.h"  #include "rt2500pci.h" diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index ded73da4de0..ba5a05625aa 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -41,6 +41,7 @@  #include <linux/eeprom_93cx6.h>  #include "rt2x00.h" +#include "rt2x00mmio.h"  #include "rt2x00pci.h"  #include "rt2x00soc.h"  #include "rt2800lib.h" diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c new file mode 100644 index 00000000000..d84a680ba0c --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c @@ -0,0 +1,216 @@ +/* +	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> +	<http://rt2x00.serialmonkey.com> + +	This program is free software; you can redistribute it and/or modify +	it under the terms of the GNU General Public License as published by +	the Free Software Foundation; either version 2 of the License, or +	(at your option) any later version. + +	This program is distributed in the hope that it will be useful, +	but WITHOUT ANY WARRANTY; without even the implied warranty of +	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +	GNU General Public License for more details. + +	You should have received a copy of the GNU General Public License +	along with this program; if not, write to the +	Free Software Foundation, Inc., +	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* +	Module: rt2x00mmio +	Abstract: rt2x00 generic mmio device routines. + */ + +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include "rt2x00.h" +#include "rt2x00mmio.h" + +/* + * Register access. 
+ */ +int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, +			   const unsigned int offset, +			   const struct rt2x00_field32 field, +			   u32 *reg) +{ +	unsigned int i; + +	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) +		return 0; + +	for (i = 0; i < REGISTER_BUSY_COUNT; i++) { +		rt2x00pci_register_read(rt2x00dev, offset, reg); +		if (!rt2x00_get_field32(*reg, field)) +			return 1; +		udelay(REGISTER_BUSY_DELAY); +	} + +	printk_once(KERN_ERR "%s() Indirect register access failed: " +	      "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); +	*reg = ~0; + +	return 0; +} +EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); + +bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) +{ +	struct data_queue *queue = rt2x00dev->rx; +	struct queue_entry *entry; +	struct queue_entry_priv_pci *entry_priv; +	struct skb_frame_desc *skbdesc; +	int max_rx = 16; + +	while (--max_rx) { +		entry = rt2x00queue_get_entry(queue, Q_INDEX); +		entry_priv = entry->priv_data; + +		if (rt2x00dev->ops->lib->get_entry_state(entry)) +			break; + +		/* +		 * Fill in desc fields of the skb descriptor +		 */ +		skbdesc = get_skb_frame_desc(entry->skb); +		skbdesc->desc = entry_priv->desc; +		skbdesc->desc_len = entry->queue->desc_size; + +		/* +		 * DMA is already done, notify rt2x00lib that +		 * it finished successfully. +		 */ +		rt2x00lib_dmastart(entry); +		rt2x00lib_dmadone(entry); + +		/* +		 * Send the frame to rt2x00lib for further processing. +		 */ +		rt2x00lib_rxdone(entry, GFP_ATOMIC); +	} + +	return !max_rx; +} +EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); + +void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) +{ +	unsigned int i; + +	for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) +		msleep(10); +} +EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); + +/* + * Device initialization handlers. 
+ */ +static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, +				     struct data_queue *queue) +{ +	struct queue_entry_priv_pci *entry_priv; +	void *addr; +	dma_addr_t dma; +	unsigned int i; + +	/* +	 * Allocate DMA memory for descriptor and buffer. +	 */ +	addr = dma_alloc_coherent(rt2x00dev->dev, +				  queue->limit * queue->desc_size, +				  &dma, GFP_KERNEL); +	if (!addr) +		return -ENOMEM; + +	memset(addr, 0, queue->limit * queue->desc_size); + +	/* +	 * Initialize all queue entries to contain valid addresses. +	 */ +	for (i = 0; i < queue->limit; i++) { +		entry_priv = queue->entries[i].priv_data; +		entry_priv->desc = addr + i * queue->desc_size; +		entry_priv->desc_dma = dma + i * queue->desc_size; +	} + +	return 0; +} + +static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, +				     struct data_queue *queue) +{ +	struct queue_entry_priv_pci *entry_priv = +	    queue->entries[0].priv_data; + +	if (entry_priv->desc) +		dma_free_coherent(rt2x00dev->dev, +				  queue->limit * queue->desc_size, +				  entry_priv->desc, entry_priv->desc_dma); +	entry_priv->desc = NULL; +} + +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) +{ +	struct data_queue *queue; +	int status; + +	/* +	 * Allocate DMA +	 */ +	queue_for_each(rt2x00dev, queue) { +		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); +		if (status) +			goto exit; +	} + +	/* +	 * Register interrupt handler. +	 */ +	status = request_irq(rt2x00dev->irq, +			     rt2x00dev->ops->lib->irq_handler, +			     IRQF_SHARED, rt2x00dev->name, rt2x00dev); +	if (status) { +		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", +		      rt2x00dev->irq, status); +		goto exit; +	} + +	return 0; + +exit: +	queue_for_each(rt2x00dev, queue) +		rt2x00pci_free_queue_dma(rt2x00dev, queue); + +	return status; +} +EXPORT_SYMBOL_GPL(rt2x00pci_initialize); + +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) +{ +	struct data_queue *queue; + +	/* +	 * Free irq line. 
+	 */ +	free_irq(rt2x00dev->irq, rt2x00dev); + +	/* +	 * Free DMA +	 */ +	queue_for_each(rt2x00dev, queue) +		rt2x00pci_free_queue_dma(rt2x00dev, queue); +} +EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); + +/* + * rt2x00mmio module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 mmio library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h new file mode 100644 index 00000000000..4ecaf60175b --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h @@ -0,0 +1,119 @@ +/* +	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> +	<http://rt2x00.serialmonkey.com> + +	This program is free software; you can redistribute it and/or modify +	it under the terms of the GNU General Public License as published by +	the Free Software Foundation; either version 2 of the License, or +	(at your option) any later version. + +	This program is distributed in the hope that it will be useful, +	but WITHOUT ANY WARRANTY; without even the implied warranty of +	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +	GNU General Public License for more details. + +	You should have received a copy of the GNU General Public License +	along with this program; if not, write to the +	Free Software Foundation, Inc., +	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* +	Module: rt2x00mmio +	Abstract: Data structures for the rt2x00mmio module. + */ + +#ifndef RT2X00MMIO_H +#define RT2X00MMIO_H + +#include <linux/io.h> + +/* + * Register access. 
+ */ +static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, +					   const unsigned int offset, +					   u32 *value) +{ +	*value = readl(rt2x00dev->csr.base + offset); +} + +static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, +						const unsigned int offset, +						void *value, const u32 length) +{ +	memcpy_fromio(value, rt2x00dev->csr.base + offset, length); +} + +static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, +					    const unsigned int offset, +					    u32 value) +{ +	writel(value, rt2x00dev->csr.base + offset); +} + +static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, +						 const unsigned int offset, +						 const void *value, +						 const u32 length) +{ +	__iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); +} + +/** + * rt2x00pci_regbusy_read - Read from register with busy check + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @field: Field to check if register is busy + * @reg: Pointer to where register contents should be stored + * + * This function will read the given register, and checks if the + * register is busy. If it is, it will sleep for a couple of + * microseconds before reading the register again. If the register + * is not read after a certain timeout, this function will return + * FALSE. + */ +int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, +			   const unsigned int offset, +			   const struct rt2x00_field32 field, +			   u32 *reg); + +/** + * struct queue_entry_priv_pci: Per entry PCI specific information + * + * @desc: Pointer to device descriptor + * @desc_dma: DMA pointer to &desc. + * @data: Pointer to device's entry memory. + * @data_dma: DMA pointer to &data. + */ +struct queue_entry_priv_pci { +	__le32 *desc; +	dma_addr_t desc_dma; +}; + +/** + * rt2x00pci_rxdone - Handle RX done events + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. 
+ * + * Returns true if there are still rx frames pending and false if all + * pending rx frames were processed. + */ +bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00pci_flush_queue - Flush data queue + * @queue: Data queue to stop + * @drop: True to drop all pending frames. + * + * This will wait for a maximum of 100ms, waiting for the queues + * to become empty. + */ +void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); + +/* + * Device initialization handlers. + */ +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); + +#endif /* RT2X00MMIO_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index a0c8caef3b0..e87865e3311 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -33,182 +33,6 @@  #include "rt2x00pci.h"  /* - * Register access. - */ -int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, -			   const unsigned int offset, -			   const struct rt2x00_field32 field, -			   u32 *reg) -{ -	unsigned int i; - -	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) -		return 0; - -	for (i = 0; i < REGISTER_BUSY_COUNT; i++) { -		rt2x00pci_register_read(rt2x00dev, offset, reg); -		if (!rt2x00_get_field32(*reg, field)) -			return 1; -		udelay(REGISTER_BUSY_DELAY); -	} - -	ERROR(rt2x00dev, "Indirect register access failed: " -	      "offset=0x%.08x, value=0x%.08x\n", offset, *reg); -	*reg = ~0; - -	return 0; -} -EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); - -bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) -{ -	struct data_queue *queue = rt2x00dev->rx; -	struct queue_entry *entry; -	struct queue_entry_priv_pci *entry_priv; -	struct skb_frame_desc *skbdesc; -	int max_rx = 16; - -	while (--max_rx) { -		entry = rt2x00queue_get_entry(queue, Q_INDEX); -		entry_priv = entry->priv_data; - -		if (rt2x00dev->ops->lib->get_entry_state(entry)) -			break; - -		/* -		 * Fill in 
desc fields of the skb descriptor -		 */ -		skbdesc = get_skb_frame_desc(entry->skb); -		skbdesc->desc = entry_priv->desc; -		skbdesc->desc_len = entry->queue->desc_size; - -		/* -		 * DMA is already done, notify rt2x00lib that -		 * it finished successfully. -		 */ -		rt2x00lib_dmastart(entry); -		rt2x00lib_dmadone(entry); - -		/* -		 * Send the frame to rt2x00lib for further processing. -		 */ -		rt2x00lib_rxdone(entry, GFP_ATOMIC); -	} - -	return !max_rx; -} -EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); - -void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) -{ -	unsigned int i; - -	for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) -		msleep(10); -} -EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); - -/* - * Device initialization handlers. - */ -static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, -				     struct data_queue *queue) -{ -	struct queue_entry_priv_pci *entry_priv; -	void *addr; -	dma_addr_t dma; -	unsigned int i; - -	/* -	 * Allocate DMA memory for descriptor and buffer. -	 */ -	addr = dma_alloc_coherent(rt2x00dev->dev, -				  queue->limit * queue->desc_size, -				  &dma, GFP_KERNEL); -	if (!addr) -		return -ENOMEM; - -	memset(addr, 0, queue->limit * queue->desc_size); - -	/* -	 * Initialize all queue entries to contain valid addresses. 
-	 */ -	for (i = 0; i < queue->limit; i++) { -		entry_priv = queue->entries[i].priv_data; -		entry_priv->desc = addr + i * queue->desc_size; -		entry_priv->desc_dma = dma + i * queue->desc_size; -	} - -	return 0; -} - -static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, -				     struct data_queue *queue) -{ -	struct queue_entry_priv_pci *entry_priv = -	    queue->entries[0].priv_data; - -	if (entry_priv->desc) -		dma_free_coherent(rt2x00dev->dev, -				  queue->limit * queue->desc_size, -				  entry_priv->desc, entry_priv->desc_dma); -	entry_priv->desc = NULL; -} - -int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) -{ -	struct data_queue *queue; -	int status; - -	/* -	 * Allocate DMA -	 */ -	queue_for_each(rt2x00dev, queue) { -		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); -		if (status) -			goto exit; -	} - -	/* -	 * Register interrupt handler. -	 */ -	status = request_irq(rt2x00dev->irq, -			     rt2x00dev->ops->lib->irq_handler, -			     IRQF_SHARED, rt2x00dev->name, rt2x00dev); -	if (status) { -		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", -		      rt2x00dev->irq, status); -		goto exit; -	} - -	return 0; - -exit: -	queue_for_each(rt2x00dev, queue) -		rt2x00pci_free_queue_dma(rt2x00dev, queue); - -	return status; -} -EXPORT_SYMBOL_GPL(rt2x00pci_initialize); - -void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) -{ -	struct data_queue *queue; - -	/* -	 * Free irq line. -	 */ -	free_irq(rt2x00dev->irq, rt2x00dev); - -	/* -	 * Free DMA -	 */ -	queue_for_each(rt2x00dev, queue) -		rt2x00pci_free_queue_dma(rt2x00dev, queue); -} -EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); - -/*   * PCI driver handlers.   
*/  static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h index e2c99f2b9a1..60d90b20f8b 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.h +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h @@ -36,94 +36,6 @@  #define PCI_DEVICE_DATA(__ops)	.driver_data = (kernel_ulong_t)(__ops)  /* - * Register access. - */ -static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, -					   const unsigned int offset, -					   u32 *value) -{ -	*value = readl(rt2x00dev->csr.base + offset); -} - -static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, -						const unsigned int offset, -						void *value, const u32 length) -{ -	memcpy_fromio(value, rt2x00dev->csr.base + offset, length); -} - -static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, -					    const unsigned int offset, -					    u32 value) -{ -	writel(value, rt2x00dev->csr.base + offset); -} - -static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, -						 const unsigned int offset, -						 const void *value, -						 const u32 length) -{ -	__iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); -} - -/** - * rt2x00pci_regbusy_read - Read from register with busy check - * @rt2x00dev: Device pointer, see &struct rt2x00_dev. - * @offset: Register offset - * @field: Field to check if register is busy - * @reg: Pointer to where register contents should be stored - * - * This function will read the given register, and checks if the - * register is busy. If it is, it will sleep for a couple of - * microseconds before reading the register again. If the register - * is not read after a certain timeout, this function will return - * FALSE. 
- */ -int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, -			   const unsigned int offset, -			   const struct rt2x00_field32 field, -			   u32 *reg); - -/** - * struct queue_entry_priv_pci: Per entry PCI specific information - * - * @desc: Pointer to device descriptor - * @desc_dma: DMA pointer to &desc. - * @data: Pointer to device's entry memory. - * @data_dma: DMA pointer to &data. - */ -struct queue_entry_priv_pci { -	__le32 *desc; -	dma_addr_t desc_dma; -}; - -/** - * rt2x00pci_rxdone - Handle RX done events - * @rt2x00dev: Device pointer, see &struct rt2x00_dev. - * - * Returns true if there are still rx frames pending and false if all - * pending rx frames were processed. - */ -bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); - -/** - * rt2x00pci_flush_queue - Flush data queue - * @queue: Data queue to stop - * @drop: True to drop all pending frames. - * - * This will wait for a maximum of 100ms, waiting for the queues - * to become empty. - */ -void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); - -/* - * Device initialization handlers. - */ -int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); -void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); - -/*   * PCI driver handlers.   
*/  int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index f95792cfcf8..9e3c8ff53e3 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -35,6 +35,7 @@  #include <linux/eeprom_93cx6.h>  #include "rt2x00.h" +#include "rt2x00mmio.h"  #include "rt2x00pci.h"  #include "rt61pci.h" diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index eef38cfd812..ca33ae19393 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c @@ -22,7 +22,7 @@  #include <linux/slab.h>  #include <linux/interrupt.h>  #include <linux/gpio.h> -#include <linux/mei_bus.h> +#include <linux/mei_cl_bus.h>  #include <linux/nfc.h>  #include <net/nfc/hci.h> @@ -32,9 +32,6 @@  #define MICROREAD_DRIVER_NAME "microread" -#define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \ -			       0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) -  struct mei_nfc_hdr {  	u8 cmd;  	u8 status; @@ -48,7 +45,7 @@ struct mei_nfc_hdr {  #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)  struct microread_mei_phy { -	struct mei_device *mei_device; +	struct mei_cl_device *device;  	struct nfc_hci_dev *hdev;  	int powered; @@ -105,14 +102,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb)  	MEI_DUMP_SKB_OUT("mei frame sent", skb); -	r = mei_send(phy->device, skb->data, skb->len); +	r = mei_cl_send(phy->device, skb->data, skb->len);  	if (r > 0)  		r = 0;  	return r;  } -static void microread_event_cb(struct mei_device *device, u32 events, +static void microread_event_cb(struct mei_cl_device *device, u32 events,  			       void *context)  {  	struct microread_mei_phy *phy = context; @@ -120,7 +117,7 @@ static void microread_event_cb(struct mei_device *device, u32 events,  	if (phy->hard_fault != 0)  		return; -	if (events & BIT(MEI_EVENT_RX)) { +	if (events & BIT(MEI_CL_EVENT_RX)) {  		
struct sk_buff *skb;  		int reply_size; @@ -128,7 +125,7 @@ static void microread_event_cb(struct mei_device *device, u32 events,  		if (!skb)  			return; -		reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ); +		reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);  		if (reply_size < MEI_NFC_HEADER_SIZE) {  			kfree(skb);  			return; @@ -149,8 +146,8 @@ static struct nfc_phy_ops mei_phy_ops = {  	.disable = microread_mei_disable,  }; -static int microread_mei_probe(struct mei_device *device, -			       const struct mei_id *id) +static int microread_mei_probe(struct mei_cl_device *device, +			       const struct mei_cl_device_id *id)  {  	struct microread_mei_phy *phy;  	int r; @@ -164,9 +161,9 @@ static int microread_mei_probe(struct mei_device *device,  	}  	phy->device = device; -	mei_set_clientdata(device, phy); +	mei_cl_set_drvdata(device, phy); -	r = mei_register_event_cb(device, microread_event_cb, phy); +	r = mei_cl_register_event_cb(device, microread_event_cb, phy);  	if (r) {  		pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");  		goto err_out; @@ -186,9 +183,9 @@ err_out:  	return r;  } -static int microread_mei_remove(struct mei_device *device) +static int microread_mei_remove(struct mei_cl_device *device)  { -	struct microread_mei_phy *phy = mei_get_clientdata(device); +	struct microread_mei_phy *phy = mei_cl_get_drvdata(device);  	pr_info("Removing microread\n"); @@ -202,16 +199,15 @@ static int microread_mei_remove(struct mei_device *device)  	return 0;  } -static struct mei_id microread_mei_tbl[] = { -	{ MICROREAD_DRIVER_NAME, MICROREAD_UUID }, +static struct mei_cl_device_id microread_mei_tbl[] = { +	{ MICROREAD_DRIVER_NAME },  	/* required last entry */  	{ }  }; -  MODULE_DEVICE_TABLE(mei, microread_mei_tbl); -static struct mei_driver microread_driver = { +static struct mei_cl_driver microread_driver = {  	.id_table = microread_mei_tbl,  	.name = MICROREAD_DRIVER_NAME, @@ -225,7 +221,7 @@ static int 
microread_mei_init(void)  	pr_debug(DRIVER_DESC ": %s\n", __func__); -	r = mei_driver_register(µread_driver); +	r = mei_cl_driver_register(µread_driver);  	if (r) {  		pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");  		return r; @@ -236,7 +232,7 @@ static int microread_mei_init(void)  static void microread_mei_exit(void)  { -	mei_driver_unregister(µread_driver); +	mei_cl_driver_unregister(µread_driver);  }  module_init(microread_mei_init); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dee5dddaa29..5147c210df5 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -53,14 +53,15 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)  		return;  	} -	if (!pci_dev->pm_cap || !pci_dev->pme_support -	     || pci_check_pme_status(pci_dev)) { -		if (pci_dev->pme_poll) -			pci_dev->pme_poll = false; +	/* Clear PME Status if set. */ +	if (pci_dev->pme_support) +		pci_check_pme_status(pci_dev); -		pci_wakeup_event(pci_dev); -		pm_runtime_resume(&pci_dev->dev); -	} +	if (pci_dev->pme_poll) +		pci_dev->pme_poll = false; + +	pci_wakeup_event(pci_dev); +	pm_runtime_resume(&pci_dev->dev);  	if (pci_dev->subordinate)  		pci_pme_wakeup_bus(pci_dev->subordinate); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1fa1e482a99..79277fb36c6 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -390,9 +390,10 @@ static void pci_device_shutdown(struct device *dev)  	/*  	 * Turn off Bus Master bit on the device to tell it to not -	 * continue to do DMA +	 * continue to do DMA. Don't touch devices in D3cold or unknown states.  	 
*/ -	pci_clear_master(pci_dev); +	if (pci_dev->current_state <= PCI_D3hot) +		pci_clear_master(pci_dev);  }  #ifdef CONFIG_PM diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 08c243ab034..ed4d0949833 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -185,14 +185,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {  #endif /* !PM */  /* - * PCIe port runtime suspend is broken for some chipsets, so use a - * black list to disable runtime PM for these chipsets. - */ -static const struct pci_device_id port_runtime_pm_black_list[] = { -	{ /* end: all zeroes */ } -}; - -/*   * pcie_portdrv_probe - Probe PCI-Express port devices   * @dev: PCI-Express port device being probed   * @@ -225,16 +217,11 @@ static int pcie_portdrv_probe(struct pci_dev *dev,  	 * it by default.  	 */  	dev->d3cold_allowed = false; -	if (!pci_match_id(port_runtime_pm_black_list, dev)) -		pm_runtime_put_noidle(&dev->dev); -  	return 0;  }  static void pcie_portdrv_remove(struct pci_dev *dev)  { -	if (!pci_match_id(port_runtime_pm_black_list, dev)) -		pm_runtime_get_noresume(&dev->dev);  	pcie_port_device_remove(dev);  	pci_disable_device(dev);  } diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index b41ac7756a4..c5d0a08a874 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -100,27 +100,6 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)  	return min((size_t)(image - rom), size);  } -static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) -{ -	struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; -	loff_t start; - -	/* assign the ROM an address if it doesn't have one */ -	if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) -		return 0; -	start = pci_resource_start(pdev, PCI_ROM_RESOURCE); -	*size = pci_resource_len(pdev, PCI_ROM_RESOURCE); - -	if (*size == 0) -		return 0; - -	/* Enable ROM space decodes */ -	if (pci_enable_rom(pdev)) -		return 0; - -	
return start; -} -  /**   * pci_map_rom - map a PCI ROM to kernel space   * @pdev: pointer to pci device struct @@ -135,7 +114,7 @@ static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size)  void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)  {  	struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; -	loff_t start = 0; +	loff_t start;  	void __iomem *rom;  	/* @@ -154,21 +133,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)  			return (void __iomem *)(unsigned long)  				pci_resource_start(pdev, PCI_ROM_RESOURCE);  		} else { -			start = pci_find_rom(pdev, size); -		} -	} +			/* assign the ROM an address if it doesn't have one */ +			if (res->parent == NULL && +			    pci_assign_resource(pdev,PCI_ROM_RESOURCE)) +				return NULL; +			start = pci_resource_start(pdev, PCI_ROM_RESOURCE); +			*size = pci_resource_len(pdev, PCI_ROM_RESOURCE); +			if (*size == 0) +				return NULL; -	/* -	 * Some devices may provide ROMs via a source other than the BAR -	 */ -	if (!start && pdev->rom && pdev->romlen) { -		*size = pdev->romlen; -		return phys_to_virt(pdev->rom); +			/* Enable ROM space decodes */ +			if (pci_enable_rom(pdev)) +				return NULL; +		}  	} -	if (!start) -		return NULL; -  	rom = ioremap(start, *size);  	if (!rom) {  		/* restore enable if ioremap fails */ @@ -202,8 +181,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)  	if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))  		return; -	if (!pdev->rom || !pdev->romlen) -		iounmap(rom); +	iounmap(rom);  	/* Disable again before continuing, leave enabled if pci=rom */  	if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) @@ -227,7 +205,24 @@ void pci_cleanup_rom(struct pci_dev *pdev)  	}  } +/** + * pci_platform_rom - provides a pointer to any ROM image provided by the + * platform + * @pdev: pointer to pci device struct + * @size: pointer to receive size of pci window over ROM + */ +void __iomem *pci_platform_rom(struct pci_dev 
*pdev, size_t *size) +{ +	if (pdev->rom && pdev->romlen) { +		*size = pdev->romlen; +		return phys_to_virt((phys_addr_t)pdev->rom); +	} + +	return NULL; +} +  EXPORT_SYMBOL(pci_map_rom);  EXPORT_SYMBOL(pci_unmap_rom);  EXPORT_SYMBOL_GPL(pci_enable_rom);  EXPORT_SYMBOL_GPL(pci_disable_rom); +EXPORT_SYMBOL(pci_platform_rom); diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 45cacf79f3a..1a779bbfb87 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -134,7 +134,6 @@ static const struct key_entry hp_wmi_keymap[] = {  	{ KE_KEY, 0x2142, { KEY_MEDIA } },  	{ KE_KEY, 0x213b, { KEY_INFO } },  	{ KE_KEY, 0x2169, { KEY_DIRECTION } }, -	{ KE_KEY, 0x216a, { KEY_SETUP } },  	{ KE_KEY, 0x231b, { KEY_HELP } },  	{ KE_END, 0 }  }; @@ -925,9 +924,6 @@ static int __init hp_wmi_init(void)  		err = hp_wmi_input_setup();  		if (err)  			return err; -		 -		//Enable magic for hotkeys that run on the SMBus -		ec_write(0xe6,0x6e);  	}  	if (bios_capable) { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9a907567f41..edec135b168 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1964,9 +1964,6 @@ struct tp_nvram_state {  /* kthread for the hotkey poller */  static struct task_struct *tpacpi_hotkey_task; -/* Acquired while the poller kthread is running, use to sync start/stop */ -static struct mutex hotkey_thread_mutex; -  /*   * Acquire mutex to write poller control variables as an   * atomic block. 
@@ -2462,8 +2459,6 @@ static int hotkey_kthread(void *data)  	unsigned int poll_freq;  	bool was_frozen; -	mutex_lock(&hotkey_thread_mutex); -  	if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)  		goto exit; @@ -2523,7 +2518,6 @@ static int hotkey_kthread(void *data)  	}  exit: -	mutex_unlock(&hotkey_thread_mutex);  	return 0;  } @@ -2533,9 +2527,6 @@ static void hotkey_poll_stop_sync(void)  	if (tpacpi_hotkey_task) {  		kthread_stop(tpacpi_hotkey_task);  		tpacpi_hotkey_task = NULL; -		mutex_lock(&hotkey_thread_mutex); -		/* at this point, the thread did exit */ -		mutex_unlock(&hotkey_thread_mutex);  	}  } @@ -3234,7 +3225,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)  	mutex_init(&hotkey_mutex);  #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL -	mutex_init(&hotkey_thread_mutex);  	mutex_init(&hotkey_thread_data_mutex);  #endif diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index cc1f7bf53fd..c6d77e20622 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -4,7 +4,7 @@ menu "Remoteproc drivers"  config REMOTEPROC  	tristate  	depends on HAS_DMA -	select FW_CONFIG +	select FW_LOADER  	select VIRTIO  config OMAP_REMOTEPROC diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 29387df4bfc..8edb4aed5d3 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)  	 * TODO: support predefined notifyids (via resource table)  	 */  	ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); -	if (ret) { +	if (ret < 0) {  		dev_err(dev, "idr_alloc failed: %d\n", ret);  		dma_free_coherent(dev->parent, size, va, dma);  		return ret; @@ -366,10 +366,12 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,  	/* it is now safe to add the virtio device */  	ret = rproc_add_virtio_dev(rvdev, rsc->id);  	if (ret) -		goto free_rvdev; +		goto remove_rvdev;  
	return 0; +remove_rvdev: +	list_del(&rvdev->node);  free_rvdev:  	kfree(rvdev);  	return ret; diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c index a7743c06933..fb95c422005 100644 --- a/drivers/remoteproc/ste_modem_rproc.c +++ b/drivers/remoteproc/ste_modem_rproc.c @@ -240,6 +240,8 @@ static int sproc_drv_remove(struct platform_device *pdev)  	/* Unregister as remoteproc device */  	rproc_del(sproc->rproc); +	dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, +			  sproc->fw_addr, sproc->fw_dma_addr);  	rproc_put(sproc->rproc);  	mdev->drv_data = NULL; @@ -297,10 +299,13 @@ static int sproc_probe(struct platform_device *pdev)  	/* Register as a remoteproc device */  	err = rproc_add(rproc);  	if (err) -		goto free_rproc; +		goto free_mem;  	return 0; +free_mem: +	dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, +			  sproc->fw_addr, sproc->fw_dma_addr);  free_rproc:  	/* Reset device data upon error */  	mdev->drv_data = NULL; diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 0a9f27e094e..434ebc3a99d 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -44,7 +44,6 @@ static DECLARE_COMPLETION(at91_rtc_updated);  static unsigned int at91_alarm_year = AT91_RTC_EPOCH;  static void __iomem *at91_rtc_regs;  static int irq; -static u32 at91_rtc_imr;  /*   * Decode time/date into rtc_time structure @@ -109,11 +108,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)  	cr = at91_rtc_read(AT91_RTC_CR);  	at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); -	at91_rtc_imr |= AT91_RTC_ACKUPD;  	at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);  	wait_for_completion(&at91_rtc_updated);	/* wait for ACKUPD interrupt */  	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); -	at91_rtc_imr &= ~AT91_RTC_ACKUPD;  	at91_rtc_write(AT91_RTC_TIMR,  			  bin2bcd(tm->tm_sec) << 0 @@ -145,7 +142,7 @@ static int at91_rtc_readalarm(struct device *dev, 
struct rtc_wkalrm *alrm)  	tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);  	tm->tm_year = at91_alarm_year - 1900; -	alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM) +	alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)  			? 1 : 0;  	dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, @@ -171,7 +168,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)  	tm.tm_sec = alrm->time.tm_sec;  	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); -	at91_rtc_imr &= ~AT91_RTC_ALARM;  	at91_rtc_write(AT91_RTC_TIMALR,  		  bin2bcd(tm.tm_sec) << 0  		| bin2bcd(tm.tm_min) << 8 @@ -184,7 +180,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)  	if (alrm->enabled) {  		at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); -		at91_rtc_imr |= AT91_RTC_ALARM;  		at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);  	} @@ -201,12 +196,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)  	if (enabled) {  		at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); -		at91_rtc_imr |= AT91_RTC_ALARM;  		at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); -	} else { +	} else  		at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); -		at91_rtc_imr &= ~AT91_RTC_ALARM; -	}  	return 0;  } @@ -215,10 +207,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)   */  static int at91_rtc_proc(struct device *dev, struct seq_file *seq)  { +	unsigned long imr = at91_rtc_read(AT91_RTC_IMR); +  	seq_printf(seq, "update_IRQ\t: %s\n", -			(at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no"); +			(imr & AT91_RTC_ACKUPD) ? "yes" : "no");  	seq_printf(seq, "periodic_IRQ\t: %s\n", -			(at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no"); +			(imr & AT91_RTC_SECEV) ? 
"yes" : "no");  	return 0;  } @@ -233,7 +227,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)  	unsigned int rtsr;  	unsigned long events = 0; -	rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr; +	rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);  	if (rtsr) {		/* this interrupt is shared!  Is it ours? */  		if (rtsr & AT91_RTC_ALARM)  			events |= (RTC_AF | RTC_IRQF); @@ -297,7 +291,6 @@ static int __init at91_rtc_probe(struct platform_device *pdev)  	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |  					AT91_RTC_SECEV | AT91_RTC_TIMEV |  					AT91_RTC_CALEV); -	at91_rtc_imr = 0;  	ret = request_irq(irq, at91_rtc_interrupt,  				IRQF_SHARED, @@ -336,7 +329,6 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)  	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |  					AT91_RTC_SECEV | AT91_RTC_TIMEV |  					AT91_RTC_CALEV); -	at91_rtc_imr = 0;  	free_irq(irq, pdev);  	rtc_device_unregister(rtc); @@ -349,35 +341,31 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)  /* AT91RM9200 RTC Power management control */ -static u32 at91_rtc_bkpimr; - +static u32 at91_rtc_imr;  static int at91_rtc_suspend(struct device *dev)  {  	/* this IRQ is shared with DBGU and other hardware which isn't  	 * necessarily doing PM like we are...  	 
*/ -	at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV); -	if (at91_rtc_bkpimr) { -		if (device_may_wakeup(dev)) { +	at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) +			& (AT91_RTC_ALARM|AT91_RTC_SECEV); +	if (at91_rtc_imr) { +		if (device_may_wakeup(dev))  			enable_irq_wake(irq); -		} else { -			at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr); -			at91_rtc_imr &= ~at91_rtc_bkpimr; -		} -} +		else +			at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); +	}  	return 0;  }  static int at91_rtc_resume(struct device *dev)  { -	if (at91_rtc_bkpimr) { -		if (device_may_wakeup(dev)) { +	if (at91_rtc_imr) { +		if (device_may_wakeup(dev))  			disable_irq_wake(irq); -		} else { -			at91_rtc_imr |= at91_rtc_bkpimr; -			at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr); -		} +		else +			at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);  	}  	return 0;  } diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h index 5f940b6844c..da1945e5f71 100644 --- a/drivers/rtc/rtc-at91rm9200.h +++ b/drivers/rtc/rtc-at91rm9200.h @@ -64,6 +64,7 @@  #define	AT91_RTC_SCCR		0x1c			/* Status Clear Command Register */  #define	AT91_RTC_IER		0x20			/* Interrupt Enable Register */  #define	AT91_RTC_IDR		0x24			/* Interrupt Disable Register */ +#define	AT91_RTC_IMR		0x28			/* Interrupt Mask Register */  #define	AT91_RTC_VER		0x2c			/* Valid Entry Register */  #define		AT91_RTC_NVTIM		(1 <<  0)		/* Non valid Time */ diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 5ac9c935c15..e9b9c839283 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -307,7 +307,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)  	case EQC_WR_PROHIBIT:  		spin_lock_irqsave(&bdev->lock, flags);  		if (bdev->state != SCM_WR_PROHIBIT) -			pr_info("%lu: Write access to the SCM increment is suspended\n", +			pr_info("%lx: Write access to the SCM increment is suspended\n",  				(unsigned long) bdev->scmdev->address);  		bdev->state = SCM_WR_PROHIBIT;  		
spin_unlock_irqrestore(&bdev->lock, flags); @@ -445,7 +445,7 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)  	spin_lock_irqsave(&bdev->lock, flags);  	if (bdev->state == SCM_WR_PROHIBIT) -		pr_info("%lu: Write access to the SCM increment is restored\n", +		pr_info("%lx: Write access to the SCM increment is restored\n",  			(unsigned long) bdev->scmdev->address);  	bdev->state = SCM_OPER;  	spin_unlock_irqrestore(&bdev->lock, flags); @@ -463,12 +463,15 @@ static int __init scm_blk_init(void)  		goto out;  	scm_major = ret; -	if (scm_alloc_rqs(nr_requests)) +	ret = scm_alloc_rqs(nr_requests); +	if (ret)  		goto out_unreg;  	scm_debug = debug_register("scm_log", 16, 1, 16); -	if (!scm_debug) +	if (!scm_debug) { +		ret = -ENOMEM;  		goto out_free; +	}  	debug_register_view(scm_debug, &debug_hex_ascii_view);  	debug_set_level(scm_debug, 2); diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c index 5f6180d6ff0..c98cf52d78d 100644 --- a/drivers/s390/block/scm_drv.c +++ b/drivers/s390/block/scm_drv.c @@ -19,7 +19,7 @@ static void scm_notify(struct scm_device *scmdev, enum scm_event event)  	switch (event) {  	case SCM_CHANGE: -		pr_info("%lu: The capabilities of the SCM increment changed\n", +		pr_info("%lx: The capabilities of the SCM increment changed\n",  			(unsigned long) scmdev->address);  		SCM_LOG(2, "State changed");  		SCM_LOG_STATE(2, scmdev); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index b907dba2402..cee69dac3e1 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)  	int i, rc;  	/* Check if the tty3270 is already there. 
*/ -	view = raw3270_find_view(&tty3270_fn, tty->index); +	view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);  	if (!IS_ERR(view)) {  		tp = container_of(view, struct tty3270, view);  		tty->driver_data = tp; @@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)  		tp->inattr = TF_INPUT;  		return tty_port_install(&tp->port, driver, tty);  	} -	if (tty3270_max_index < tty->index) -		tty3270_max_index = tty->index; +	if (tty3270_max_index < tty->index + 1) +		tty3270_max_index = tty->index + 1;  	/* Allocate tty3270 structure on first open. */  	tp = tty3270_alloc_view();  	if (IS_ERR(tp))  		return PTR_ERR(tp); -	rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); +	rc = raw3270_add_view(&tp->view, &tty3270_fn, +			      tty->index + RAW3270_FIRSTMINOR);  	if (rc) {  		tty3270_free_view(tp);  		return rc; @@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = {  void tty3270_create_cb(int minor)  { -	tty_register_device(tty3270_driver, minor, NULL); +	tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);  }  void tty3270_destroy_cb(int minor)  { -	tty_unregister_device(tty3270_driver, minor); +	tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);  }  struct raw3270_notifier tty3270_notifier = @@ -1884,7 +1885,8 @@ static int __init tty3270_init(void)  	driver->driver_name = "tty3270";  	driver->name = "3270/tty";  	driver->major = IBM_TTY3270_MAJOR; -	driver->minor_start = 0; +	driver->minor_start = RAW3270_FIRSTMINOR; +	driver->name_base = RAW3270_FIRSTMINOR;  	driver->type = TTY_DRIVER_TYPE_SYSTEM;  	driver->subtype = SYSTEM_TYPE_TTY;  	driver->init_termios = tty_std_termios; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 8c0622399fc..6ccb7457746 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -769,6 +769,7 @@ struct qeth_card {  	unsigned long thread_start_mask;  	unsigned long 
thread_allowed_mask;  	unsigned long thread_running_mask; +	struct task_struct *recovery_task;  	spinlock_t ip_lock;  	struct list_head ip_list;  	struct list_head *ip_tbd_list; @@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list;  extern struct kmem_cache *qeth_core_header_cache;  extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; +void qeth_set_recovery_task(struct qeth_card *); +void qeth_clear_recovery_task(struct qeth_card *);  void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);  int qeth_threads_running(struct qeth_card *, unsigned long);  int qeth_wait_for_threads(struct qeth_card *, unsigned long); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 0d73a999983..451f9202059 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card)  	return "n/a";  } +void qeth_set_recovery_task(struct qeth_card *card) +{ +	card->recovery_task = current; +} +EXPORT_SYMBOL_GPL(qeth_set_recovery_task); + +void qeth_clear_recovery_task(struct qeth_card *card) +{ +	card->recovery_task = NULL; +} +EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); + +static bool qeth_is_recovery_task(const struct qeth_card *card) +{ +	return card->recovery_task == current; +} +  void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,  			 int clear_start_mask)  { @@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running);  int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)  { +	if (qeth_is_recovery_task(card)) +		return 0;  	return wait_event_interruptible(card->wait_q,  			qeth_threads_running(card, threads) == 0);  } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d690166efea..155b101bd73 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr)  	
QETH_CARD_TEXT(card, 2, "recover2");  	dev_warn(&card->gdev->dev,  		"A recovery process has been started for the device\n"); +	qeth_set_recovery_task(card);  	__qeth_l2_set_offline(card->gdev, 1);  	rc = __qeth_l2_set_online(card->gdev, 1);  	if (!rc) @@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr)  		dev_warn(&card->gdev->dev, "The qeth device driver "  				"failed to recover an error on the device\n");  	} +	qeth_clear_recovery_task(card);  	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);  	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);  	return 0; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 8710337dab3..1f7edf1b26c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3515,6 +3515,7 @@ static int qeth_l3_recover(void *ptr)  	QETH_CARD_TEXT(card, 2, "recover2");  	dev_warn(&card->gdev->dev,  		"A recovery process has been started for the device\n"); +	qeth_set_recovery_task(card);  	__qeth_l3_set_offline(card->gdev, 1);  	rc = __qeth_l3_set_online(card->gdev, 1);  	if (!rc) @@ -3525,6 +3526,7 @@ static int qeth_l3_recover(void *ptr)  		dev_warn(&card->gdev->dev, "The qeth device driver "  				"failed to recover an error on the device\n");  	} +	qeth_clear_recovery_task(card);  	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);  	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);  	return 0; diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index 1a9d1e3ce64..c1441ed282e 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c @@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id)  	return IRQ_HANDLED;  } -static void __init reset_one_i2c(struct bbc_i2c_bus *bp) +static void reset_one_i2c(struct bbc_i2c_bus *bp)  {  	writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0);  	writeb(bp->own, bp->i2c_control_regs + 0x1); @@ -291,7 +291,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus 
*bp)  	writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0);  } -static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index) +static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index)  {  	struct bbc_i2c_bus *bp;  	struct device_node *dp; diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 2daf4b0da43..90bc7bd0096 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -940,6 +940,7 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)  	fc_exch_init(lport);  	fc_rport_init(lport);  	fc_disc_init(lport); +	fc_disc_config(lport, lport);  	return 0;  } @@ -2133,6 +2134,7 @@ static int _bnx2fc_create(struct net_device *netdev,  	}  	ctlr = bnx2fc_to_ctlr(interface); +	cdev = fcoe_ctlr_to_ctlr_dev(ctlr);  	interface->vlan_id = vlan_id;  	interface->timer_work_queue = @@ -2143,7 +2145,7 @@ static int _bnx2fc_create(struct net_device *netdev,  		goto ifput_err;  	} -	lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); +	lport = bnx2fc_if_create(interface, &cdev->dev, 0);  	if (!lport) {  		printk(KERN_ERR PFX "Failed to create interface (%s)\n",  			netdev->name); @@ -2159,8 +2161,6 @@ static int _bnx2fc_create(struct net_device *netdev,  	/* Make this master N_port */  	ctlr->lp = lport; -	cdev = fcoe_ctlr_to_ctlr_dev(ctlr); -  	if (link_state == BNX2FC_CREATE_LINK_UP)  		cdev->enabled = FCOE_CTLR_ENABLED;  	else diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b5d92fc93c7..9bfdc9a3f89 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -490,7 +490,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)  {  	struct net_device *netdev = fcoe->netdev;  	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); -	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);  	rtnl_lock();  	if (!fcoe->removed) @@ -501,7 +500,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface 
*fcoe)  	/* tear-down the FCoE controller */  	fcoe_ctlr_destroy(fip);  	scsi_host_put(fip->lp->host); -	fcoe_ctlr_device_delete(ctlr_dev);  	dev_put(netdev);  	module_put(THIS_MODULE);  } @@ -2194,6 +2192,8 @@ out_nodev:   */  static void fcoe_destroy_work(struct work_struct *work)  { +	struct fcoe_ctlr_device *cdev; +	struct fcoe_ctlr *ctlr;  	struct fcoe_port *port;  	struct fcoe_interface *fcoe;  	struct Scsi_Host *shost; @@ -2224,10 +2224,15 @@ static void fcoe_destroy_work(struct work_struct *work)  	mutex_lock(&fcoe_config_mutex);  	fcoe = port->priv; +	ctlr = fcoe_to_ctlr(fcoe); +	cdev = fcoe_ctlr_to_ctlr_dev(ctlr); +  	fcoe_if_destroy(port->lport);  	fcoe_interface_cleanup(fcoe);  	mutex_unlock(&fcoe_config_mutex); + +	fcoe_ctlr_device_delete(cdev);  }  /** @@ -2335,7 +2340,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,  		rc = -EIO;  		rtnl_unlock();  		fcoe_interface_cleanup(fcoe); -		goto out_nortnl; +		mutex_unlock(&fcoe_config_mutex); +		fcoe_ctlr_device_delete(ctlr_dev); +		goto out;  	}  	/* Make this the "master" N_Port */ @@ -2375,8 +2382,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,  out_nodev:  	rtnl_unlock(); -out_nortnl:  	mutex_unlock(&fcoe_config_mutex); +out:  	return rc;  } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 08c3bc398da..a76247201be 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -2815,6 +2815,47 @@ unlock:  }  /** + * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode + * @lport: The local port to be (re)configured + * @fip:   The FCoE controller whose mode is changing + * @fip_mode: The new fip mode + * + * Note that the we shouldn't be changing the libfc discovery settings + * (fc_disc_config) while an lport is going through the libfc state + * machine. 
The mode can only be changed when a fcoe_ctlr device is + * disabled, so that should ensure that this routine is only called + * when nothing is happening. + */ +void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, +			enum fip_state fip_mode) +{ +	void *priv; + +	WARN_ON(lport->state != LPORT_ST_RESET && +		lport->state != LPORT_ST_DISABLED); + +	if (fip_mode == FIP_MODE_VN2VN) { +		lport->rport_priv_size = sizeof(struct fcoe_rport); +		lport->point_to_multipoint = 1; +		lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; +		lport->tt.disc_start = fcoe_ctlr_disc_start; +		lport->tt.disc_stop = fcoe_ctlr_disc_stop; +		lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; +		priv = fip; +	} else { +		lport->rport_priv_size = 0; +		lport->point_to_multipoint = 0; +		lport->tt.disc_recv_req = NULL; +		lport->tt.disc_start = NULL; +		lport->tt.disc_stop = NULL; +		lport->tt.disc_stop_final = NULL; +		priv = lport; +	} + +	fc_disc_config(lport, priv); +} + +/**   * fcoe_libfc_config() - Sets up libfc related properties for local port   * @lport:    The local port to configure libfc for   * @fip:      The FCoE controller in use by the local port @@ -2833,21 +2874,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,  	fc_exch_init(lport);  	fc_elsct_init(lport);  	fc_lport_init(lport); -	if (fip->mode == FIP_MODE_VN2VN) -		lport->rport_priv_size = sizeof(struct fcoe_rport);  	fc_rport_init(lport); -	if (fip->mode == FIP_MODE_VN2VN) { -		lport->point_to_multipoint = 1; -		lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; -		lport->tt.disc_start = fcoe_ctlr_disc_start; -		lport->tt.disc_stop = fcoe_ctlr_disc_stop; -		lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; -		mutex_init(&lport->disc.disc_mutex); -		INIT_LIST_HEAD(&lport->disc.rports); -		lport->disc.priv = fip; -	} else { -		fc_disc_init(lport); -	} +	fc_disc_init(lport); +	fcoe_ctlr_mode_set(lport, fip, fip->mode);  	return 0;  }  EXPORT_SYMBOL_GPL(fcoe_libfc_config); @@ 
-2875,6 +2904,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected);  void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)  {  	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); +	struct fc_lport *lport = ctlr->lp;  	mutex_lock(&ctlr->ctlr_mutex);  	switch (ctlr_dev->mode) { @@ -2888,5 +2918,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)  	}  	mutex_unlock(&ctlr->ctlr_mutex); + +	fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode);  }  EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index a044f593e8b..d0fa4b6c551 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)  		sdev->allow_restart = 1;  		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);  	} -	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);  	spin_unlock_irqrestore(shost->host_lock, lock_flags); +	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);  	return 0;  } diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f328089a106..2197b57fb22 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)  		ipr_trace;  	} -	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); +	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);  	if (!ipr_is_naca_model(res))  		res->needs_sync_complete = 1; @@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)  	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); -	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); +	if (ioa_cfg->intr_flag == IPR_USE_MSIX) +		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); +	else +		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);  	if (rc) {  		dev_err(&pdev->dev, "Can 
not assign irq %d\n", pdev->irq);  		return rc; @@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); -	free_irq(pdev->irq, ioa_cfg); +	if (ioa_cfg->intr_flag == IPR_USE_MSIX) +		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); +	else +		free_irq(pdev->irq, ioa_cfg);  	LEAVE; @@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev)  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);  	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);  	flush_work(&ioa_cfg->work_q); +	INIT_LIST_HEAD(&ioa_cfg->used_res_q);  	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);  	spin_lock(&ipr_driver_lock); diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8e561e6a557..880a9068ca1 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport)  }  /** - * fc_disc_init() - Initialize the discovery layer for a local port - * @lport: The local port that needs the discovery layer to be initialized + * fc_disc_config() - Configure the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be configured + * @priv: Private data structre for users of the discovery layer   */ -int fc_disc_init(struct fc_lport *lport) +void fc_disc_config(struct fc_lport *lport, void *priv)  { -	struct fc_disc *disc; +	struct fc_disc *disc = &lport->disc;  	if (!lport->tt.disc_start)  		lport->tt.disc_start = fc_disc_start; @@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport)  		lport->tt.disc_recv_req = fc_disc_recv_req;  	disc = &lport->disc; + +	disc->priv = priv; +} +EXPORT_SYMBOL(fc_disc_config); + +/** + * fc_disc_init() - Initialize the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be initialized + */ +void fc_disc_init(struct fc_lport 
*lport) +{ +	struct fc_disc *disc = &lport->disc; +  	INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);  	mutex_init(&disc->disc_mutex);  	INIT_LIST_HEAD(&disc->rports); - -	disc->priv = lport; - -	return 0;  }  EXPORT_SYMBOL(fc_disc_init); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index aec2e0da501..55cbd018015 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)  	linkrate  = phy->linkrate;  	memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); +	/* Handle vacant phy - rest of dr data is not valid so skip it */ +	if (phy->phy_state == PHY_VACANT) { +		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); +		phy->attached_dev_type = NO_DEVICE; +		if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { +			phy->phy_id = phy_id; +			goto skip; +		} else +			goto out; +	} +  	phy->attached_dev_type = to_dev_type(dr);  	if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))  		goto out; @@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)  	phy->phy->maximum_linkrate = dr->pmax_linkrate;  	phy->phy->negotiated_linkrate = phy->linkrate; + skip:  	if (new_phy)  		if (sas_phy_add(phy->phy)) {  			sas_phy_free(phy->phy); @@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)  	if (!disc_req)  		return -ENOMEM; -	disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); +	disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);  	if (!disc_resp) {  		kfree(disc_req);  		return -ENOMEM; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 74b67d98e95..d43faf34c1e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,  	struct lpfc_rqe *temp_hrqe;  	struct lpfc_rqe *temp_drqe;  	struct lpfc_register doorbell; -	int put_index = 
hq->host_index; +	int put_index;  	/* sanity check on queue memory */  	if (unlikely(!hq) || unlikely(!dq))  		return -ENOMEM; +	put_index = hq->host_index;  	temp_hrqe = hq->qe[hq->host_index].rqe;  	temp_drqe = dq->qe[dq->host_index].rqe; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 1d82eef4e1e..b3db9dcc261 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)  		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);  	} -	/* No pending activities shall be there on the vha now */ -	if (ql2xextended_error_logging & ql_dbg_user) -		msleep(random32()%10);  /* Just to see if something falls on -					* the net we have placed below */ -  	BUG_ON(atomic_read(&vha->vref_count));  	qla2x00_free_fcports(vha); diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 1626de52e32..fbc305f1c87 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -15,6 +15,7 @@   * | Mailbox commands             |       0x115b       | 0x111a-0x111b  |   * |                              |                    | 0x112c-0x112e  |   * |                              |                    | 0x113a         | + * |                              |                    | 0x1155-0x1158  |   * | Device Discovery             |       0x2087       | 0x2020-0x2022, |   * |                              |                    | 0x2016         |   * | Queue Command and IO tracing |       0x3031       | 0x3006-0x300b  | @@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,  		void *ring;  	} aq, *aqp; -	if (!ha->tgt.atio_q_length) +	if (!ha->tgt.atio_ring)  		return ptr;  	num_queues = 1; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c6509911772..65c5ff75936 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -863,7 +863,6 @@ typedef 
struct {  #define	MBX_1		BIT_1  #define	MBX_0		BIT_0 -#define RNID_TYPE_SET_VERSION	0x9  #define RNID_TYPE_ASIC_TEMP	0xC  /* diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index eb3ca21a7f1..b310fa97b54 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -358,9 +358,6 @@ extern int  qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);  extern int -qla2x00_set_driver_version(scsi_qla_host_t *, char *); - -extern int  qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,  	uint16_t, uint16_t, uint16_t, uint16_t); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index edf4d14a133..b59203393cb 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)  	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))  		qla24xx_read_fcp_prio_cfg(vha); -	qla2x00_set_driver_version(vha, QLA2XXX_VERSION); -  	return (rval);  } @@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)  			mq_size += ha->max_rsp_queues *  			    (rsp->length * sizeof(response_t));  		} -		if (ha->tgt.atio_q_length) +		if (ha->tgt.atio_ring)  			mq_size += ha->tgt.atio_q_length * sizeof(request_t);  		/* Allocate memory for Fibre Channel Event Buffer. 
*/  		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 186dd59ce4f..43345af5643 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)  	return rval;  } -int -qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) -{ -	int rval; -	mbx_cmd_t mc; -	mbx_cmd_t *mcp = &mc; -	int len; -	uint16_t dwlen; -	uint8_t *str; -	dma_addr_t str_dma; -	struct qla_hw_data *ha = vha->hw; - -	if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha)) -		return QLA_FUNCTION_FAILED; - -	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155, -	    "Entered %s.\n", __func__); - -	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); -	if (!str) { -		ql_log(ql_log_warn, vha, 0x1156, -		    "Failed to allocate driver version param.\n"); -		return QLA_MEMORY_ALLOC_FAILED; -	} - -	memcpy(str, "\x7\x3\x11\x0", 4); -	dwlen = str[0]; -	len = dwlen * sizeof(uint32_t) - 4; -	memset(str + 4, 0, len); -	if (len > strlen(version)) -		len = strlen(version); -	memcpy(str + 4, version, len); - -	mcp->mb[0] = MBC_SET_RNID_PARAMS; -	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; -	mcp->mb[2] = MSW(LSD(str_dma)); -	mcp->mb[3] = LSW(LSD(str_dma)); -	mcp->mb[6] = MSW(MSD(str_dma)); -	mcp->mb[7] = LSW(MSD(str_dma)); -	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; -	mcp->in_mb = MBX_0; -	mcp->tov = MBX_TOV_SECONDS; -	mcp->flags = 0; -	rval = qla2x00_mailbox_command(vha, mcp); - -	if (rval != QLA_SUCCESS) { -		ql_dbg(ql_dbg_mbx, vha, 0x1157, -		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); -	} else { -		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158, -		    "Done %s.\n", __func__); -	} - -	dma_pool_free(ha->s_dma_pool, str, str_dma); - -	return rval; -} -  static int  qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)  { diff --git a/drivers/scsi/qla2xxx/qla_version.h 
b/drivers/scsi/qla2xxx/qla_version.h index 2b6e478d9e3..ec54036d1e1 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@  /*   * Driver version   */ -#define QLA2XXX_VERSION      "8.04.00.08-k" +#define QLA2XXX_VERSION      "8.04.00.13-k"  #define QLA_DRIVER_MAJOR_VER	8  #define QLA_DRIVER_MINOR_VER	4 diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 86974471af6..2a32036a940 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev)  	tpnt->disk = disk;  	disk->private_data = &tpnt->driver;  	disk->queue = SDp->request_queue; +	/* SCSI tape doesn't register this gendisk via add_disk().  Manually +	 * take queue reference that release_disk() expects. */ +	if (!blk_get_queue(disk->queue)) +		goto out_put_disk;  	tpnt->driver = &st_template;  	tpnt->device = SDp; @@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev)  	idr_preload_end();  	if (error < 0) {  		pr_warn("st: idr allocation failed: %d\n", error); -		goto out_put_disk; +		goto out_put_queue;  	}  	tpnt->index = error;  	sprintf(disk->disk_name, "st%d", tpnt->index); @@ -4211,6 +4215,8 @@ out_remove_devs:  	spin_lock(&st_index_lock);  	idr_remove(&st_index_idr, tpnt->index);  	spin_unlock(&st_index_lock); +out_put_queue: +	blk_put_queue(disk->queue);  out_put_disk:  	put_disk(disk);  	kfree(tpnt); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f80eee74a31..2be0de920d6 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers"  config SPI_ALTERA  	tristate "Altera SPI Controller" +	depends on GENERIC_HARDIRQS  	select SPI_BITBANG  	help  	  This is the driver for the Altera SPI Controller. 
@@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA  config SPI_PXA2XX  	tristate "PXA2xx SSP SPI master" -	depends on ARCH_PXA || PCI || ACPI +	depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS  	select PXA_SSP if ARCH_PXA  	help  	  This enables using a PXA2xx or Sodaville SSP port as a SPI master diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 9578af782a7..d7df435d962 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -152,7 +152,6 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi,  static int bcm63xx_spi_setup(struct spi_device *spi)  {  	struct bcm63xx_spi *bs; -	int ret;  	bs = spi_master_get_devdata(spi->master); @@ -490,7 +489,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)  	default:  		dev_err(dev, "unsupported MSG_CTL width: %d\n",  			 bs->msg_ctl_width); -		goto out_clk_disable; +		goto out_err;  	}  	/* Initialize hardware */ diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 89480b281d7..3e490ee7f27 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c @@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,  		for (i = count; i > 0; i--) {  			data = tx_buf ? 
*tx_buf++ : 0; -			if (len == EOFBYTE) +			if (len == EOFBYTE && t->cs_change)  				setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);  			out_8(&fifo->txdata_8, data);  			len--; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 90b27a3508a..810413883c7 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1168,7 +1168,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)  	master->dev.parent = &pdev->dev;  	master->dev.of_node = pdev->dev.of_node; -	ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev));  	/* the spi->mode bits understood by this driver: */  	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index e862ab8853a..4188b2faac5 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -994,25 +994,30 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data)  {  	struct s3c64xx_spi_driver_data *sdd = data;  	struct spi_master *spi = sdd->master; -	unsigned int val; +	unsigned int val, clr = 0; -	val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR); +	val = readl(sdd->regs + S3C64XX_SPI_STATUS); -	val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR | -		S3C64XX_SPI_PND_RX_UNDERRUN_CLR | -		S3C64XX_SPI_PND_TX_OVERRUN_CLR | -		S3C64XX_SPI_PND_TX_UNDERRUN_CLR; - -	writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR); - -	if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR) +	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) { +		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;  		dev_err(&spi->dev, "RX overrun\n"); -	if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR) +	} +	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) { +		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;  		dev_err(&spi->dev, "RX underrun\n"); -	if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR) +	} +	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) { +		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;  		dev_err(&spi->dev, "TX overrun\n"); -	if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR) +	} +	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) { +		clr |= 
S3C64XX_SPI_PND_TX_UNDERRUN_CLR;  		dev_err(&spi->dev, "TX underrun\n"); +	} + +	/* Clear the pending irq by setting and then clearing it */ +	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR); +	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);  	return IRQ_HANDLED;  } @@ -1036,9 +1041,13 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)  	writel(0, regs + S3C64XX_SPI_MODE_CFG);  	writel(0, regs + S3C64XX_SPI_PACKET_CNT); -	/* Clear any irq pending bits */ -	writel(readl(regs + S3C64XX_SPI_PENDING_CLR), -				regs + S3C64XX_SPI_PENDING_CLR); +	/* Clear any irq pending bits, should set and clear the bits */ +	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR | +		S3C64XX_SPI_PND_RX_UNDERRUN_CLR | +		S3C64XX_SPI_PND_TX_OVERRUN_CLR | +		S3C64XX_SPI_PND_TX_UNDERRUN_CLR; +	writel(val, regs + S3C64XX_SPI_PENDING_CLR); +	writel(0, regs + S3C64XX_SPI_PENDING_CLR);  	writel(0, regs + S3C64XX_SPI_SWAP_CFG); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index b8698b389ef..a829563f471 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -858,21 +858,6 @@ static int tegra_slink_setup(struct spi_device *spi)  	return 0;  } -static int tegra_slink_prepare_transfer(struct spi_master *master) -{ -	struct tegra_slink_data *tspi = spi_master_get_devdata(master); - -	return pm_runtime_get_sync(tspi->dev); -} - -static int tegra_slink_unprepare_transfer(struct spi_master *master) -{ -	struct tegra_slink_data *tspi = spi_master_get_devdata(master); - -	pm_runtime_put(tspi->dev); -	return 0; -} -  static int tegra_slink_transfer_one_message(struct spi_master *master,  			struct spi_message *msg)  { @@ -885,6 +870,12 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,  	msg->status = 0;  	msg->actual_length = 0; +	ret = pm_runtime_get_sync(tspi->dev); +	if (ret < 0) { +		dev_err(tspi->dev, "runtime get failed: %d\n", ret); +		goto done; +	} +  	single_xfer = 
list_is_singular(&msg->transfers);  	list_for_each_entry(xfer, &msg->transfers, transfer_list) {  		INIT_COMPLETION(tspi->xfer_completion); @@ -921,6 +912,8 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,  exit:  	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);  	tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); +	pm_runtime_put(tspi->dev); +done:  	msg->status = ret;  	spi_finalize_current_message(master);  	return ret; @@ -1148,9 +1141,7 @@ static int tegra_slink_probe(struct platform_device *pdev)  	/* the spi->mode bits understood by this driver: */  	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;  	master->setup = tegra_slink_setup; -	master->prepare_transfer_hardware = tegra_slink_prepare_transfer;  	master->transfer_one_message = tegra_slink_transfer_one_message; -	master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer;  	master->num_chipselect = MAX_CHIP_SELECT;  	master->bus_num = -1; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index f996c600eb8..004b10f184d 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -543,17 +543,16 @@ static void spi_pump_messages(struct kthread_work *work)  	/* Lock queue and check for queue work */  	spin_lock_irqsave(&master->queue_lock, flags);  	if (list_empty(&master->queue) || !master->running) { -		if (master->busy && master->unprepare_transfer_hardware) { -			ret = master->unprepare_transfer_hardware(master); -			if (ret) { -				spin_unlock_irqrestore(&master->queue_lock, flags); -				dev_err(&master->dev, -					"failed to unprepare transfer hardware\n"); -				return; -			} +		if (!master->busy) { +			spin_unlock_irqrestore(&master->queue_lock, flags); +			return;  		}  		master->busy = false;  		spin_unlock_irqrestore(&master->queue_lock, flags); +		if (master->unprepare_transfer_hardware && +		    master->unprepare_transfer_hardware(master)) +			dev_err(&master->dev, +				"failed to unprepare transfer hardware\n");  		return;  	
} @@ -984,7 +983,7 @@ static void acpi_register_spi_devices(struct spi_master *master)  	acpi_status status;  	acpi_handle handle; -	handle = ACPI_HANDLE(&master->dev); +	handle = ACPI_HANDLE(master->dev.parent);  	if (!handle)  		return; diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 4c0f6d883dd..7b0bce93676 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)  		return 0;  	}  } + +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid) +{ +	u32 pmu_ctl = 0; + +	switch (cc->dev->bus->chip_id) { +	case 0x4322: +		ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070); +		ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a); +		ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854); +		if (spuravoid == 1) +			ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828); +		else +			ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828); +		pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; +		break; +	case 43222: +		/* TODO: BCM43222 requires updating PLLs too */ +		return; +	default: +		ssb_printk(KERN_ERR PFX +			   "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", +			   cc->dev->bus->chip_id); +		return; +	} + +	chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl); +} +EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index ff1c5ee352c..cbe48ab4174 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -409,6 +409,7 @@ static inline int core_alua_state_standby(  	case REPORT_LUNS:  	case RECEIVE_DIAGNOSTIC:  	case SEND_DIAGNOSTIC: +		return 0;  	case MAINTENANCE_IN:  		switch (cdb[1] & 0x1f) {  		case MI_REPORT_TARGET_PGS: @@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable(  	switch (cdb[0]) {  	case INQUIRY:  	case REPORT_LUNS: +		return 0;  	case 
MAINTENANCE_IN:  		switch (cdb[1] & 0x1f) {  		case MI_REPORT_TARGET_PGS: @@ -491,6 +493,7 @@ static inline int core_alua_state_transition(  	switch (cdb[0]) {  	case INQUIRY:  	case REPORT_LUNS: +		return 0;  	case MAINTENANCE_IN:  		switch (cdb[1] & 0x1f) {  		case MI_REPORT_TARGET_PGS: diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 484b6a3c9b0..302909ccf18 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev,  				mxvar_sdriver, brd->idx + i, &pdev->dev);  		if (IS_ERR(tty_dev)) {  			retval = PTR_ERR(tty_dev); -			for (i--; i >= 0; i--) +			for (; i > 0; i--)  				tty_unregister_device(mxvar_sdriver, -					brd->idx + i); +					brd->idx + i - 1);  			goto err_relbrd;  		}  	} @@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void)  			tty_dev = tty_port_register_device(&brd->ports[i].port,  					mxvar_sdriver, brd->idx + i, NULL);  			if (IS_ERR(tty_dev)) { -				for (i--; i >= 0; i--) +				for (; i > 0; i--)  					tty_unregister_device(mxvar_sdriver, -						brd->idx + i); +						brd->idx + i - 1);  				for (i = 0; i < brd->info->nports; i++)  					tty_port_destroy(&brd->ports[i].port);  				free_irq(brd->irq, brd); diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c index b3455a970a1..35d9ab95c5c 100644 --- a/drivers/tty/serial/8250/8250_pnp.c +++ b/drivers/tty/serial/8250/8250_pnp.c @@ -429,7 +429,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)  {  	struct uart_8250_port uart;  	int ret, line, flags = dev_id->driver_data; -	struct resource *res = NULL;  	if (flags & UNKNOWN_DEV) {  		ret = serial_pnp_guess_board(dev); @@ -440,12 +439,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)  	memset(&uart, 0, sizeof(uart));  	if (pnp_irq_valid(dev, 0))  		uart.port.irq = pnp_irq(dev, 0); -	if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) -		res = pnp_get_resource(dev, IORESOURCE_IO, 
2); -	else if (pnp_port_valid(dev, 0)) -		res = pnp_get_resource(dev, IORESOURCE_IO, 0); -	if (pnp_resource_enabled(res)) { -		uart.port.iobase = res->start; +	if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) { +		uart.port.iobase = pnp_port_start(dev, 2); +		uart.port.iotype = UPIO_PORT; +	} else if (pnp_port_valid(dev, 0)) { +		uart.port.iobase = pnp_port_start(dev, 0);  		uart.port.iotype = UPIO_PORT;  	} else if (pnp_mem_valid(dev, 0)) {  		uart.port.mapbase = pnp_mem_start(dev, 0); diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 4dc41408ecb..30d4f7a783c 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,  	serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);  	/* FIFO ENABLE, DMA MODE */ +	up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK; +	/* +	 * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK +	 * sets Enables the granularity of 1 for TRIGGER RX +	 * level. Along with setting RX FIFO trigger level +	 * to 1 (as noted below, 16 characters) and TLR[3:0] +	 * to zero this will result RX FIFO threshold level +	 * to 1 character, instead of 16 as noted in comment +	 * below. 
+	 */ +  	/* Set receive FIFO threshold to 16 characters and  	 * transmit FIFO threshold to 16 spaces  	 */ diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 797f9d51473..65d4e55552c 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev)  {  	struct usb_port *port_dev = to_usb_port(dev); -	dev_pm_qos_hide_flags(dev);  	kfree(port_dev);  } diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 8189cb6a86a..7abc5c81af2 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -346,6 +346,7 @@ static long vfio_pci_ioctl(void *device_data,  		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {  			size_t size; +			int max = vfio_pci_get_irq_count(vdev, hdr.index);  			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)  				size = sizeof(uint8_t); @@ -355,7 +356,7 @@ static long vfio_pci_ioctl(void *device_data,  				return -EINVAL;  			if (hdr.argsz - minsz < hdr.count * size || -			    hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) +			    hdr.start >= max || hdr.start + hdr.count > max)  				return -EINVAL;  			data = memdup_user((void __user *)(arg + minsz), diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index 2968b493465..957a0b98a5d 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c @@ -74,9 +74,8 @@ enum {  struct vhost_scsi {  	/* Protected by vhost_scsi->dev.mutex */ -	struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET]; +	struct tcm_vhost_tpg **vs_tpg;  	char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; -	bool vs_endpoint;  	struct vhost_dev dev;  	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; @@ -579,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work)  	}  } +static void vhost_scsi_send_bad_target(struct vhost_scsi *vs, +	struct vhost_virtqueue *vq, int head, unsigned out) +{ +	struct virtio_scsi_cmd_resp __user *resp; +	struct virtio_scsi_cmd_resp rsp; +	int ret; + +	
memset(&rsp, 0, sizeof(rsp)); +	rsp.response = VIRTIO_SCSI_S_BAD_TARGET; +	resp = vq->iov[out].iov_base; +	ret = __copy_to_user(resp, &rsp, sizeof(rsp)); +	if (!ret) +		vhost_add_used_and_signal(&vs->dev, vq, head, 0); +	else +		pr_err("Faulted on virtio_scsi_cmd_resp\n"); +} +  static void vhost_scsi_handle_vq(struct vhost_scsi *vs,  	struct vhost_virtqueue *vq)  { +	struct tcm_vhost_tpg **vs_tpg;  	struct virtio_scsi_cmd_req v_req;  	struct tcm_vhost_tpg *tv_tpg;  	struct tcm_vhost_cmd *tv_cmd; @@ -590,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,  	int head, ret;  	u8 target; -	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ -	if (unlikely(!vs->vs_endpoint)) +	/* +	 * We can handle the vq only after the endpoint is setup by calling the +	 * VHOST_SCSI_SET_ENDPOINT ioctl. +	 * +	 * TODO: Check that we are running from vhost_worker which acts +	 * as read-side critical section for vhost kind of RCU. +	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h +	 */ +	vs_tpg = rcu_dereference_check(vq->private_data, 1); +	if (!vs_tpg)  		return;  	mutex_lock(&vq->mutex); @@ -661,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,  		/* Extract the tpgt */  		target = v_req.lun[1]; -		tv_tpg = vs->vs_tpg[target]; +		tv_tpg = ACCESS_ONCE(vs_tpg[target]);  		/* Target does not exist, fail the request */  		if (unlikely(!tv_tpg)) { -			struct virtio_scsi_cmd_resp __user *resp; -			struct virtio_scsi_cmd_resp rsp; - -			memset(&rsp, 0, sizeof(rsp)); -			rsp.response = VIRTIO_SCSI_S_BAD_TARGET; -			resp = vq->iov[out].iov_base; -			ret = __copy_to_user(resp, &rsp, sizeof(rsp)); -			if (!ret) -				vhost_add_used_and_signal(&vs->dev, -							  vq, head, 0); -			else -				pr_err("Faulted on virtio_scsi_cmd_resp\n"); - +			vhost_scsi_send_bad_target(vs, vq, head, out);  			continue;  		} @@ -690,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,  		if (IS_ERR(tv_cmd)) {  			vq_err(vq, 
"vhost_scsi_allocate_cmd failed %ld\n",  					PTR_ERR(tv_cmd)); -			break; +			goto err_cmd;  		}  		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"  			": %d\n", tv_cmd, exp_data_len, data_direction);  		tv_cmd->tvc_vhost = vs;  		tv_cmd->tvc_vq = vq; - -		if (unlikely(vq->iov[out].iov_len != -				sizeof(struct virtio_scsi_cmd_resp))) { -			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" -				" bytes, out: %d, in: %d\n", -				vq->iov[out].iov_len, out, in); -			break; -		} -  		tv_cmd->tvc_resp = vq->iov[out].iov_base;  		/* @@ -725,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,  				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",  				scsi_command_size(tv_cmd->tvc_cdb),  				TCM_VHOST_MAX_CDB_SIZE); -			break; /* TODO */ +			goto err_free;  		}  		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; @@ -738,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,  					data_direction == DMA_TO_DEVICE);  			if (unlikely(ret)) {  				vq_err(vq, "Failed to map iov to sgl\n"); -				break; /* TODO */ +				goto err_free;  			}  		} @@ -759,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,  	}  	mutex_unlock(&vq->mutex); +	return; + +err_free: +	vhost_scsi_free_cmd(tv_cmd); +err_cmd: +	vhost_scsi_send_bad_target(vs, vq, head, out); +	mutex_unlock(&vq->mutex);  }  static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) @@ -780,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)  	vhost_scsi_handle_vq(vs, vq);  } +static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) +{ +	vhost_poll_flush(&vs->dev.vqs[index].poll); +} + +static void vhost_scsi_flush(struct vhost_scsi *vs) +{ +	int i; + +	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) +		vhost_scsi_flush_vq(vs, i); +	vhost_work_flush(&vs->dev, &vs->vs_completion_work); +} +  /*   * Called from vhost_scsi_ioctl() context to walk the list of available   * tcm_vhost_tpg with an active struct tcm_vhost_nexus @@ 
-790,8 +815,10 @@ static int vhost_scsi_set_endpoint(  {  	struct tcm_vhost_tport *tv_tport;  	struct tcm_vhost_tpg *tv_tpg; +	struct tcm_vhost_tpg **vs_tpg; +	struct vhost_virtqueue *vq; +	int index, ret, i, len;  	bool match = false; -	int index, ret;  	mutex_lock(&vs->dev.mutex);  	/* Verify that ring has been setup correctly. */ @@ -803,6 +830,15 @@ static int vhost_scsi_set_endpoint(  		}  	} +	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; +	vs_tpg = kzalloc(len, GFP_KERNEL); +	if (!vs_tpg) { +		mutex_unlock(&vs->dev.mutex); +		return -ENOMEM; +	} +	if (vs->vs_tpg) +		memcpy(vs_tpg, vs->vs_tpg, len); +  	mutex_lock(&tcm_vhost_mutex);  	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {  		mutex_lock(&tv_tpg->tv_tpg_mutex); @@ -817,14 +853,15 @@ static int vhost_scsi_set_endpoint(  		tv_tport = tv_tpg->tport;  		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { -			if (vs->vs_tpg[tv_tpg->tport_tpgt]) { +			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {  				mutex_unlock(&tv_tpg->tv_tpg_mutex);  				mutex_unlock(&tcm_vhost_mutex);  				mutex_unlock(&vs->dev.mutex); +				kfree(vs_tpg);  				return -EEXIST;  			}  			tv_tpg->tv_tpg_vhost_count++; -			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; +			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;  			smp_mb__after_atomic_inc();  			match = true;  		} @@ -835,12 +872,27 @@ static int vhost_scsi_set_endpoint(  	if (match) {  		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,  		       sizeof(vs->vs_vhost_wwpn)); -		vs->vs_endpoint = true; +		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { +			vq = &vs->vqs[i]; +			/* Flushing the vhost_work acts as synchronize_rcu */ +			mutex_lock(&vq->mutex); +			rcu_assign_pointer(vq->private_data, vs_tpg); +			vhost_init_used(vq); +			mutex_unlock(&vq->mutex); +		}  		ret = 0;  	} else {  		ret = -EEXIST;  	} +	/* +	 * Act as synchronize_rcu to make sure access to +	 * old vs->vs_tpg is finished. 
+	 */ +	vhost_scsi_flush(vs); +	kfree(vs->vs_tpg); +	vs->vs_tpg = vs_tpg; +  	mutex_unlock(&vs->dev.mutex);  	return ret;  } @@ -851,6 +903,8 @@ static int vhost_scsi_clear_endpoint(  {  	struct tcm_vhost_tport *tv_tport;  	struct tcm_vhost_tpg *tv_tpg; +	struct vhost_virtqueue *vq; +	bool match = false;  	int index, ret, i;  	u8 target; @@ -862,9 +916,14 @@ static int vhost_scsi_clear_endpoint(  			goto err_dev;  		}  	} + +	if (!vs->vs_tpg) { +		mutex_unlock(&vs->dev.mutex); +		return 0; +	} +  	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {  		target = i; -  		tv_tpg = vs->vs_tpg[target];  		if (!tv_tpg)  			continue; @@ -886,10 +945,27 @@ static int vhost_scsi_clear_endpoint(  		}  		tv_tpg->tv_tpg_vhost_count--;  		vs->vs_tpg[target] = NULL; -		vs->vs_endpoint = false; +		match = true;  		mutex_unlock(&tv_tpg->tv_tpg_mutex);  	} +	if (match) { +		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { +			vq = &vs->vqs[i]; +			/* Flushing the vhost_work acts as synchronize_rcu */ +			mutex_lock(&vq->mutex); +			rcu_assign_pointer(vq->private_data, NULL); +			mutex_unlock(&vq->mutex); +		} +	} +	/* +	 * Act as synchronize_rcu to make sure access to +	 * old vs->vs_tpg is finished. 
+	 */ +	vhost_scsi_flush(vs); +	kfree(vs->vs_tpg); +	vs->vs_tpg = NULL;  	mutex_unlock(&vs->dev.mutex); +  	return 0;  err_tpg: @@ -899,6 +975,24 @@ err_dev:  	return ret;  } +static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) +{ +	if (features & ~VHOST_SCSI_FEATURES) +		return -EOPNOTSUPP; + +	mutex_lock(&vs->dev.mutex); +	if ((features & (1 << VHOST_F_LOG_ALL)) && +	    !vhost_log_access_ok(&vs->dev)) { +		mutex_unlock(&vs->dev.mutex); +		return -EFAULT; +	} +	vs->dev.acked_features = features; +	smp_wmb(); +	vhost_scsi_flush(vs); +	mutex_unlock(&vs->dev.mutex); +	return 0; +} +  static int vhost_scsi_open(struct inode *inode, struct file *f)  {  	struct vhost_scsi *s; @@ -939,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)  	return 0;  } -static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) -{ -	vhost_poll_flush(&vs->dev.vqs[index].poll); -} - -static void vhost_scsi_flush(struct vhost_scsi *vs) -{ -	int i; - -	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) -		vhost_scsi_flush_vq(vs, i); -	vhost_work_flush(&vs->dev, &vs->vs_completion_work); -} - -static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) -{ -	if (features & ~VHOST_SCSI_FEATURES) -		return -EOPNOTSUPP; - -	mutex_lock(&vs->dev.mutex); -	if ((features & (1 << VHOST_F_LOG_ALL)) && -	    !vhost_log_access_ok(&vs->dev)) { -		mutex_unlock(&vs->dev.mutex); -		return -EFAULT; -	} -	vs->dev.acked_features = features; -	smp_wmb(); -	vhost_scsi_flush(vs); -	mutex_unlock(&vs->dev.mutex); -	return 0; -} -  static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,  				unsigned long arg)  { diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 7c254084b6a..86291dcd964 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)  {  	struct fb_info *info = file_fb_info(file);  	struct fb_ops *fb; -	unsigned long off; +	unsigned long 
mmio_pgoff;  	unsigned long start;  	u32 len;  	if (!info)  		return -ENODEV; -	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) -		return -EINVAL; -	off = vma->vm_pgoff << PAGE_SHIFT;  	fb = info->fbops;  	if (!fb)  		return -ENODEV; @@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)  		return res;  	} -	/* frame buffer memory */ +	/* +	 * Ugh. This can be either the frame buffer mapping, or +	 * if pgoff points past it, the mmio mapping. +	 */  	start = info->fix.smem_start; -	len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); -	if (off >= len) { -		/* memory mapped io */ -		off -= len; -		if (info->var.accel_flags) { -			mutex_unlock(&info->mm_lock); -			return -EINVAL; -		} +	len = info->fix.smem_len; +	mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; +	if (vma->vm_pgoff >= mmio_pgoff) { +		vma->vm_pgoff -= mmio_pgoff;  		start = info->fix.mmio_start; -		len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); +		len = info->fix.mmio_len;  	}  	mutex_unlock(&info->mm_lock); -	start &= PAGE_MASK; -	if ((vma->vm_end - vma->vm_start + off) > len) -		return -EINVAL; -	off += start; -	vma->vm_pgoff = off >> PAGE_SHIFT; -	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/ +  	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); -	fb_pgprotect(file, vma, off); -	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, -			     vma->vm_end - vma->vm_start, vma->vm_page_prot)) -		return -EAGAIN; -	return 0; +	fb_pgprotect(file, vma, start); + +	return vm_iomap_memory(vma, start, len);  }  static int diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c index 94ad0f71383..7f6709991a5 100644 --- a/drivers/video/fbmon.c +++ b/drivers/video/fbmon.c @@ -1400,7 +1400,7 @@ int fb_videomode_from_videomode(const struct videomode *vm,  	fbmode->vmode = 0;  	if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)  		fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; -	if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) +	if 
(vm->dmt_flags & VESA_DMT_VSYNC_HIGH)  		fbmode->sync |= FB_SYNC_VERT_HIGH_ACT;  	if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)  		fbmode->vmode |= FB_VMODE_INTERLACED; diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c index 9ed83419038..84de2632857 100644 --- a/drivers/video/mmp/core.c +++ b/drivers/video/mmp/core.c @@ -252,7 +252,5 @@ void mmp_unregister_path(struct mmp_path *path)  	kfree(path);  	mutex_unlock(&disp_lock); - -	dev_info(path->dev, "de-register %s\n", path->name);  }  EXPORT_SYMBOL_GPL(mmp_unregister_path); diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 63203acef81..0264704a52b 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -858,6 +858,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)  	tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16)  	    | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7);  	lcdc_write_chan(ch, LDHAJR, tmp); +	lcdc_write_chan_mirror(ch, LDHAJR, tmp);  }  static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index b75db018648..d4284458377 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -1973,7 +1973,8 @@ static int uvesafb_init(void)  			err = -ENOMEM;  		if (err) { -			platform_device_put(uvesafb_device); +			if (uvesafb_device) +				platform_device_put(uvesafb_device);  			platform_driver_unregister(&uvesafb_driver);  			cn_del_callback(&uvesafb_cn_id);  			return err; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 9fcc70c11ce..e89fc313397 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG  config AT91RM9200_WATCHDOG  	tristate "AT91RM9200 watchdog" -	depends on ARCH_AT91 +	depends on ARCH_AT91RM9200  	help  	  Watchdog timer embedded into AT91RM9200 chips. 
This will reboot your  	  system when the timeout is reached. diff --git a/drivers/xen/events.c b/drivers/xen/events.c index aa85881d17b..2647ad8e1f1 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -1316,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void)  {  	int start_word_idx, start_bit_idx;  	int word_idx, bit_idx; -	int i; +	int i, irq;  	int cpu = get_cpu();  	struct shared_info *s = HYPERVISOR_shared_info;  	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); @@ -1324,6 +1324,8 @@ static void __xen_evtchn_do_upcall(void)  	do {  		xen_ulong_t pending_words; +		xen_ulong_t pending_bits; +		struct irq_desc *desc;  		vcpu_info->evtchn_upcall_pending = 0; @@ -1335,6 +1337,17 @@ static void __xen_evtchn_do_upcall(void)  		 * selector flag. xchg_xen_ulong must contain an  		 * appropriate barrier.  		 */ +		if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) { +			int evtchn = evtchn_from_irq(irq); +			word_idx = evtchn / BITS_PER_LONG; +			pending_bits = evtchn % BITS_PER_LONG; +			if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) { +				desc = irq_to_desc(irq); +				if (desc) +					generic_handle_irq_desc(irq, desc); +			} +		} +  		pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);  		start_word_idx = __this_cpu_read(current_word_idx); @@ -1343,7 +1356,6 @@ static void __xen_evtchn_do_upcall(void)  		word_idx = start_word_idx;  		for (i = 0; pending_words != 0; i++) { -			xen_ulong_t pending_bits;  			xen_ulong_t words;  			words = MASK_LSBS(pending_words, word_idx); @@ -1372,8 +1384,7 @@ static void __xen_evtchn_do_upcall(void)  			do {  				xen_ulong_t bits; -				int port, irq; -				struct irq_desc *desc; +				int port;  				bits = MASK_LSBS(pending_bits, bit_idx); diff --git a/firmware/Makefile b/firmware/Makefile index 5d8ee1319b5..cbb09ce9730 100644 --- a/firmware/Makefile +++ b/firmware/Makefile @@ -82,7 +82,7 @@ fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \  
fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \  					 qlogic/12160.bin  fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin -fw-shipped-$(CONFIG_INFINIBAND_QIB) += intel/sd7220.fw +fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw  fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp  fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \  				     ess/maestro3_assp_minisrc.fw diff --git a/firmware/intel/sd7220.fw.ihex b/firmware/qlogic/sd7220.fw.ihex index a3363631911..a3363631911 100644 --- a/firmware/intel/sd7220.fw.ihex +++ b/firmware/qlogic/sd7220.fw.ihex diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 3939829f6c5..86af964c242 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1137,6 +1137,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,  			goto whole;  		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))  			goto whole; +		return 0;  	}  	/* Do not dump I/O mapped devices or special mappings */ @@ -1428,8 +1428,6 @@ void bio_endio(struct bio *bio, int error)  	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))  		error = -EIO; -	trace_block_bio_complete(bio, error); -  	if (bio->bi_end_io)  		bio->bi_end_io(bio, error);  } diff --git a/fs/block_dev.c b/fs/block_dev.c index aea605c98ba..aae187a7f94 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -551,6 +551,7 @@ struct block_device *bdgrab(struct block_device *bdev)  	ihold(bdev->bd_inode);  	return bdev;  } +EXPORT_SYMBOL(bdgrab);  long nr_blockdev_pages(void)  { diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 451fad96ecd..ef96381569a 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -317,6 +317,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,  	unsigned long src_ptr;  	unsigned long dst_ptr;  	int overwrite_root = 0; +	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;  	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)  		overwrite_root = 1; @@ -326,6 +327,9 
@@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,  	/* look for the key in the destination tree */  	ret = btrfs_search_slot(NULL, root, key, path, 0, 0); +	if (ret < 0) +		return ret; +  	if (ret == 0) {  		char *src_copy;  		char *dst_copy; @@ -367,6 +371,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,  			return 0;  		} +		/* +		 * We need to load the old nbytes into the inode so when we +		 * replay the extents we've logged we get the right nbytes. +		 */ +		if (inode_item) { +			struct btrfs_inode_item *item; +			u64 nbytes; + +			item = btrfs_item_ptr(path->nodes[0], path->slots[0], +					      struct btrfs_inode_item); +			nbytes = btrfs_inode_nbytes(path->nodes[0], item); +			item = btrfs_item_ptr(eb, slot, +					      struct btrfs_inode_item); +			btrfs_set_inode_nbytes(eb, item, nbytes); +		} +	} else if (inode_item) { +		struct btrfs_inode_item *item; + +		/* +		 * New inode, set nbytes to 0 so that the nbytes comes out +		 * properly when we replay the extents. 
+		 */ +		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); +		btrfs_set_inode_nbytes(eb, item, 0);  	}  insert:  	btrfs_release_path(path); @@ -486,7 +514,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,  	int found_type;  	u64 extent_end;  	u64 start = key->offset; -	u64 saved_nbytes; +	u64 nbytes = 0;  	struct btrfs_file_extent_item *item;  	struct inode *inode = NULL;  	unsigned long size; @@ -496,10 +524,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,  	found_type = btrfs_file_extent_type(eb, item);  	if (found_type == BTRFS_FILE_EXTENT_REG || -	    found_type == BTRFS_FILE_EXTENT_PREALLOC) -		extent_end = start + btrfs_file_extent_num_bytes(eb, item); -	else if (found_type == BTRFS_FILE_EXTENT_INLINE) { +	    found_type == BTRFS_FILE_EXTENT_PREALLOC) { +		nbytes = btrfs_file_extent_num_bytes(eb, item); +		extent_end = start + nbytes; + +		/* +		 * We don't add to the inodes nbytes if we are prealloc or a +		 * hole. 
+		 */ +		if (btrfs_file_extent_disk_bytenr(eb, item) == 0) +			nbytes = 0; +	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {  		size = btrfs_file_extent_inline_len(eb, item); +		nbytes = btrfs_file_extent_ram_bytes(eb, item);  		extent_end = ALIGN(start + size, root->sectorsize);  	} else {  		ret = 0; @@ -548,7 +585,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,  	}  	btrfs_release_path(path); -	saved_nbytes = inode_get_bytes(inode);  	/* drop any overlapping extents */  	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);  	BUG_ON(ret); @@ -635,7 +671,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,  		BUG_ON(ret);  	} -	inode_set_bytes(inode, saved_nbytes); +	inode_add_bytes(inode, nbytes);  	ret = btrfs_update_inode(trans, root, inode);  out:  	if (inode) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 991c63c6bdd..21b3a291c32 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1575,14 +1575,24 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,  			}  			break;  		case Opt_blank_pass: -			vol->password = NULL; -			break; -		case Opt_pass:  			/* passwords have to be handled differently  			 * to allow the character used for deliminator  			 * to be passed within them  			 */ +			/* +			 * Check if this is a case where the  password +			 * starts with a delimiter +			 */ +			tmp_end = strchr(data, '='); +			tmp_end++; +			if (!(tmp_end < end && tmp_end[1] == delim)) { +				/* No it is not. Set the password to NULL */ +				vol->password = NULL; +				break; +			} +			/* Yes it is. 
Drop down to Opt_pass below.*/ +		case Opt_pass:  			/* Obtain the value string */  			value = strchr(data, '=');  			value++; diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index 412e6eda25f..e4141f25749 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c @@ -80,13 +80,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)  	int rc;  	mutex_lock(&ecryptfs_daemon_hash_mux); -	rc = try_module_get(THIS_MODULE); -	if (rc == 0) { -		rc = -EIO; -		printk(KERN_ERR "%s: Error attempting to increment module use " -		       "count; rc = [%d]\n", __func__, rc); -		goto out_unlock_daemon_list; -	}  	rc = ecryptfs_find_daemon_by_euid(&daemon);  	if (!rc) {  		rc = -EINVAL; @@ -96,7 +89,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)  	if (rc) {  		printk(KERN_ERR "%s: Error attempting to spawn daemon; "  		       "rc = [%d]\n", __func__, rc); -		goto out_module_put_unlock_daemon_list; +		goto out_unlock_daemon_list;  	}  	mutex_lock(&daemon->mux);  	if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { @@ -108,9 +101,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)  	atomic_inc(&ecryptfs_num_miscdev_opens);  out_unlock_daemon:  	mutex_unlock(&daemon->mux); -out_module_put_unlock_daemon_list: -	if (rc) -		module_put(THIS_MODULE);  out_unlock_daemon_list:  	mutex_unlock(&ecryptfs_daemon_hash_mux);  	return rc; @@ -147,7 +137,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)  		       "bug.\n", __func__, rc);  		BUG();  	} -	module_put(THIS_MODULE);  	return rc;  } @@ -471,6 +460,7 @@ out_free:  static const struct file_operations ecryptfs_miscdev_fops = { +	.owner   = THIS_MODULE,  	.open    = ecryptfs_miscdev_open,  	.poll    = ecryptfs_miscdev_poll,  	.read    = ecryptfs_miscdev_read, diff --git a/fs/exec.c b/fs/exec.c index a96a4885bbb..87e731f020f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -613,7 +613,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)  		 * 
when the old and new regions overlap clear from new_end.  		 */  		free_pgd_range(&tlb, new_end, old_end, new_end, -			vma->vm_next ? vma->vm_next->vm_start : 0); +			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);  	} else {  		/*  		 * otherwise, clean from old_start; this is done to not touch @@ -622,7 +622,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)  		 * for the others its just a little faster.  		 */  		free_pgd_range(&tlb, old_start, old_end, new_end, -			vma->vm_next ? vma->vm_next->vm_start : 0); +			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);  	}  	tlb_finish_mmu(&tlb, new_end, old_end); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 56efcaadf84..9c6d06dcef8 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2999,20 +2999,23 @@ static int ext4_split_extent_at(handle_t *handle,  			if (split_flag & EXT4_EXT_DATA_VALID1) {  				err = ext4_ext_zeroout(inode, ex2);  				zero_ex.ee_block = ex2->ee_block; -				zero_ex.ee_len = ext4_ext_get_actual_len(ex2); +				zero_ex.ee_len = cpu_to_le16( +						ext4_ext_get_actual_len(ex2));  				ext4_ext_store_pblock(&zero_ex,  						      ext4_ext_pblock(ex2));  			} else {  				err = ext4_ext_zeroout(inode, ex);  				zero_ex.ee_block = ex->ee_block; -				zero_ex.ee_len = ext4_ext_get_actual_len(ex); +				zero_ex.ee_len = cpu_to_le16( +						ext4_ext_get_actual_len(ex));  				ext4_ext_store_pblock(&zero_ex,  						      ext4_ext_pblock(ex));  			}  		} else {  			err = ext4_ext_zeroout(inode, &orig_ex);  			zero_ex.ee_block = orig_ex.ee_block; -			zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex); +			zero_ex.ee_len = cpu_to_le16( +						ext4_ext_get_actual_len(&orig_ex));  			ext4_ext_store_pblock(&zero_ex,  					      ext4_ext_pblock(&orig_ex));  		} @@ -3272,7 +3275,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,  		if (err)  			goto out;  		zero_ex.ee_block = ex->ee_block; -		zero_ex.ee_len = 
ext4_ext_get_actual_len(ex); +		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));  		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));  		err = ext4_ext_get_access(handle, inode, path + depth); diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index b505a145a59..a04183127ef 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -1539,9 +1539,9 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,  		blk = *i_data;  		if (level > 0) {  			ext4_lblk_t first2; -			bh = sb_bread(inode->i_sb, blk); +			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));  			if (!bh) { -				EXT4_ERROR_INODE_BLOCK(inode, blk, +				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),  						       "Read failure");  				return -EIO;  			} diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 019f45e4509..d79c2dadc53 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -923,8 +923,11 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)  		cmd = F_SETLK;  		fl->fl_type = F_UNLCK;  	} -	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) +	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { +		if (fl->fl_type == F_UNLCK) +			posix_lock_file_wait(file, fl);  		return -EIO; +	}  	if (IS_GETLK(cmd))  		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);  	else if (fl->fl_type == F_UNLCK) diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 156e42ec84e..5c29216e9cc 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -588,6 +588,7 @@ struct lm_lockstruct {  	struct dlm_lksb ls_control_lksb; /* control_lock */  	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */  	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */ +	char *ls_lvb_bits;  	spinlock_t ls_recover_spin; /* protects following fields */  	unsigned long ls_recover_flags; /* DFL_ */ diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 9802de0f85e..c8423d6de6c 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -483,12 +483,8 @@ static void 
control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,  static int all_jid_bits_clear(char *lvb)  { -	int i; -	for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++) { -		if (lvb[i]) -			return 0; -	} -	return 1; +	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0, +			GDLM_LVB_SIZE - JID_BITMAP_OFFSET);  }  static void sync_wait_cb(void *arg) @@ -580,7 +576,6 @@ static void gfs2_control_func(struct work_struct *work)  {  	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);  	struct lm_lockstruct *ls = &sdp->sd_lockstruct; -	char lvb_bits[GDLM_LVB_SIZE];  	uint32_t block_gen, start_gen, lvb_gen, flags;  	int recover_set = 0;  	int write_lvb = 0; @@ -634,7 +629,7 @@ static void gfs2_control_func(struct work_struct *work)  		return;  	} -	control_lvb_read(ls, &lvb_gen, lvb_bits); +	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);  	spin_lock(&ls->ls_recover_spin);  	if (block_gen != ls->ls_recover_block || @@ -664,10 +659,10 @@ static void gfs2_control_func(struct work_struct *work)  			ls->ls_recover_result[i] = 0; -			if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) +			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))  				continue; -			__clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); +			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);  			write_lvb = 1;  		}  	} @@ -691,7 +686,7 @@ static void gfs2_control_func(struct work_struct *work)  				continue;  			if (ls->ls_recover_submit[i] < start_gen) {  				ls->ls_recover_submit[i] = 0; -				__set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); +				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);  			}  		}  		/* even if there are no bits to set, we need to write the @@ -705,7 +700,7 @@ static void gfs2_control_func(struct work_struct *work)  	spin_unlock(&ls->ls_recover_spin);  	if (write_lvb) { -		control_lvb_write(ls, start_gen, lvb_bits); +		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);  		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;  	} else {  		flags = 
DLM_LKF_CONVERT; @@ -725,7 +720,7 @@ static void gfs2_control_func(struct work_struct *work)  	 */  	for (i = 0; i < recover_size; i++) { -		if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) { +		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {  			fs_info(sdp, "recover generation %u jid %d\n",  				start_gen, i);  			gfs2_recover_set(sdp, i); @@ -758,7 +753,6 @@ static void gfs2_control_func(struct work_struct *work)  static int control_mount(struct gfs2_sbd *sdp)  {  	struct lm_lockstruct *ls = &sdp->sd_lockstruct; -	char lvb_bits[GDLM_LVB_SIZE];  	uint32_t start_gen, block_gen, mount_gen, lvb_gen;  	int mounted_mode;  	int retries = 0; @@ -857,7 +851,7 @@ locks_done:  	 * lvb_gen will be non-zero.  	 */ -	control_lvb_read(ls, &lvb_gen, lvb_bits); +	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);  	if (lvb_gen == 0xFFFFFFFF) {  		/* special value to force mount attempts to fail */ @@ -887,7 +881,7 @@ locks_done:  	 * and all lvb bits to be clear (no pending journal recoveries.)  	 
*/ -	if (!all_jid_bits_clear(lvb_bits)) { +	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {  		/* journals need recovery, wait until all are clear */  		fs_info(sdp, "control_mount wait for journal recovery\n");  		goto restart; @@ -949,7 +943,6 @@ static int dlm_recovery_wait(void *word)  static int control_first_done(struct gfs2_sbd *sdp)  {  	struct lm_lockstruct *ls = &sdp->sd_lockstruct; -	char lvb_bits[GDLM_LVB_SIZE];  	uint32_t start_gen, block_gen;  	int error; @@ -991,8 +984,8 @@ restart:  	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));  	spin_unlock(&ls->ls_recover_spin); -	memset(lvb_bits, 0, sizeof(lvb_bits)); -	control_lvb_write(ls, start_gen, lvb_bits); +	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); +	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);  	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);  	if (error) @@ -1022,6 +1015,12 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,  	uint32_t old_size, new_size;  	int i, max_jid; +	if (!ls->ls_lvb_bits) { +		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); +		if (!ls->ls_lvb_bits) +			return -ENOMEM; +	} +  	max_jid = 0;  	for (i = 0; i < num_slots; i++) {  		if (max_jid < slots[i].slot - 1) @@ -1057,6 +1056,7 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,  static void free_recover_size(struct lm_lockstruct *ls)  { +	kfree(ls->ls_lvb_bits);  	kfree(ls->ls_recover_submit);  	kfree(ls->ls_recover_result);  	ls->ls_recover_submit = NULL; @@ -1205,6 +1205,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)  	ls->ls_recover_size = 0;  	ls->ls_recover_submit = NULL;  	ls->ls_recover_result = NULL; +	ls->ls_lvb_bits = NULL;  	error = set_recover_size(sdp, NULL, 0);  	if (error) diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index d1f51fd73f8..5a51265a434 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -576,7 +576,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)  	RB_CLEAR_NODE(&ip->i_res->rs_node);  out:  	
up_write(&ip->i_rw_mutex); -	return 0; +	return error;  }  static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) @@ -1181,12 +1181,9 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,  			     const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)  {  	struct super_block *sb = sdp->sd_vfs; -	struct block_device *bdev = sb->s_bdev; -	const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize / -					   bdev_logical_block_size(sb->s_bdev);  	u64 blk;  	sector_t start = 0; -	sector_t nr_sects = 0; +	sector_t nr_blks = 0;  	int rv;  	unsigned int x;  	u32 trimmed = 0; @@ -1206,35 +1203,34 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,  		if (diff == 0)  			continue;  		blk = offset + ((bi->bi_start + x) * GFS2_NBBY); -		blk *= sects_per_blk; /* convert to sectors */  		while(diff) {  			if (diff & 1) { -				if (nr_sects == 0) +				if (nr_blks == 0)  					goto start_new_extent; -				if ((start + nr_sects) != blk) { -					if (nr_sects >= minlen) { -						rv = blkdev_issue_discard(bdev, -							start, nr_sects, +				if ((start + nr_blks) != blk) { +					if (nr_blks >= minlen) { +						rv = sb_issue_discard(sb, +							start, nr_blks,  							GFP_NOFS, 0);  						if (rv)  							goto fail; -						trimmed += nr_sects; +						trimmed += nr_blks;  					} -					nr_sects = 0; +					nr_blks = 0;  start_new_extent:  					start = blk;  				} -				nr_sects += sects_per_blk; +				nr_blks++;  			}  			diff >>= 2; -			blk += sects_per_blk; +			blk++;  		}  	} -	if (nr_sects >= minlen) { -		rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0); +	if (nr_blks >= minlen) { +		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);  		if (rv)  			goto fail; -		trimmed += nr_sects; +		trimmed += nr_blks;  	}  	if (ptrimmed)  		*ptrimmed = trimmed; diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index a94f0f779d5..fe0a76213d9 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -533,7 +533,7 @@ void 
hfsplus_file_truncate(struct inode *inode)  		struct address_space *mapping = inode->i_mapping;  		struct page *page;  		void *fsdata; -		u32 size = inode->i_size; +		loff_t size = inode->i_size;  		res = pagecache_write_begin(NULL, mapping, size, 0,  						AOP_FLAG_UNINTERRUPTIBLE, diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 84e3d856e91..523464e6284 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)  	 * way when do_mmap_pgoff unwinds (may be important on powerpc  	 * and ia64).  	 */ -	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP; +	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;  	vma->vm_ops = &hugetlb_vm_ops;  	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) diff --git a/fs/inode.c b/fs/inode.c index f5f7c06c36f..a898b3d43cc 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)  		 * inode to the back of the list so we don't spin on it.  		 
*/  		if (!spin_trylock(&inode->i_lock)) { -			list_move_tail(&inode->i_lru, &sb->s_inode_lru); +			list_move(&inode->i_lru, &sb->s_inode_lru);  			continue;  		} diff --git a/fs/namespace.c b/fs/namespace.c index d581e45c0a9..341d3f56408 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1690,7 +1690,7 @@ static int do_loopback(struct path *path, const char *old_name,  	if (IS_ERR(mnt)) {  		err = PTR_ERR(mnt); -		goto out; +		goto out2;  	}  	err = graft_tree(mnt, path); diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index ac4fc9a8fdb..66b6664dcd4 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new,  			   struct rpc_cred *cred)  {  	struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); -	struct nfs_client *pos, *n, *prev = NULL; +	struct nfs_client *pos, *prev = NULL;  	struct nfs4_setclientid_res clid = {  		.clientid	= new->cl_clientid,  		.confirm	= new->cl_confirm, @@ -308,10 +308,23 @@ int nfs40_walk_client_list(struct nfs_client *new,  	int status = -NFS4ERR_STALE_CLIENTID;  	spin_lock(&nn->nfs_client_lock); -	list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { +	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {  		/* If "pos" isn't marked ready, we can't trust the  		 * remaining fields in "pos" */ -		if (pos->cl_cons_state < NFS_CS_READY) +		if (pos->cl_cons_state > NFS_CS_READY) { +			atomic_inc(&pos->cl_count); +			spin_unlock(&nn->nfs_client_lock); + +			if (prev) +				nfs_put_client(prev); +			prev = pos; + +			status = nfs_wait_client_init_complete(pos); +			spin_lock(&nn->nfs_client_lock); +			if (status < 0) +				continue; +		} +		if (pos->cl_cons_state != NFS_CS_READY)  			continue;  		if (pos->rpc_ops != new->rpc_ops) @@ -423,16 +436,16 @@ int nfs41_walk_client_list(struct nfs_client *new,  			   struct rpc_cred *cred)  {  	struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); -	struct nfs_client *pos, *n, *prev = 
NULL; +	struct nfs_client *pos, *prev = NULL;  	int status = -NFS4ERR_STALE_CLIENTID;  	spin_lock(&nn->nfs_client_lock); -	list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { +	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {  		/* If "pos" isn't marked ready, we can't trust the  		 * remaining fields in "pos", especially the client  		 * ID and serverowner fields.  Wait for CREATE_SESSION  		 * to finish. */ -		if (pos->cl_cons_state < NFS_CS_READY) { +		if (pos->cl_cons_state > NFS_CS_READY) {  			atomic_inc(&pos->cl_count);  			spin_unlock(&nn->nfs_client_lock); @@ -440,18 +453,17 @@ int nfs41_walk_client_list(struct nfs_client *new,  				nfs_put_client(prev);  			prev = pos; -			nfs4_schedule_lease_recovery(pos);  			status = nfs_wait_client_init_complete(pos); -			if (status < 0) { -				nfs_put_client(pos); -				spin_lock(&nn->nfs_client_lock); -				continue; +			if (status == 0) { +				nfs4_schedule_lease_recovery(pos); +				status = nfs4_wait_clnt_recover(pos);  			} -			status = pos->cl_cons_state;  			spin_lock(&nn->nfs_client_lock);  			if (status < 0)  				continue;  		} +		if (pos->cl_cons_state != NFS_CS_READY) +			continue;  		if (pos->rpc_ops != new->rpc_ops)  			continue; @@ -469,17 +481,18 @@ int nfs41_walk_client_list(struct nfs_client *new,  			continue;  		atomic_inc(&pos->cl_count); -		spin_unlock(&nn->nfs_client_lock); +		*result = pos; +		status = 0;  		dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",  			__func__, pos, atomic_read(&pos->cl_count)); - -		*result = pos; -		return 0; +		break;  	}  	/* No matching nfs_client found. 
*/  	spin_unlock(&nn->nfs_client_lock);  	dprintk("NFS: <-- %s status = %d\n", __func__, status); +	if (prev) +		nfs_put_client(prev);  	return status;  }  #endif	/* CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 26431cf62dd..0ad025eb523 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1046,6 +1046,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)  		/* Save the delegation */  		nfs4_stateid_copy(&stateid, &delegation->stateid);  		rcu_read_unlock(); +		nfs_release_seqid(opendata->o_arg.seqid);  		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);  		if (ret != 0)  			goto out; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6ace365c633..d41a3518509 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1886,7 +1886,13 @@ again:  			status = PTR_ERR(clnt);  			break;  		} -		clp->cl_rpcclient = clnt; +		/* Note: this is safe because we haven't yet marked the +		 * client as ready, so we are the only user of +		 * clp->cl_rpcclient +		 */ +		clnt = xchg(&clp->cl_rpcclient, clnt); +		rpc_shutdown_client(clnt); +		clnt = clp->cl_rpcclient;  		goto again;  	case -NFS4ERR_MINOR_VERS_MISMATCH: diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 01168865dd3..a2720071f28 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -264,7 +264,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,  		iattr->ia_valid |= ATTR_SIZE;  	}  	if (bmval[0] & FATTR4_WORD0_ACL) { -		int nace; +		u32 nace;  		struct nfs4_ace *ace;  		READ_BUF(4); len += 4; diff --git a/fs/proc/array.c b/fs/proc/array.c index f7ed9ee46eb..cbd0f1b324b 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -143,6 +143,7 @@ static const char * const task_state_array[] = {  	"x (dead)",		/*  64 */  	"K (wakekill)",		/* 128 */  	"W (waking)",		/* 256 */ +	"P (parked)",		/* 512 */  };  static inline const char *get_task_state(struct task_struct *tsk) diff --git a/fs/proc/generic.c 
b/fs/proc/generic.c index 4b3b3ffb52f..21e1a8f1659 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -755,37 +755,8 @@ void pde_put(struct proc_dir_entry *pde)  		free_proc_entry(pde);  } -/* - * Remove a /proc entry and free it if it's not currently in use. - */ -void remove_proc_entry(const char *name, struct proc_dir_entry *parent) +static void entry_rundown(struct proc_dir_entry *de)  { -	struct proc_dir_entry **p; -	struct proc_dir_entry *de = NULL; -	const char *fn = name; -	unsigned int len; - -	spin_lock(&proc_subdir_lock); -	if (__xlate_proc_name(name, &parent, &fn) != 0) { -		spin_unlock(&proc_subdir_lock); -		return; -	} -	len = strlen(fn); - -	for (p = &parent->subdir; *p; p=&(*p)->next ) { -		if (proc_match(len, fn, *p)) { -			de = *p; -			*p = de->next; -			de->next = NULL; -			break; -		} -	} -	spin_unlock(&proc_subdir_lock); -	if (!de) { -		WARN(1, "name '%s'\n", name); -		return; -	} -  	spin_lock(&de->pde_unload_lock);  	/*  	 * Stop accepting new callers into module. If you're @@ -817,6 +788,40 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)  		spin_lock(&de->pde_unload_lock);  	}  	spin_unlock(&de->pde_unload_lock); +} + +/* + * Remove a /proc entry and free it if it's not currently in use. 
+ */ +void remove_proc_entry(const char *name, struct proc_dir_entry *parent) +{ +	struct proc_dir_entry **p; +	struct proc_dir_entry *de = NULL; +	const char *fn = name; +	unsigned int len; + +	spin_lock(&proc_subdir_lock); +	if (__xlate_proc_name(name, &parent, &fn) != 0) { +		spin_unlock(&proc_subdir_lock); +		return; +	} +	len = strlen(fn); + +	for (p = &parent->subdir; *p; p=&(*p)->next ) { +		if (proc_match(len, fn, *p)) { +			de = *p; +			*p = de->next; +			de->next = NULL; +			break; +		} +	} +	spin_unlock(&proc_subdir_lock); +	if (!de) { +		WARN(1, "name '%s'\n", name); +		return; +	} + +	entry_rundown(de);  	if (S_ISDIR(de->mode))  		parent->nlink--; @@ -827,3 +832,57 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)  	pde_put(de);  }  EXPORT_SYMBOL(remove_proc_entry); + +int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) +{ +	struct proc_dir_entry **p; +	struct proc_dir_entry *root = NULL, *de, *next; +	const char *fn = name; +	unsigned int len; + +	spin_lock(&proc_subdir_lock); +	if (__xlate_proc_name(name, &parent, &fn) != 0) { +		spin_unlock(&proc_subdir_lock); +		return -ENOENT; +	} +	len = strlen(fn); + +	for (p = &parent->subdir; *p; p=&(*p)->next ) { +		if (proc_match(len, fn, *p)) { +			root = *p; +			*p = root->next; +			root->next = NULL; +			break; +		} +	} +	if (!root) { +		spin_unlock(&proc_subdir_lock); +		return -ENOENT; +	} +	de = root; +	while (1) { +		next = de->subdir; +		if (next) { +			de->subdir = next->next; +			next->next = NULL; +			de = next; +			continue; +		} +		spin_unlock(&proc_subdir_lock); + +		entry_rundown(de); +		next = de->parent; +		if (S_ISDIR(de->mode)) +			next->nlink--; +		de->nlink = 0; +		if (de == root) +			break; +		pde_put(de); + +		spin_lock(&proc_subdir_lock); +		de = next; +	} +	pde_put(root); +	return 0; +} +EXPORT_SYMBOL(remove_proc_subtree); diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index c196369fe40..4cce1d9552f 100644 --- 
a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,  	if (dbuf->count == ARRAY_SIZE(dbuf->dentries))  		return -ENOSPC; -	if (name[0] == '.' && (name[1] == '\0' || -			       (name[1] == '.' && name[2] == '\0'))) +	if (name[0] == '.' && (namelen < 2 || +			       (namelen == 2 && name[1] == '.')))  		return 0;  	dentry = lookup_one_len(name, dbuf->xadir, namelen); diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ac838b84493..f21acf0ef01 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1568,6 +1568,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)  	c->remounting_rw = 1;  	c->ro_mount = 0; +	if (c->space_fixup) { +		err = ubifs_fixup_free_space(c); +		if (err) +			return err; +	} +  	err = check_free_space(c);  	if (err)  		goto out; @@ -1684,12 +1690,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)  		err = dbg_check_space_info(c);  	} -	if (c->space_fixup) { -		err = ubifs_fixup_free_space(c); -		if (err) -			goto out; -	} -  	mutex_unlock(&c->umount_mutex);  	return err; diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index bfd87685fc1..a59ff51b016 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -7,6 +7,16 @@  #include <linux/mm_types.h>  #include <linux/bug.h> +/* + * On almost all architectures and configurations, 0 can be used as the + * upper ceiling to free_pgtables(): on many architectures it has the same + * effect as using TASK_SIZE.  However, there is one configuration which + * must impose a more careful limit, to avoid freeing kernel pgtables. 
+ */ +#ifndef USER_PGTABLES_CEILING +#define USER_PGTABLES_CEILING	0UL +#endif +  #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS  extern int ptep_set_access_flags(struct vm_area_struct *vma,  				 unsigned long address, pte_t *ptep, diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 25f01d0bc14..b1b1fa6ffff 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -99,7 +99,12 @@ struct mmu_gather {  	unsigned int		need_flush : 1,	/* Did free PTEs */  				fast_mode  : 1; /* No batching   */ -	unsigned int		fullmm; +	/* we are in the middle of an operation to clear +	 * a full mm and can make some optimizations */ +	unsigned int		fullmm : 1, +	/* we have performed an operation which +	 * requires a complete flush of the tlb */ +				need_flush_all : 1;  	struct mmu_gather_batch *active;  	struct mmu_gather_batch	local; diff --git a/include/linux/ata.h b/include/linux/ata.h index 8f7a3d68371..ee0bd952405 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id)  	}  } -static inline bool atapi_command_packet_set(const u16 *dev_id) +static inline int atapi_command_packet_set(const u16 *dev_id)  {  	return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;  } diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 0ea61e07a91..7c2e030e72f 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -12,7 +12,6 @@  struct blk_trace {  	int trace_state; -	bool rq_based;  	struct rchan *rchan;  	unsigned long __percpu *sequence;  	unsigned char __percpu *msg_data; diff --git a/include/linux/capability.h b/include/linux/capability.h index 98503b79236..d9a4f7f40f3 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -35,6 +35,7 @@ struct cpu_vfs_cap_data {  #define _KERNEL_CAP_T_SIZE     (sizeof(kernel_cap_t)) +struct file;  struct inode;  struct dentry;  struct user_namespace; @@ -211,6 +212,7 @@ extern bool capable(int 
cap);  extern bool ns_capable(struct user_namespace *ns, int cap);  extern bool nsown_capable(int cap);  extern bool inode_capable(const struct inode *inode, int cap); +extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);  /* audit system wants to get cap info from files as well */  extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/compat.h b/include/linux/compat.h index 76a87fb57ac..377cd8c3395 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -141,11 +141,11 @@ typedef struct {  } compat_sigset_t;  struct compat_sigaction { -#ifndef __ARCH_HAS_ODD_SIGACTION +#ifndef __ARCH_HAS_IRIX_SIGACTION  	compat_uptr_t			sa_handler;  	compat_ulong_t			sa_flags;  #else -	compat_ulong_t			sa_flags; +	compat_uint_t			sa_flags;  	compat_uptr_t			sa_handler;  #endif  #ifdef __ARCH_HAS_SA_RESTORER diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index e83ef39b3be..fe8c4476f7e 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -213,7 +213,7 @@ struct devfreq_simple_ondemand_data {  #endif  #else /* !CONFIG_PM_DEVFREQ */ -static struct devfreq *devfreq_add_device(struct device *dev, +static inline struct devfreq *devfreq_add_device(struct device *dev,  					  struct devfreq_dev_profile *profile,  					  const char *governor_name,  					  void *data) @@ -221,34 +221,34 @@ static struct devfreq *devfreq_add_device(struct device *dev,  	return NULL;  } -static int devfreq_remove_device(struct devfreq *devfreq) +static inline int devfreq_remove_device(struct devfreq *devfreq)  {  	return 0;  } -static int devfreq_suspend_device(struct devfreq *devfreq) +static inline int devfreq_suspend_device(struct devfreq *devfreq)  {  	return 0;  } -static int devfreq_resume_device(struct devfreq *devfreq) +static inline int devfreq_resume_device(struct devfreq *devfreq)  {  	return 0;  } -static struct opp *devfreq_recommended_opp(struct 
device *dev, +static inline struct opp *devfreq_recommended_opp(struct device *dev,  					   unsigned long *freq, u32 flags)  { -	return -EINVAL; +	return ERR_PTR(-EINVAL);  } -static int devfreq_register_opp_notifier(struct device *dev, +static inline int devfreq_register_opp_notifier(struct device *dev,  					 struct devfreq *devfreq)  {  	return -EINVAL;  } -static int devfreq_unregister_opp_notifier(struct device *dev, +static inline int devfreq_unregister_opp_notifier(struct device *dev,  					   struct devfreq *devfreq)  {  	return -EINVAL; diff --git a/include/linux/efi.h b/include/linux/efi.h index 9bf2f1fcae2..3d7df3d32c6 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -333,6 +333,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,  					      unsigned long count,  					      u64 *max_size,  					      int *reset_type); +typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);  /*   *  EFI Configuration Table and GUID definitions @@ -575,9 +576,15 @@ extern void efi_enter_virtual_mode (void);	/* switch EFI to virtual mode, if pos  #ifdef CONFIG_X86  extern void efi_late_init(void);  extern void efi_free_boot_services(void); +extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);  #else  static inline void efi_late_init(void) {}  static inline void efi_free_boot_services(void) {} + +static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) +{ +	return EFI_SUCCESS; +}  #endif  extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);  extern u64 efi_get_iobase (void); @@ -731,7 +738,7 @@ struct efivar_operations {  	efi_get_variable_t *get_variable;  	efi_get_next_variable_t *get_next_variable;  	efi_set_variable_t *set_variable; -	efi_query_variable_info_t *query_variable_info; +	efi_query_variable_store_t *query_variable_store;  };  struct efivars { diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 
e5ca8ef50e9..52da2a25079 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -89,6 +89,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,   *            that the call back has its own recursion protection. If it does   *            not set this, then the ftrace infrastructure will add recursion   *            protection for the caller. + * STUB   - The ftrace_ops is just a place holder.   */  enum {  	FTRACE_OPS_FL_ENABLED			= 1 << 0, @@ -98,6 +99,7 @@ enum {  	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,  	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,  	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6, +	FTRACE_OPS_FL_STUB			= 1 << 7,  };  struct ftrace_ops { @@ -394,7 +396,6 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,  			    size_t cnt, loff_t *ppos);  ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,  			     size_t cnt, loff_t *ppos); -loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);  int ftrace_regex_release(struct inode *inode, struct file *file);  void __init @@ -567,6 +568,8 @@ static inline int  ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }  #endif /* CONFIG_DYNAMIC_FTRACE */ +loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence); +  /* totally disable ftrace - can not re-enable after this */  void ftrace_kill(void); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d2e6927bbaa..d78d28a733b 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -200,6 +200,8 @@ extern size_t vmcoreinfo_max_size;  int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,  		unsigned long long *crash_size, unsigned long long *crash_base); +int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, +		unsigned long long *crash_size, unsigned long long *crash_base);  int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,  		unsigned long long 
*crash_size, unsigned long long *crash_base);  int crash_shrink_memory(unsigned long new_size); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cad77fe09d7..c1395825192 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,  int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,  			   void *data, unsigned long len);  int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, -			      gpa_t gpa); +			      gpa_t gpa, unsigned long len);  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);  int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index fa7cc7244cb..b0bcce0ddc9 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -71,6 +71,7 @@ struct gfn_to_hva_cache {  	u64 generation;  	gpa_t gpa;  	unsigned long hva; +	unsigned long len;  	struct kvm_memory_slot *memslot;  }; diff --git a/include/linux/libata.h b/include/linux/libata.h index 91c9d109e5f..eae7a053dc5 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -398,6 +398,7 @@ enum {  	ATA_HORKAGE_NOSETXFER	= (1 << 14),	/* skip SETXFER, SATA only */  	ATA_HORKAGE_BROKEN_FPDMA_AA	= (1 << 15),	/* skip AA */  	ATA_HORKAGE_DUMP_ID	= (1 << 16),	/* dump IDENTIFY data */ +	ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17),	/* Set max sects to 65535 */  	 /* DMA mask for user DMA control: User visible values; DO NOT  	    renumber */ diff --git a/include/linux/mm.h b/include/linux/mm.h index e19ff30ad0a..e2091b88d24 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1611,6 +1611,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,  			unsigned long pfn);  int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,  			unsigned 
long pfn); +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); +  struct page *follow_page_mask(struct vm_area_struct *vma,  			      unsigned long address, unsigned int foll_flags, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b3d00fa4b31..6151e903eef 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -210,9 +210,9 @@ struct netdev_hw_addr {  #define NETDEV_HW_ADDR_T_SLAVE		3  #define NETDEV_HW_ADDR_T_UNICAST	4  #define NETDEV_HW_ADDR_T_MULTICAST	5 -	bool			synced;  	bool			global_use;  	int			refcount; +	int			synced;  	struct rcu_head		rcu_head;  }; @@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo {   *   * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)   * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, - *			     struct net_device *dev) + *			     struct net_device *dev, u32 filter_mask)   *   * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);   *	Called to change device carrier. 
Soft-devices (like dummy, team, etc) diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h index 01d25e6fc79..0214c4c146f 100644 --- a/include/linux/netfilter/ipset/ip_set_ahash.h +++ b/include/linux/netfilter/ipset/ip_set_ahash.h @@ -291,6 +291,7 @@ ip_set_hash_destroy(struct ip_set *set)  #define type_pf_data_tlist	TOKEN(TYPE, PF, _data_tlist)  #define type_pf_data_next	TOKEN(TYPE, PF, _data_next)  #define type_pf_data_flags	TOKEN(TYPE, PF, _data_flags) +#define type_pf_data_reset_flags TOKEN(TYPE, PF, _data_reset_flags)  #ifdef IP_SET_HASH_WITH_NETS  #define type_pf_data_match	TOKEN(TYPE, PF, _data_match)  #else @@ -385,9 +386,9 @@ type_pf_resize(struct ip_set *set, bool retried)  	struct ip_set_hash *h = set->data;  	struct htable *t, *orig = h->table;  	u8 htable_bits = orig->htable_bits; -	const struct type_pf_elem *data; +	struct type_pf_elem *data;  	struct hbucket *n, *m; -	u32 i, j; +	u32 i, j, flags = 0;  	int ret;  retry: @@ -412,9 +413,16 @@ retry:  		n = hbucket(orig, i);  		for (j = 0; j < n->pos; j++) {  			data = ahash_data(n, j); +#ifdef IP_SET_HASH_WITH_NETS +			flags = 0; +			type_pf_data_reset_flags(data, &flags); +#endif  			m = hbucket(t, HKEY(data, h->initval, htable_bits)); -			ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0); +			ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags);  			if (ret < 0) { +#ifdef IP_SET_HASH_WITH_NETS +				type_pf_data_flags(data, flags); +#endif  				read_unlock_bh(&set->lock);  				ahash_destroy(t);  				if (ret == -EAGAIN) @@ -836,9 +844,9 @@ type_pf_tresize(struct ip_set *set, bool retried)  	struct ip_set_hash *h = set->data;  	struct htable *t, *orig = h->table;  	u8 htable_bits = orig->htable_bits; -	const struct type_pf_elem *data; +	struct type_pf_elem *data;  	struct hbucket *n, *m; -	u32 i, j; +	u32 i, j, flags = 0;  	int ret;  	/* Try to cleanup once */ @@ -873,10 +881,17 @@ retry:  		n = hbucket(orig, i);  		for (j = 0; j < n->pos; j++) {  			data 
= ahash_tdata(n, j); +#ifdef IP_SET_HASH_WITH_NETS +			flags = 0; +			type_pf_data_reset_flags(data, &flags); +#endif  			m = hbucket(t, HKEY(data, h->initval, htable_bits)); -			ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0, -						ip_set_timeout_get(type_pf_data_timeout(data))); +			ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), flags, +				ip_set_timeout_get(type_pf_data_timeout(data)));  			if (ret < 0) { +#ifdef IP_SET_HASH_WITH_NETS +				type_pf_data_flags(data, flags); +#endif  				read_unlock_bh(&set->lock);  				ahash_destroy(t);  				if (ret == -EAGAIN) @@ -1187,6 +1202,7 @@ type_pf_gc_init(struct ip_set *set)  #undef type_pf_data_tlist  #undef type_pf_data_next  #undef type_pf_data_flags +#undef type_pf_data_reset_flags  #undef type_pf_data_match  #undef type_pf_elem diff --git a/include/linux/pci.h b/include/linux/pci.h index 2461033a798..710067f3618 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -916,6 +916,7 @@ void pci_disable_rom(struct pci_dev *pdev);  void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);  void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);  size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); +void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);  /* Power management related routines */  int pci_save_state(struct pci_dev *dev); diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 5a710b9c578..87a03c746f1 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -93,14 +93,20 @@ do { \  #else /* !CONFIG_PREEMPT_COUNT */ -#define preempt_disable()		do { } while (0) -#define sched_preempt_enable_no_resched()	do { } while (0) -#define preempt_enable_no_resched()	do { } while (0) -#define preempt_enable()		do { } while (0) +/* + * Even if we don't have any preemption, we need preempt disable/enable + * to be barriers, so that we don't have things like get_user/put_user + * that can cause faults and 
scheduling migrate into our preempt-protected + * region. + */ +#define preempt_disable()		barrier() +#define sched_preempt_enable_no_resched()	barrier() +#define preempt_enable_no_resched()	barrier() +#define preempt_enable()		barrier() -#define preempt_disable_notrace()		do { } while (0) -#define preempt_enable_no_resched_notrace()	do { } while (0) -#define preempt_enable_notrace()		do { } while (0) +#define preempt_disable_notrace()		barrier() +#define preempt_enable_no_resched_notrace()	barrier() +#define preempt_enable_notrace()		barrier()  #endif /* CONFIG_PREEMPT_COUNT */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 8307f2f94d8..94dfb2aa553 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -117,6 +117,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,  				const struct file_operations *proc_fops,  				void *data);  extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); +extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent);  struct pid_namespace; @@ -202,6 +203,7 @@ static inline struct proc_dir_entry *proc_create_data(const char *name,  	return NULL;  }  #define remove_proc_entry(name, parent) do {} while (0) +#define remove_proc_subtree(name, parent) do {} while (0)  static inline struct proc_dir_entry *proc_symlink(const char *name,  		struct proc_dir_entry *parent,const char *dest) {return NULL;} diff --git a/include/linux/sched.h b/include/linux/sched.h index d35d2b6ddbf..e692a022527 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)  #define TASK_DEAD		64  #define TASK_WAKEKILL		128  #define TASK_WAKING		256 -#define TASK_STATE_MAX		512 +#define TASK_PARKED		512 +#define TASK_STATE_MAX		1024 -#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" +#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"  extern char ___assert_task_state[1 - 2*!!(  		
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; diff --git a/include/linux/security.h b/include/linux/security.h index eee7478cda7..032c366ef1c 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)   *	This hook can be used by the module to update any security state   *	associated with the TUN device's security structure.   *	@security pointer to the TUN devices's security structure. + * @skb_owned_by: + *	This hook sets the packet's owning sock. + *	@skb is the packet. + *	@sk the sock which owns the packet.   *   * Security hooks for XFRM operations.   * @@ -1638,6 +1642,7 @@ struct security_operations {  	int (*tun_dev_attach_queue) (void *security);  	int (*tun_dev_attach) (struct sock *sk, void *security);  	int (*tun_dev_open) (void *security); +	void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);  #endif	/* CONFIG_SECURITY_NETWORK */  #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security);  int security_tun_dev_attach(struct sock *sk, void *security);  int security_tun_dev_open(void *security); +void security_skb_owned_by(struct sk_buff *skb, struct sock *sk); +  #else	/* CONFIG_SECURITY_NETWORK */  static inline int security_unix_stream_connect(struct sock *sock,  					       struct sock *other, @@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security)  {  	return 0;  } + +static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ +} +  #endif	/* CONFIG_SECURITY_NETWORK */  #ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/include/linux/signal.h b/include/linux/signal.h index a2dcb94ea49..9475c5cb28b 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -250,11 +250,11 @@ extern int show_unhandled_signals;  extern int sigsuspend(sigset_t *);  struct sigaction { -#ifndef __ARCH_HAS_ODD_SIGACTION +#ifndef 
__ARCH_HAS_IRIX_SIGACTION  	__sighandler_t	sa_handler;  	unsigned long	sa_flags;  #else -	unsigned long	sa_flags; +	unsigned int	sa_flags;  	__sighandler_t	sa_handler;  #endif  #ifdef __ARCH_HAS_SA_RESTORER diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 441f5bfdab8..b8292d8cc9f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2643,6 +2643,13 @@ static inline void nf_reset(struct sk_buff *skb)  #endif  } +static inline void nf_reset_trace(struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) +	skb->nf_trace = 0; +#endif +} +  /* Note: This doesn't put any conntrack and bridge info in dst. */  static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)  { diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index a26e2fb604e..e2369c167db 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -16,7 +16,10 @@   * In the debug case, 1 means unlocked, 0 means locked. (the values   * are inverted, to catch initialization bugs)   * - * No atomicity anywhere, we are on UP. + * No atomicity anywhere, we are on UP. However, we still need + * the compiler barriers, because we do not want the compiler to + * move potentially faulting instructions (notably user accesses) + * into the locked sequence, resulting in non-atomic execution.   
*/  #ifdef CONFIG_DEBUG_SPINLOCK @@ -25,6 +28,7 @@  static inline void arch_spin_lock(arch_spinlock_t *lock)  {  	lock->slock = 0; +	barrier();  }  static inline void @@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)  {  	local_irq_save(flags);  	lock->slock = 0; +	barrier();  }  static inline int arch_spin_trylock(arch_spinlock_t *lock) @@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)  	char oldval = lock->slock;  	lock->slock = 0; +	barrier();  	return oldval > 0;  }  static inline void arch_spin_unlock(arch_spinlock_t *lock)  { +	barrier();  	lock->slock = 1;  }  /*   * Read-write spinlocks. No debug version.   */ -#define arch_read_lock(lock)		do { (void)(lock); } while (0) -#define arch_write_lock(lock)		do { (void)(lock); } while (0) -#define arch_read_trylock(lock)	({ (void)(lock); 1; }) -#define arch_write_trylock(lock)	({ (void)(lock); 1; }) -#define arch_read_unlock(lock)		do { (void)(lock); } while (0) -#define arch_write_unlock(lock)	do { (void)(lock); } while (0) +#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0) +#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0) +#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; }) +#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; }) +#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0) +#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)  #else /* DEBUG_SPINLOCK */  #define arch_spin_is_locked(lock)	((void)(lock), 0)  /* for sched.c and kernel_lock.c: */ -# define arch_spin_lock(lock)		do { (void)(lock); } while (0) -# define arch_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0) -# define arch_spin_unlock(lock)	do { (void)(lock); } while (0) -# define arch_spin_trylock(lock)	({ (void)(lock); 1; }) +# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags)	do { barrier(); 
(void)(lock); } while (0) +# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0) +# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })  #endif /* DEBUG_SPINLOCK */  #define arch_spin_is_contended(lock)	(((void)(lock), 0)) diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h index 9e492be5244..6fcfe99bd99 100644 --- a/include/linux/ssb/ssb_driver_chipcommon.h +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -219,6 +219,7 @@  #define SSB_CHIPCO_PMU_CTL			0x0600 /* PMU control */  #define  SSB_CHIPCO_PMU_CTL_ILP_DIV		0xFFFF0000 /* ILP div mask */  #define  SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT	16 +#define  SSB_CHIPCO_PMU_CTL_PLL_UPD		0x00000400  #define  SSB_CHIPCO_PMU_CTL_NOILPONW		0x00000200 /* No ILP on wait */  #define  SSB_CHIPCO_PMU_CTL_HTREQEN		0x00000100 /* HT req enable */  #define  SSB_CHIPCO_PMU_CTL_ALPREQEN		0x00000080 /* ALP req enable */ @@ -667,5 +668,6 @@ enum ssb_pmu_ldo_volt_id {  void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,  			     enum ssb_pmu_ldo_volt_id id, u32 voltage);  void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid);  #endif /* LINUX_SSB_CHIPCO_H_ */ diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 2de42f9401d..a5ffd32642f 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -25,6 +25,7 @@ extern int swiotlb_force;  extern void swiotlb_init(int verbose);  int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);  extern unsigned long swiotlb_nr_tbl(void); +unsigned long swiotlb_size_or_default(void);  extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);  /* diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h new file mode 100644 index 00000000000..cbb20afdbc0 --- /dev/null +++ b/include/linux/ucs2_string.h @@ -0,0 +1,14 @@ +#ifndef _LINUX_UCS2_STRING_H_ +#define 
_LINUX_UCS2_STRING_H_ + +#include <linux/types.h>	/* for size_t */ +#include <linux/stddef.h>	/* for NULL */ + +typedef u16 ucs2_char_t; + +unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength); +unsigned long ucs2_strlen(const ucs2_char_t *s); +unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); +int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); + +#endif /* _LINUX_UCS2_STRING_H_ */ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 40be2a0d8ae..84a6440f1f1 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -199,6 +199,7 @@ extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,  /* Device notifier */  extern int register_inet6addr_notifier(struct notifier_block *nb);  extern int unregister_inet6addr_notifier(struct notifier_block *nb); +extern int inet6addr_notifier_call_chain(unsigned long val, void *v);  extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,  					 struct ipv6_devconf *devconf); diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h index f74109144d3..f132924cc9d 100644 --- a/include/net/irda/irlmp.h +++ b/include/net/irda/irlmp.h @@ -256,7 +256,8 @@ static inline __u32 irlmp_get_daddr(const struct lsap_cb *self)  	return (self && self->lap) ? 
self->lap->daddr : 0;  } -extern const char *irlmp_reasons[]; +const char *irlmp_reason_str(LM_REASON reason); +  extern int sysctl_discovery_timeout;  extern int sysctl_discovery_slots;  extern int sysctl_discovery; diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index cc7c1973238..714cc9a54a4 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -130,6 +130,14 @@ struct iucv_sock {  					       enum iucv_tx_notify n);  }; +struct iucv_skb_cb { +	u32	class;		/* target class of message */ +	u32	tag;		/* tag associated with message */ +	u32	offset;		/* offset for skb receival */ +}; + +#define IUCV_SKB_CB(__skb)	((struct iucv_skb_cb *)&((__skb)->cb[0])) +  /* iucv socket options (SOL_IUCV) */  #define SO_IPRMDATA_MSG	0x0080		/* send/recv IPRM_DATA msgs */  #define SO_MSGLIMIT	0x1000		/* get/set IUCV MSGLIMIT */ diff --git a/include/net/scm.h b/include/net/scm.h index 975cca01048..b1170810568 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,  	scm->pid  = get_pid(pid);  	scm->cred = cred ? get_cred(cred) : NULL;  	scm->creds.pid = pid_vnr(pid); -	scm->creds.uid = cred ? cred->euid : INVALID_UID; -	scm->creds.gid = cred ? cred->egid : INVALID_GID; +	scm->creds.uid = cred ? cred->uid : INVALID_UID; +	scm->creds.gid = cred ? 
cred->gid : INVALID_GID;  }  static __inline__ void scm_destroy_cred(struct scm_cookie *scm) diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 399162b50a8..e1379b4e8fa 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -1074,7 +1074,8 @@ void fc_rport_terminate_io(struct fc_rport *);  /*   * DISCOVERY LAYER   *****************************/ -int fc_disc_init(struct fc_lport *); +void fc_disc_init(struct fc_lport *); +void fc_disc_config(struct fc_lport *, void *);  static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc)  { diff --git a/include/sound/max98090.h b/include/sound/max98090.h index 95efb13f847..95efb13f847 100755..100644 --- a/include/sound/max98090.h +++ b/include/sound/max98090.h diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index e1ef63d4a5c..44a30b10868 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -488,6 +488,7 @@ struct snd_soc_dapm_path {  	/* status */  	u32 connect:1;	/* source and sink widgets are connected */  	u32 walked:1;	/* path has been walked */ +	u32 walking:1;  /* path is in the process of being walked */  	u32 weak:1;	/* path ignored for power management */  	int (*connected)(struct snd_soc_dapm_widget *source, diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 9961726523d..9c1467357b0 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -257,6 +257,7 @@ TRACE_EVENT(block_bio_bounce,  /**   * block_bio_complete - completed all work on the block operation + * @q: queue holding the block operation   * @bio: block operation completed   * @error: io error value   * @@ -265,9 +266,9 @@ TRACE_EVENT(block_bio_bounce,   */  TRACE_EVENT(block_bio_complete, -	TP_PROTO(struct bio *bio, int error), +	TP_PROTO(struct request_queue *q, struct bio *bio, int error), -	TP_ARGS(bio, error), +	TP_ARGS(q, bio, error),  	TP_STRUCT__entry(  		__field( dev_t,		dev		) @@ -278,8 +279,7 @@ TRACE_EVENT(block_bio_complete,  	),  
	TP_fast_assign( -		__entry->dev		= bio->bi_bdev ? -					  bio->bi_bdev->bd_dev : 0; +		__entry->dev		= bio->bi_bdev->bd_dev;  		__entry->sector		= bio->bi_sector;  		__entry->nr_sector	= bio->bi_size >> 9;  		__entry->error		= error; diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 5a8671e8a67..e5586caff67 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,  		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",  				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },  				{ 16, "Z" }, { 32, "X" }, { 64, "x" }, -				{ 128, "W" }) : "R", +				{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",  		__entry->prev_state & TASK_STATE_MAX ? "+" : "",  		__entry->next_comm, __entry->next_pid, __entry->next_prio)  ); diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 4c43b444879..706d035fa74 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -95,15 +95,10 @@  #ifndef _LINUX_FUSE_H  #define _LINUX_FUSE_H -#ifdef __linux__ +#ifdef __KERNEL__  #include <linux/types.h>  #else  #include <stdint.h> -#define __u64 uint64_t -#define __s64 int64_t -#define __u32 uint32_t -#define __s32 int32_t -#define __u16 uint16_t  #endif  /* @@ -139,42 +134,42 @@     userspace works under 64bit kernels */  struct fuse_attr { -	__u64	ino; -	__u64	size; -	__u64	blocks; -	__u64	atime; -	__u64	mtime; -	__u64	ctime; -	__u32	atimensec; -	__u32	mtimensec; -	__u32	ctimensec; -	__u32	mode; -	__u32	nlink; -	__u32	uid; -	__u32	gid; -	__u32	rdev; -	__u32	blksize; -	__u32	padding; +	uint64_t	ino; +	uint64_t	size; +	uint64_t	blocks; +	uint64_t	atime; +	uint64_t	mtime; +	uint64_t	ctime; +	uint32_t	atimensec; +	uint32_t	mtimensec; +	uint32_t	ctimensec; +	uint32_t	mode; +	uint32_t	nlink; +	uint32_t	uid; +	uint32_t	gid; +	uint32_t	rdev; +	uint32_t	blksize; +	uint32_t	padding;  };  struct fuse_kstatfs { -	__u64	blocks; -	__u64	bfree; -	__u64	bavail; -	__u64	files; -	
__u64	ffree; -	__u32	bsize; -	__u32	namelen; -	__u32	frsize; -	__u32	padding; -	__u32	spare[6]; +	uint64_t	blocks; +	uint64_t	bfree; +	uint64_t	bavail; +	uint64_t	files; +	uint64_t	ffree; +	uint32_t	bsize; +	uint32_t	namelen; +	uint32_t	frsize; +	uint32_t	padding; +	uint32_t	spare[6];  };  struct fuse_file_lock { -	__u64	start; -	__u64	end; -	__u32	type; -	__u32	pid; /* tgid */ +	uint64_t	start; +	uint64_t	end; +	uint32_t	type; +	uint32_t	pid; /* tgid */  };  /** @@ -364,143 +359,143 @@ enum fuse_notify_code {  #define FUSE_COMPAT_ENTRY_OUT_SIZE 120  struct fuse_entry_out { -	__u64	nodeid;		/* Inode ID */ -	__u64	generation;	/* Inode generation: nodeid:gen must -				   be unique for the fs's lifetime */ -	__u64	entry_valid;	/* Cache timeout for the name */ -	__u64	attr_valid;	/* Cache timeout for the attributes */ -	__u32	entry_valid_nsec; -	__u32	attr_valid_nsec; +	uint64_t	nodeid;		/* Inode ID */ +	uint64_t	generation;	/* Inode generation: nodeid:gen must +					   be unique for the fs's lifetime */ +	uint64_t	entry_valid;	/* Cache timeout for the name */ +	uint64_t	attr_valid;	/* Cache timeout for the attributes */ +	uint32_t	entry_valid_nsec; +	uint32_t	attr_valid_nsec;  	struct fuse_attr attr;  };  struct fuse_forget_in { -	__u64	nlookup; +	uint64_t	nlookup;  };  struct fuse_forget_one { -	__u64	nodeid; -	__u64	nlookup; +	uint64_t	nodeid; +	uint64_t	nlookup;  };  struct fuse_batch_forget_in { -	__u32	count; -	__u32	dummy; +	uint32_t	count; +	uint32_t	dummy;  };  struct fuse_getattr_in { -	__u32	getattr_flags; -	__u32	dummy; -	__u64	fh; +	uint32_t	getattr_flags; +	uint32_t	dummy; +	uint64_t	fh;  };  #define FUSE_COMPAT_ATTR_OUT_SIZE 96  struct fuse_attr_out { -	__u64	attr_valid;	/* Cache timeout for the attributes */ -	__u32	attr_valid_nsec; -	__u32	dummy; +	uint64_t	attr_valid;	/* Cache timeout for the attributes */ +	uint32_t	attr_valid_nsec; +	uint32_t	dummy;  	struct fuse_attr attr;  };  #define FUSE_COMPAT_MKNOD_IN_SIZE 8  struct fuse_mknod_in { -	__u32	
mode; -	__u32	rdev; -	__u32	umask; -	__u32	padding; +	uint32_t	mode; +	uint32_t	rdev; +	uint32_t	umask; +	uint32_t	padding;  };  struct fuse_mkdir_in { -	__u32	mode; -	__u32	umask; +	uint32_t	mode; +	uint32_t	umask;  };  struct fuse_rename_in { -	__u64	newdir; +	uint64_t	newdir;  };  struct fuse_link_in { -	__u64	oldnodeid; +	uint64_t	oldnodeid;  };  struct fuse_setattr_in { -	__u32	valid; -	__u32	padding; -	__u64	fh; -	__u64	size; -	__u64	lock_owner; -	__u64	atime; -	__u64	mtime; -	__u64	unused2; -	__u32	atimensec; -	__u32	mtimensec; -	__u32	unused3; -	__u32	mode; -	__u32	unused4; -	__u32	uid; -	__u32	gid; -	__u32	unused5; +	uint32_t	valid; +	uint32_t	padding; +	uint64_t	fh; +	uint64_t	size; +	uint64_t	lock_owner; +	uint64_t	atime; +	uint64_t	mtime; +	uint64_t	unused2; +	uint32_t	atimensec; +	uint32_t	mtimensec; +	uint32_t	unused3; +	uint32_t	mode; +	uint32_t	unused4; +	uint32_t	uid; +	uint32_t	gid; +	uint32_t	unused5;  };  struct fuse_open_in { -	__u32	flags; -	__u32	unused; +	uint32_t	flags; +	uint32_t	unused;  };  struct fuse_create_in { -	__u32	flags; -	__u32	mode; -	__u32	umask; -	__u32	padding; +	uint32_t	flags; +	uint32_t	mode; +	uint32_t	umask; +	uint32_t	padding;  };  struct fuse_open_out { -	__u64	fh; -	__u32	open_flags; -	__u32	padding; +	uint64_t	fh; +	uint32_t	open_flags; +	uint32_t	padding;  };  struct fuse_release_in { -	__u64	fh; -	__u32	flags; -	__u32	release_flags; -	__u64	lock_owner; +	uint64_t	fh; +	uint32_t	flags; +	uint32_t	release_flags; +	uint64_t	lock_owner;  };  struct fuse_flush_in { -	__u64	fh; -	__u32	unused; -	__u32	padding; -	__u64	lock_owner; +	uint64_t	fh; +	uint32_t	unused; +	uint32_t	padding; +	uint64_t	lock_owner;  };  struct fuse_read_in { -	__u64	fh; -	__u64	offset; -	__u32	size; -	__u32	read_flags; -	__u64	lock_owner; -	__u32	flags; -	__u32	padding; +	uint64_t	fh; +	uint64_t	offset; +	uint32_t	size; +	uint32_t	read_flags; +	uint64_t	lock_owner; +	uint32_t	flags; +	uint32_t	padding;  };  #define FUSE_COMPAT_WRITE_IN_SIZE 24  
struct fuse_write_in { -	__u64	fh; -	__u64	offset; -	__u32	size; -	__u32	write_flags; -	__u64	lock_owner; -	__u32	flags; -	__u32	padding; +	uint64_t	fh; +	uint64_t	offset; +	uint32_t	size; +	uint32_t	write_flags; +	uint64_t	lock_owner; +	uint32_t	flags; +	uint32_t	padding;  };  struct fuse_write_out { -	__u32	size; -	__u32	padding; +	uint32_t	size; +	uint32_t	padding;  };  #define FUSE_COMPAT_STATFS_SIZE 48 @@ -510,32 +505,32 @@ struct fuse_statfs_out {  };  struct fuse_fsync_in { -	__u64	fh; -	__u32	fsync_flags; -	__u32	padding; +	uint64_t	fh; +	uint32_t	fsync_flags; +	uint32_t	padding;  };  struct fuse_setxattr_in { -	__u32	size; -	__u32	flags; +	uint32_t	size; +	uint32_t	flags;  };  struct fuse_getxattr_in { -	__u32	size; -	__u32	padding; +	uint32_t	size; +	uint32_t	padding;  };  struct fuse_getxattr_out { -	__u32	size; -	__u32	padding; +	uint32_t	size; +	uint32_t	padding;  };  struct fuse_lk_in { -	__u64	fh; -	__u64	owner; +	uint64_t	fh; +	uint64_t	owner;  	struct fuse_file_lock lk; -	__u32	lk_flags; -	__u32	padding; +	uint32_t	lk_flags; +	uint32_t	padding;  };  struct fuse_lk_out { @@ -543,134 +538,135 @@ struct fuse_lk_out {  };  struct fuse_access_in { -	__u32	mask; -	__u32	padding; +	uint32_t	mask; +	uint32_t	padding;  };  struct fuse_init_in { -	__u32	major; -	__u32	minor; -	__u32	max_readahead; -	__u32	flags; +	uint32_t	major; +	uint32_t	minor; +	uint32_t	max_readahead; +	uint32_t	flags;  };  struct fuse_init_out { -	__u32	major; -	__u32	minor; -	__u32	max_readahead; -	__u32	flags; -	__u16   max_background; -	__u16   congestion_threshold; -	__u32	max_write; +	uint32_t	major; +	uint32_t	minor; +	uint32_t	max_readahead; +	uint32_t	flags; +	uint16_t	max_background; +	uint16_t	congestion_threshold; +	uint32_t	max_write;  };  #define CUSE_INIT_INFO_MAX 4096  struct cuse_init_in { -	__u32	major; -	__u32	minor; -	__u32	unused; -	__u32	flags; +	uint32_t	major; +	uint32_t	minor; +	uint32_t	unused; +	uint32_t	flags;  };  struct cuse_init_out { -	__u32	major; -	
__u32	minor; -	__u32	unused; -	__u32	flags; -	__u32	max_read; -	__u32	max_write; -	__u32	dev_major;		/* chardev major */ -	__u32	dev_minor;		/* chardev minor */ -	__u32	spare[10]; +	uint32_t	major; +	uint32_t	minor; +	uint32_t	unused; +	uint32_t	flags; +	uint32_t	max_read; +	uint32_t	max_write; +	uint32_t	dev_major;		/* chardev major */ +	uint32_t	dev_minor;		/* chardev minor */ +	uint32_t	spare[10];  };  struct fuse_interrupt_in { -	__u64	unique; +	uint64_t	unique;  };  struct fuse_bmap_in { -	__u64	block; -	__u32	blocksize; -	__u32	padding; +	uint64_t	block; +	uint32_t	blocksize; +	uint32_t	padding;  };  struct fuse_bmap_out { -	__u64	block; +	uint64_t	block;  };  struct fuse_ioctl_in { -	__u64	fh; -	__u32	flags; -	__u32	cmd; -	__u64	arg; -	__u32	in_size; -	__u32	out_size; +	uint64_t	fh; +	uint32_t	flags; +	uint32_t	cmd; +	uint64_t	arg; +	uint32_t	in_size; +	uint32_t	out_size;  };  struct fuse_ioctl_iovec { -	__u64	base; -	__u64	len; +	uint64_t	base; +	uint64_t	len;  };  struct fuse_ioctl_out { -	__s32	result; -	__u32	flags; -	__u32	in_iovs; -	__u32	out_iovs; +	int32_t		result; +	uint32_t	flags; +	uint32_t	in_iovs; +	uint32_t	out_iovs;  };  struct fuse_poll_in { -	__u64	fh; -	__u64	kh; -	__u32	flags; -	__u32   events; +	uint64_t	fh; +	uint64_t	kh; +	uint32_t	flags; +	uint32_t	events;  };  struct fuse_poll_out { -	__u32	revents; -	__u32	padding; +	uint32_t	revents; +	uint32_t	padding;  };  struct fuse_notify_poll_wakeup_out { -	__u64	kh; +	uint64_t	kh;  };  struct fuse_fallocate_in { -	__u64	fh; -	__u64	offset; -	__u64	length; -	__u32	mode; -	__u32	padding; +	uint64_t	fh; +	uint64_t	offset; +	uint64_t	length; +	uint32_t	mode; +	uint32_t	padding;  };  struct fuse_in_header { -	__u32	len; -	__u32	opcode; -	__u64	unique; -	__u64	nodeid; -	__u32	uid; -	__u32	gid; -	__u32	pid; -	__u32	padding; +	uint32_t	len; +	uint32_t	opcode; +	uint64_t	unique; +	uint64_t	nodeid; +	uint32_t	uid; +	uint32_t	gid; +	uint32_t	pid; +	uint32_t	padding;  };  struct fuse_out_header { -	__u32	
len; -	__s32	error; -	__u64	unique; +	uint32_t	len; +	int32_t		error; +	uint64_t	unique;  };  struct fuse_dirent { -	__u64	ino; -	__u64	off; -	__u32	namelen; -	__u32	type; +	uint64_t	ino; +	uint64_t	off; +	uint32_t	namelen; +	uint32_t	type;  	char name[];  };  #define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name) -#define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1)) +#define FUSE_DIRENT_ALIGN(x) \ +	(((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))  #define FUSE_DIRENT_SIZE(d) \  	FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) @@ -685,47 +681,47 @@ struct fuse_direntplus {  	FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen)  struct fuse_notify_inval_inode_out { -	__u64	ino; -	__s64	off; -	__s64	len; +	uint64_t	ino; +	int64_t		off; +	int64_t		len;  };  struct fuse_notify_inval_entry_out { -	__u64	parent; -	__u32	namelen; -	__u32	padding; +	uint64_t	parent; +	uint32_t	namelen; +	uint32_t	padding;  };  struct fuse_notify_delete_out { -	__u64	parent; -	__u64	child; -	__u32	namelen; -	__u32	padding; +	uint64_t	parent; +	uint64_t	child; +	uint32_t	namelen; +	uint32_t	padding;  };  struct fuse_notify_store_out { -	__u64	nodeid; -	__u64	offset; -	__u32	size; -	__u32	padding; +	uint64_t	nodeid; +	uint64_t	offset; +	uint32_t	size; +	uint32_t	padding;  };  struct fuse_notify_retrieve_out { -	__u64	notify_unique; -	__u64	nodeid; -	__u64	offset; -	__u32	size; -	__u32	padding; +	uint64_t	notify_unique; +	uint64_t	nodeid; +	uint64_t	offset; +	uint32_t	size; +	uint32_t	padding;  };  /* Matches the size of fuse_write_in */  struct fuse_notify_retrieve_in { -	__u64	dummy1; -	__u64	offset; -	__u32	size; -	__u32	dummy2; -	__u64	dummy3; -	__u64	dummy4; +	uint64_t	dummy1; +	uint64_t	offset; +	uint32_t	size; +	uint32_t	dummy2; +	uint64_t	dummy3; +	uint64_t	dummy4;  };  #endif /* _LINUX_FUSE_H */ diff --git a/ipc/msg.c b/ipc/msg.c index 31cd1bf6af2..fede1d06ef3 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -872,6 +872,7 @@ long 
do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,  							goto out_unlock;  						break;  					} +					msg = ERR_PTR(-EAGAIN);  				} else  					break;  				msg_counter++; diff --git a/kernel/capability.c b/kernel/capability.c index 493d9725948..f6c2ce5701e 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -393,6 +393,30 @@ bool ns_capable(struct user_namespace *ns, int cap)  EXPORT_SYMBOL(ns_capable);  /** + * file_ns_capable - Determine if the file's opener had a capability in effect + * @file:  The file we want to check + * @ns:  The usernamespace we want the capability in + * @cap: The capability to be tested for + * + * Return true if task that opened the file had a capability in effect + * when the file was opened. + * + * This does not set PF_SUPERPRIV because the caller may not + * actually be privileged. + */ +bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap) +{ +	if (WARN_ON_ONCE(!cap_valid(cap))) +		return false; + +	if (security_capable(file->f_cred, ns, cap) == 0) +		return true; + +	return false; +} +EXPORT_SYMBOL(file_ns_capable); + +/**   * capable - Determine if the current task has a superior capability in effect   * @cap: The capability to be tested for   * diff --git a/kernel/events/core.c b/kernel/events/core.c index 59412d037ee..4d3124b3927 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4737,7 +4737,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)  	} else {  		if (arch_vma_name(mmap_event->vma)) {  			name = strncpy(tmp, arch_vma_name(mmap_event->vma), -				       sizeof(tmp)); +				       sizeof(tmp) - 1); +			tmp[sizeof(tmp) - 1] = '\0';  			goto got_name;  		} @@ -5330,7 +5331,7 @@ static void sw_perf_event_destroy(struct perf_event *event)  static int perf_swevent_init(struct perf_event *event)  { -	int event_id = event->attr.config; +	u64 event_id = event->attr.config;  	if (event->attr.type != PERF_TYPE_SOFTWARE)  		return -ENOENT; @@ 
-5986,6 +5987,7 @@ skip_type:  	if (pmu->pmu_cpu_context)  		goto got_cpu_context; +	ret = -ENOMEM;  	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);  	if (!pmu->pmu_cpu_context)  		goto free_dev; diff --git a/kernel/events/internal.h b/kernel/events/internal.h index d56a64c99a8..eb675c4d59d 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -16,7 +16,7 @@ struct ring_buffer {  	int				page_order;	/* allocation order  */  #endif  	int				nr_pages;	/* nr of data pages  */ -	int				writable;	/* are we writable   */ +	int				overwrite;	/* can overwrite itself */  	atomic_t			poll;		/* POLL_ for wakeups */ diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 23cb34ff397..97fddb09762 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -18,12 +18,24 @@  static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,  			      unsigned long offset, unsigned long head)  { -	unsigned long mask; +	unsigned long sz = perf_data_size(rb); +	unsigned long mask = sz - 1; -	if (!rb->writable) +	/* +	 * check if user-writable +	 * overwrite : over-write its own tail +	 * !overwrite: buffer possibly drops events. 
+	 */ +	if (rb->overwrite)  		return true; -	mask = perf_data_size(rb) - 1; +	/* +	 * verify that payload is not bigger than buffer +	 * otherwise masking logic may fail to detect +	 * the "not enough space" condition +	 */ +	if ((head - offset) > sz) +		return false;  	offset = (offset - tail) & mask;  	head   = (head   - tail) & mask; @@ -212,7 +224,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)  		rb->watermark = max_size / 2;  	if (flags & RING_BUFFER_WRITABLE) -		rb->writable = 1; +		rb->overwrite = 0; +	else +		rb->overwrite = 1;  	atomic_set(&rb->refcount, 1); diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index cc47812d3fe..14be27feda4 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -63,6 +63,7 @@  DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =  { +	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),  	.clock_base =  	{  		{ @@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)  	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);  	int i; -	raw_spin_lock_init(&cpu_base->lock); -  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {  		cpu_base->clock_base[i].cpu_base = cpu_base;  		timerqueue_init_head(&cpu_base->clock_base[i].active); diff --git a/kernel/kexec.c b/kernel/kexec.c index bddd3d7a74b..ffd4e111fd6 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -55,7 +55,7 @@ struct resource crashk_res = {  	.flags = IORESOURCE_BUSY | IORESOURCE_MEM  };  struct resource crashk_low_res = { -	.name  = "Crash kernel low", +	.name  = "Crash kernel",  	.start = 0,  	.end   = 0,  	.flags = IORESOURCE_BUSY | IORESOURCE_MEM @@ -1368,35 +1368,114 @@ static int __init parse_crashkernel_simple(char 		*cmdline,  	return 0;  } +#define SUFFIX_HIGH 0 +#define SUFFIX_LOW  1 +#define SUFFIX_NULL 2 +static __initdata char *suffix_tbl[] = { +	[SUFFIX_HIGH] = ",high", +	[SUFFIX_LOW]  = ",low", +	[SUFFIX_NULL] = NULL, +}; +  /* - * That function is the entry point for command line parsing and should be 
- * called from the arch-specific code. + * That function parses "suffix"  crashkernel command lines like + * + *	crashkernel=size,[high|low] + * + * It returns 0 on success and -EINVAL on failure.   */ +static int __init parse_crashkernel_suffix(char *cmdline, +					   unsigned long long	*crash_size, +					   unsigned long long	*crash_base, +					   const char *suffix) +{ +	char *cur = cmdline; + +	*crash_size = memparse(cmdline, &cur); +	if (cmdline == cur) { +		pr_warn("crashkernel: memory value expected\n"); +		return -EINVAL; +	} + +	/* check with suffix */ +	if (strncmp(cur, suffix, strlen(suffix))) { +		pr_warn("crashkernel: unrecognized char\n"); +		return -EINVAL; +	} +	cur += strlen(suffix); +	if (*cur != ' ' && *cur != '\0') { +		pr_warn("crashkernel: unrecognized char\n"); +		return -EINVAL; +	} + +	return 0; +} + +static __init char *get_last_crashkernel(char *cmdline, +			     const char *name, +			     const char *suffix) +{ +	char *p = cmdline, *ck_cmdline = NULL; + +	/* find crashkernel and use the last one if there are more */ +	p = strstr(p, name); +	while (p) { +		char *end_p = strchr(p, ' '); +		char *q; + +		if (!end_p) +			end_p = p + strlen(p); + +		if (!suffix) { +			int i; + +			/* skip the one with any known suffix */ +			for (i = 0; suffix_tbl[i]; i++) { +				q = end_p - strlen(suffix_tbl[i]); +				if (!strncmp(q, suffix_tbl[i], +					     strlen(suffix_tbl[i]))) +					goto next; +			} +			ck_cmdline = p; +		} else { +			q = end_p - strlen(suffix); +			if (!strncmp(q, suffix, strlen(suffix))) +				ck_cmdline = p; +		} +next: +		p = strstr(p+1, name); +	} + +	if (!ck_cmdline) +		return NULL; + +	return ck_cmdline; +} +  static int __init __parse_crashkernel(char *cmdline,  			     unsigned long long system_ram,  			     unsigned long long *crash_size,  			     unsigned long long *crash_base, -				const char *name) +			     const char *name, +			     const char *suffix)  { -	char 	*p = cmdline, *ck_cmdline = NULL;  	char	*first_colon, 
*first_space; +	char	*ck_cmdline;  	BUG_ON(!crash_size || !crash_base);  	*crash_size = 0;  	*crash_base = 0; -	/* find crashkernel and use the last one if there are more */ -	p = strstr(p, name); -	while (p) { -		ck_cmdline = p; -		p = strstr(p+1, name); -	} +	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);  	if (!ck_cmdline)  		return -EINVAL;  	ck_cmdline += strlen(name); +	if (suffix) +		return parse_crashkernel_suffix(ck_cmdline, crash_size, +				crash_base, suffix);  	/*  	 * if the commandline contains a ':', then that's the extended  	 * syntax -- if not, it must be the classic syntax @@ -1413,13 +1492,26 @@ static int __init __parse_crashkernel(char *cmdline,  	return 0;  } +/* + * That function is the entry point for command line parsing and should be + * called from the arch-specific code. + */  int __init parse_crashkernel(char *cmdline,  			     unsigned long long system_ram,  			     unsigned long long *crash_size,  			     unsigned long long *crash_base)  {  	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, -					"crashkernel="); +					"crashkernel=", NULL); +} + +int __init parse_crashkernel_high(char *cmdline, +			     unsigned long long system_ram, +			     unsigned long long *crash_size, +			     unsigned long long *crash_base) +{ +	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, +				"crashkernel=", suffix_tbl[SUFFIX_HIGH]);  }  int __init parse_crashkernel_low(char *cmdline, @@ -1428,7 +1520,7 @@ int __init parse_crashkernel_low(char *cmdline,  			     unsigned long long *crash_base)  {  	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, -					"crashkernel_low="); +				"crashkernel=", suffix_tbl[SUFFIX_LOW]);  }  static void update_vmcoreinfo_note(void) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index e35be53f661..3fed7f0cbcd 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -794,16 +794,16 @@ out:  }  #ifdef CONFIG_SYSCTL -/* This should be called with 
kprobe_mutex locked */  static void __kprobes optimize_all_kprobes(void)  {  	struct hlist_head *head;  	struct kprobe *p;  	unsigned int i; +	mutex_lock(&kprobe_mutex);  	/* If optimization is already allowed, just return */  	if (kprobes_allow_optimization) -		return; +		goto out;  	kprobes_allow_optimization = true;  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) { @@ -813,18 +813,22 @@ static void __kprobes optimize_all_kprobes(void)  				optimize_kprobe(p);  	}  	printk(KERN_INFO "Kprobes globally optimized\n"); +out: +	mutex_unlock(&kprobe_mutex);  } -/* This should be called with kprobe_mutex locked */  static void __kprobes unoptimize_all_kprobes(void)  {  	struct hlist_head *head;  	struct kprobe *p;  	unsigned int i; +	mutex_lock(&kprobe_mutex);  	/* If optimization is already prohibited, just return */ -	if (!kprobes_allow_optimization) +	if (!kprobes_allow_optimization) { +		mutex_unlock(&kprobe_mutex);  		return; +	}  	kprobes_allow_optimization = false;  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) { @@ -834,11 +838,14 @@ static void __kprobes unoptimize_all_kprobes(void)  				unoptimize_kprobe(p, false);  		}  	} +	mutex_unlock(&kprobe_mutex); +  	/* Wait for unoptimizing completion */  	wait_for_kprobe_optimizer();  	printk(KERN_INFO "Kprobes globally unoptimized\n");  } +static DEFINE_MUTEX(kprobe_sysctl_mutex);  int sysctl_kprobes_optimization;  int proc_kprobes_optimization_handler(struct ctl_table *table, int write,  				      void __user *buffer, size_t *length, @@ -846,7 +853,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,  {  	int ret; -	mutex_lock(&kprobe_mutex); +	mutex_lock(&kprobe_sysctl_mutex);  	sysctl_kprobes_optimization = kprobes_allow_optimization ? 
1 : 0;  	ret = proc_dointvec_minmax(table, write, buffer, length, ppos); @@ -854,7 +861,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,  		optimize_all_kprobes();  	else  		unoptimize_all_kprobes(); -	mutex_unlock(&kprobe_mutex); +	mutex_unlock(&kprobe_sysctl_mutex);  	return ret;  } diff --git a/kernel/kthread.c b/kernel/kthread.c index 691dc2ef9ba..9eb7fed0bba 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)  static void __kthread_parkme(struct kthread *self)  { -	__set_current_state(TASK_INTERRUPTIBLE); +	__set_current_state(TASK_PARKED);  	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {  		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))  			complete(&self->parked);  		schedule(); -		__set_current_state(TASK_INTERRUPTIBLE); +		__set_current_state(TASK_PARKED);  	}  	clear_bit(KTHREAD_IS_PARKED, &self->flags);  	__set_current_state(TASK_RUNNING); @@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),  }  EXPORT_SYMBOL(kthread_create_on_node); -static void __kthread_bind(struct task_struct *p, unsigned int cpu) +static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)  { +	/* Must have done schedule() in kthread() before we set_task_cpu */ +	if (!wait_task_inactive(p, state)) { +		WARN_ON(1); +		return; +	}  	/* It's safe because the task is inactive. 
*/  	do_set_cpus_allowed(p, cpumask_of(cpu));  	p->flags |= PF_THREAD_BOUND; @@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)   */  void kthread_bind(struct task_struct *p, unsigned int cpu)  { -	/* Must have done schedule() in kthread() before we set_task_cpu */ -	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) { -		WARN_ON(1); -		return; -	} -	__kthread_bind(p, cpu); +	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);  }  EXPORT_SYMBOL(kthread_bind); @@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)  	return NULL;  } +static void __kthread_unpark(struct task_struct *k, struct kthread *kthread) +{ +	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); +	/* +	 * We clear the IS_PARKED bit here as we don't wait +	 * until the task has left the park code. So if we'd +	 * park before that happens we'd see the IS_PARKED bit +	 * which might be about to be cleared. +	 */ +	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { +		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) +			__kthread_bind(k, kthread->cpu, TASK_PARKED); +		wake_up_state(k, TASK_PARKED); +	} +} +  /**   * kthread_unpark - unpark a thread created by kthread_create().   * @k:		thread created by kthread_create(). @@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)  {  	struct kthread *kthread = task_get_live_kthread(k); -	if (kthread) { -		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); -		/* -		 * We clear the IS_PARKED bit here as we don't wait -		 * until the task has left the park code. So if we'd -		 * park before that happens we'd see the IS_PARKED bit -		 * which might be about to be cleared. 
-		 */ -		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { -			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) -				__kthread_bind(k, kthread->cpu); -			wake_up_process(k); -		} -	} +	if (kthread) +		__kthread_unpark(k, kthread);  	put_task_struct(k);  } @@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)  	trace_sched_kthread_stop(k);  	if (kthread) {  		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); -		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); +		__kthread_unpark(k, kthread);  		wake_up_process(k);  		wait_for_completion(&kthread->exited);  	} diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index c685e31492d..c3ae1446461 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)  	u64 this_clock, remote_clock;  	u64 *ptr, old_val, val; +#if BITS_PER_LONG != 64 +again: +	/* +	 * Careful here: The local and the remote clock values need to +	 * be read out atomic as we need to compare the values and +	 * then update either the local or the remote side. So the +	 * cmpxchg64 below only protects one readout. +	 * +	 * We must reread via sched_clock_local() in the retry case on +	 * 32bit as an NMI could use sched_clock_local() via the +	 * tracer and hit between the readout of +	 * the low32bit and the high 32bit portion. +	 */ +	this_clock = sched_clock_local(my_scd); +	/* +	 * We must enforce atomic readout on 32bit, otherwise the +	 * update on the remote cpu can hit inbetween the readout of +	 * the low32bit and the high 32bit portion. +	 */ +	remote_clock = cmpxchg64(&scd->clock, 0, 0); +#else +	/* +	 * On 64bit the read of [my]scd->clock is atomic versus the +	 * update, so we can avoid the above 32bit dance. 
+	 */  	sched_clock_local(my_scd);  again:  	this_clock = my_scd->clock;  	remote_clock = scd->clock; +#endif  	/*  	 * Use the opportunity that we have both locks diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7f12624a393..67d04651f44 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1498,8 +1498,10 @@ static void try_to_wake_up_local(struct task_struct *p)  {  	struct rq *rq = task_rq(p); -	BUG_ON(rq != this_rq()); -	BUG_ON(p == current); +	if (WARN_ON_ONCE(rq != this_rq()) || +	    WARN_ON_ONCE(p == current)) +		return; +  	lockdep_assert_held(&rq->lock);  	if (!raw_spin_trylock(&p->pi_lock)) { @@ -4999,7 +5001,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)  }  static int min_load_idx = 0; -static int max_load_idx = CPU_LOAD_IDX_MAX; +static int max_load_idx = CPU_LOAD_IDX_MAX-1;  static void  set_table_entry(struct ctl_table *entry, diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index ed12cbb135f..e93cca92f38 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)  	t = tsk;  	do { -		task_cputime(tsk, &utime, &stime); +		task_cputime(t, &utime, &stime);  		times->utime += utime;  		times->stime += stime;  		times->sum_exec_runtime += task_sched_runtime(t); diff --git a/kernel/signal.c b/kernel/signal.c index dd72567767d..598dc06be42 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2948,7 +2948,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)  static int do_tkill(pid_t tgid, pid_t pid, int sig)  { -	struct siginfo info; +	struct siginfo info = {};  	info.si_signo = sig;  	info.si_errno = 0; diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 8eaed9aa9cf..02fc5c93367 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)  	}  	get_task_struct(tsk);  	*per_cpu_ptr(ht->store, 
cpu) = tsk; -	if (ht->create) -		ht->create(cpu); +	if (ht->create) { +		/* +		 * Make sure that the task has actually scheduled out +		 * into park position, before calling the create +		 * callback. At least the migration thread callback +		 * requires that the task is off the runqueue. +		 */ +		if (!wait_task_inactive(tsk, TASK_PARKED)) +			WARN_ON(1); +		else +			ht->create(cpu); +	}  	return 0;  } diff --git a/kernel/sys.c b/kernel/sys.c index 39c9c4a2949..0da73cf73e6 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -324,7 +324,6 @@ void kernel_restart_prepare(char *cmd)  	system_state = SYSTEM_RESTART;  	usermodehelper_disable();  	device_shutdown(); -	syscore_shutdown();  }  /** @@ -370,6 +369,7 @@ void kernel_restart(char *cmd)  {  	kernel_restart_prepare(cmd);  	disable_nonboot_cpus(); +	syscore_shutdown();  	if (!cmd)  		printk(KERN_EMERG "Restarting system.\n");  	else @@ -395,6 +395,7 @@ static void kernel_shutdown_prepare(enum system_states state)  void kernel_halt(void)  {  	kernel_shutdown_prepare(SYSTEM_HALT); +	disable_nonboot_cpus();  	syscore_shutdown();  	printk(KERN_EMERG "System halted.\n");  	kmsg_dump(KMSG_DUMP_HALT); diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 9e5b8c272ee..5a0f781cd72 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -739,12 +739,6 @@ static void blk_add_trace_rq_complete(void *ignore,  				      struct request_queue *q,  				      struct request *rq)  { -	struct blk_trace *bt = q->blk_trace; - -	/* if control ever passes through here, it's a request based driver */ -	if (unlikely(bt && !bt->rq_based)) -		bt->rq_based = true; -  	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);  } @@ -780,24 +774,10 @@ static void blk_add_trace_bio_bounce(void *ignore,  	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);  } -static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error) +static void blk_add_trace_bio_complete(void *ignore, +				       struct request_queue *q, struct bio 
*bio, +				       int error)  { -	struct request_queue *q; -	struct blk_trace *bt; - -	if (!bio->bi_bdev) -		return; - -	q = bdev_get_queue(bio->bi_bdev); -	bt = q->blk_trace; - -	/* -	 * Request based drivers will generate both rq and bio completions. -	 * Ignore bio ones. -	 */ -	if (likely(!bt) || bt->rq_based) -		return; -  	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);  } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6893d5a2bf0..b3fde6d7b7f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -66,7 +66,7 @@  static struct ftrace_ops ftrace_list_end __read_mostly = {  	.func		= ftrace_stub, -	.flags		= FTRACE_OPS_FL_RECURSION_SAFE, +	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,  };  /* ftrace_enabled is a method to turn ftrace on or off */ @@ -694,7 +694,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)  		free_page(tmp);  	} -	free_page((unsigned long)stat->pages);  	stat->pages = NULL;  	stat->start = NULL; @@ -1053,6 +1052,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)  static struct pid * const ftrace_swapper_pid = &init_struct_pid; +loff_t +ftrace_filter_lseek(struct file *file, loff_t offset, int whence) +{ +	loff_t ret; + +	if (file->f_mode & FMODE_READ) +		ret = seq_lseek(file, offset, whence); +	else +		file->f_pos = ret = 1; + +	return ret; +} +  #ifdef CONFIG_DYNAMIC_FTRACE  #ifndef CONFIG_FTRACE_MCOUNT_RECORD @@ -2613,7 +2625,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)   * routine, you can use ftrace_filter_write() for the write   * routine if @flag has FTRACE_ITER_FILTER set, or   * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. - * ftrace_regex_lseek() should be used as the lseek routine, and + * ftrace_filter_lseek() should be used as the lseek routine, and   * release must call ftrace_regex_release().   
*/  int @@ -2697,19 +2709,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)  				 inode, file);  } -loff_t -ftrace_regex_lseek(struct file *file, loff_t offset, int whence) -{ -	loff_t ret; - -	if (file->f_mode & FMODE_READ) -		ret = seq_lseek(file, offset, whence); -	else -		file->f_pos = ret = 1; - -	return ret; -} -  static int ftrace_match(char *str, char *regex, int len, int type)  {  	int matched = 0; @@ -3441,14 +3440,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;  static int __init set_ftrace_notrace(char *str)  { -	strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); +	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);  	return 1;  }  __setup("ftrace_notrace=", set_ftrace_notrace);  static int __init set_ftrace_filter(char *str)  { -	strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); +	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);  	return 1;  }  __setup("ftrace_filter=", set_ftrace_filter); @@ -3571,7 +3570,7 @@ static const struct file_operations ftrace_filter_fops = {  	.open = ftrace_filter_open,  	.read = seq_read,  	.write = ftrace_filter_write, -	.llseek = ftrace_regex_lseek, +	.llseek = ftrace_filter_lseek,  	.release = ftrace_regex_release,  }; @@ -3579,7 +3578,7 @@ static const struct file_operations ftrace_notrace_fops = {  	.open = ftrace_notrace_open,  	.read = seq_read,  	.write = ftrace_notrace_write, -	.llseek = ftrace_regex_lseek, +	.llseek = ftrace_filter_lseek,  	.release = ftrace_regex_release,  }; @@ -3784,8 +3783,8 @@ static const struct file_operations ftrace_graph_fops = {  	.open		= ftrace_graph_open,  	.read		= seq_read,  	.write		= ftrace_graph_write, +	.llseek		= ftrace_filter_lseek,  	.release	= ftrace_graph_release, -	.llseek		= seq_lseek,  };  #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ @@ -4131,7 +4130,8 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,  	preempt_disable_notrace();  	trace_recursion_set(TRACE_CONTROL_BIT);  	do_for_each_ftrace_op(op, 
ftrace_control_list) { -		if (!ftrace_function_local_disabled(op) && +		if (!(op->flags & FTRACE_OPS_FL_STUB) && +		    !ftrace_function_local_disabled(op) &&  		    ftrace_ops_test(op, ip))  			op->func(ip, parent_ip, op, regs);  	} while_for_each_ftrace_op(op); @@ -4439,7 +4439,7 @@ static const struct file_operations ftrace_pid_fops = {  	.open		= ftrace_pid_open,  	.write		= ftrace_pid_write,  	.read		= seq_read, -	.llseek		= seq_lseek, +	.llseek		= ftrace_filter_lseek,  	.release	= ftrace_pid_release,  }; @@ -4555,12 +4555,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,  		ftrace_startup_sysctl();  		/* we are starting ftrace again */ -		if (ftrace_ops_list != &ftrace_list_end) { -			if (ftrace_ops_list->next == &ftrace_list_end) -				ftrace_trace_function = ftrace_ops_list->func; -			else -				ftrace_trace_function = ftrace_ops_list_func; -		} +		if (ftrace_ops_list != &ftrace_list_end) +			update_ftrace_function();  	} else {  		/* stopping ftrace calls (just send to ftrace_stub) */ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4f1dade5698..66338c4f7f4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -132,7 +132,7 @@ static char *default_bootup_tracer;  static int __init set_cmdline_ftrace(char *str)  { -	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); +	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);  	default_bootup_tracer = bootup_tracer_buf;  	/* We are using ftrace early, expand it */  	ring_buffer_expanded = 1; @@ -162,7 +162,7 @@ static char *trace_boot_options __initdata;  static int __init set_trace_boot_options(char *str)  { -	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); +	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);  	trace_boot_options = trace_boot_options_buf;  	return 0;  } @@ -744,8 +744,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)  		return;  	WARN_ON_ONCE(!irqs_disabled()); -	if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) +	if 
(!current_trace->allocated_snapshot) { +		/* Only the nop tracer should hit this when disabling */ +		WARN_ON_ONCE(current_trace != &nop_trace);  		return; +	}  	arch_spin_lock(&ftrace_max_lock); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 42ca822fc70..83a8b5b7bd3 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = {  	.open = stack_trace_filter_open,  	.read = seq_read,  	.write = ftrace_filter_write, -	.llseek = ftrace_regex_lseek, +	.llseek = ftrace_filter_lseek,  	.release = ftrace_regex_release,  }; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index a54f26f82eb..e134d8f365d 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -25,7 +25,8 @@  static struct kmem_cache *user_ns_cachep __read_mostly; -static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, +static bool new_idmap_permitted(const struct file *file, +				struct user_namespace *ns, int cap_setid,  				struct uid_gid_map *map);  static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns) @@ -612,10 +613,10 @@ static ssize_t map_write(struct file *file, const char __user *buf,  	if (map->nr_extents != 0)  		goto out; -	/* Require the appropriate privilege CAP_SETUID or CAP_SETGID -	 * over the user namespace in order to set the id mapping. +	/* +	 * Adjusting namespace settings requires capabilities on the target.  	 */ -	if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) +	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))  		goto out;  	/* Get a buffer */ @@ -700,7 +701,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,  	ret = -EPERM;  	/* Validate the user is allowed to use user id's mapped to. 
*/ -	if (!new_idmap_permitted(ns, cap_setid, &new_map)) +	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))  		goto out;  	/* Map the lower ids from the parent user namespace to the @@ -787,7 +788,8 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t  			 &ns->projid_map, &ns->parent->projid_map);  } -static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, +static bool new_idmap_permitted(const struct file *file,  +				struct user_namespace *ns, int cap_setid,  				struct uid_gid_map *new_map)  {  	/* Allow mapping to your own filesystem ids */ @@ -795,12 +797,12 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,  		u32 id = new_map->extent[0].lower_first;  		if (cap_setid == CAP_SETUID) {  			kuid_t uid = make_kuid(ns->parent, id); -			if (uid_eq(uid, current_fsuid())) +			if (uid_eq(uid, file->f_cred->fsuid))  				return true;  		}  		else if (cap_setid == CAP_SETGID) {  			kgid_t gid = make_kgid(ns->parent, id); -			if (gid_eq(gid, current_fsgid())) +			if (gid_eq(gid, file->f_cred->fsgid))  				return true;  		}  	} @@ -811,8 +813,10 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,  	/* Allow the specified ids if we have the appropriate capability  	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace. +	 * And the opener of the id file also had the approprpiate capability.  	 */ -	if (ns_capable(ns->parent, cap_setid)) +	if (ns_capable(ns->parent, cap_setid) && +	    file_ns_capable(file, ns->parent, cap_setid))  		return true;  	return false; diff --git a/lib/Kconfig b/lib/Kconfig index 3958dc4389f..fe01d418b09 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -404,4 +404,7 @@ config OID_REGISTRY  	help  	  Enable fast lookup object identifier registry. 
+config UCS2_STRING +        tristate +  endmenu diff --git a/lib/Makefile b/lib/Makefile index d7946ff75b2..6e2cc561f76 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -174,3 +174,5 @@ quiet_cmd_build_OID_registry = GEN     $@        cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@  clean-files	+= oid_registry_data.c + +obj-$(CONFIG_UCS2_STRING) += ucs2_string.o diff --git a/lib/kobject.c b/lib/kobject.c index e07ee1fcd6f..a65486613d7 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)  	return kobj;  } +static struct kobject *kobject_get_unless_zero(struct kobject *kobj) +{ +	if (!kref_get_unless_zero(&kobj->kref)) +		kobj = NULL; +	return kobj; +} +  /*   * kobject_cleanup - free kobject resources.   * @kobj: object to cleanup @@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)  	list_for_each_entry(k, &kset->list, entry) {  		if (kobject_name(k) && !strcmp(kobject_name(k), name)) { -			ret = kobject_get(k); +			ret = kobject_get_unless_zero(k);  			break;  		}  	} diff --git a/lib/swiotlb.c b/lib/swiotlb.c index bfe02b8fc55..d23762e6652 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str)  	if (!strcmp(str, "force"))  		swiotlb_force = 1; -	return 1; +	return 0;  } -__setup("swiotlb=", setup_io_tlb_npages); +early_param("swiotlb", setup_io_tlb_npages);  /* make io_tlb_overflow tunable too? */  unsigned long swiotlb_nr_tbl(void) @@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void)  	return io_tlb_nslabs;  }  EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); + +/* default to 64MB */ +#define IO_TLB_DEFAULT_SIZE (64UL<<20) +unsigned long swiotlb_size_or_default(void) +{ +	unsigned long size; + +	size = io_tlb_nslabs << IO_TLB_SHIFT; + +	return size ? 
size : (IO_TLB_DEFAULT_SIZE); +} +  /* Note that this doesn't work with highmem page */  static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,  				      volatile void *address) @@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)  void  __init  swiotlb_init(int verbose)  { -	/* default to 64MB */ -	size_t default_size = 64UL<<20; +	size_t default_size = IO_TLB_DEFAULT_SIZE;  	unsigned char *vstart;  	unsigned long bytes; diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c new file mode 100644 index 00000000000..6f500ef2301 --- /dev/null +++ b/lib/ucs2_string.c @@ -0,0 +1,51 @@ +#include <linux/ucs2_string.h> +#include <linux/module.h> + +/* Return the number of unicode characters in data */ +unsigned long +ucs2_strnlen(const ucs2_char_t *s, size_t maxlength) +{ +        unsigned long length = 0; + +        while (*s++ != 0 && length < maxlength) +                length++; +        return length; +} +EXPORT_SYMBOL(ucs2_strnlen); + +unsigned long +ucs2_strlen(const ucs2_char_t *s) +{ +        return ucs2_strnlen(s, ~0UL); +} +EXPORT_SYMBOL(ucs2_strlen); + +/* + * Return the number of bytes is the length of this string + * Note: this is NOT the same as the number of unicode characters + */ +unsigned long +ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength) +{ +        return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t); +} +EXPORT_SYMBOL(ucs2_strsize); + +int +ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) +{ +        while (1) { +                if (len == 0) +                        return 0; +                if (*a < *b) +                        return -1; +                if (*a > *b) +                        return 1; +                if (*a == 0) /* implies *b == 0 */ +                        return 0; +                a++; +                b++; +                len--; +        } +} +EXPORT_SYMBOL(ucs2_strncmp); diff --git a/mm/hugetlb.c b/mm/hugetlb.c 
index ca9a7c6d7e9..1a12f5b9a0a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2961,7 +2961,17 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,  			break;  		} -		if (absent || +		/* +		 * We need call hugetlb_fault for both hugepages under migration +		 * (in which case hugetlb_fault waits for the migration,) and +		 * hwpoisoned hugepages (in which case we need to prevent the +		 * caller from accessing to them.) In order to do this, we use +		 * here is_swap_pte instead of is_hugetlb_entry_migration and +		 * is_hugetlb_entry_hwpoisoned. This is because it simply covers +		 * both cases, and because we can't follow correct pages +		 * directly from any kind of swap entries. +		 */ +		if (absent || is_swap_pte(huge_ptep_get(pte)) ||  		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {  			int ret; diff --git a/mm/memory.c b/mm/memory.c index 494526ae024..ba94dec5b25 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)  	tlb->mm = mm;  	tlb->fullmm     = fullmm; +	tlb->need_flush_all = 0;  	tlb->start	= -1UL;  	tlb->end	= 0;  	tlb->need_flush = 0; @@ -2392,6 +2393,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,  }  EXPORT_SYMBOL(remap_pfn_range); +/** + * vm_iomap_memory - remap memory to userspace + * @vma: user vma to map to + * @start: start of area + * @len: size of area + * + * This is a simplified io_remap_pfn_range() for common driver use. The + * driver just needs to give us the physical memory range to be mapped, + * we'll figure out the rest from the vma information. + * + * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get + * whatever write-combining details or similar. 
+ */ +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) +{ +	unsigned long vm_len, pfn, pages; + +	/* Check that the physical memory area passed in looks valid */ +	if (start + len < start) +		return -EINVAL; +	/* +	 * You *really* shouldn't map things that aren't page-aligned, +	 * but we've historically allowed it because IO memory might +	 * just have smaller alignment. +	 */ +	len += start & ~PAGE_MASK; +	pfn = start >> PAGE_SHIFT; +	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; +	if (pfn + pages < pfn) +		return -EINVAL; + +	/* We start the mapping 'vm_pgoff' pages into the area */ +	if (vma->vm_pgoff > pages) +		return -EINVAL; +	pfn += vma->vm_pgoff; +	pages -= vma->vm_pgoff; + +	/* Can we fit all of the mapping? */ +	vm_len = vma->vm_end - vma->vm_start; +	if (vm_len >> PAGE_SHIFT > pages) +		return -EINVAL; + +	/* Ok, let it rip */ +	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_iomap_memory); +  static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,  				     unsigned long addr, unsigned long end,  				     pte_fn_t fn, void *data) diff --git a/mm/mmap.c b/mm/mmap.c index 6466699b16c..033094ba62d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1940,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)  	/* Check the cache first. */  	/* (Cache hit rate is typically around 35%.) */ -	vma = mm->mmap_cache; +	vma = ACCESS_ONCE(mm->mmap_cache);  	if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {  		struct rb_node *rb_node; @@ -2305,7 +2305,7 @@ static void unmap_region(struct mm_struct *mm,  	update_hiwater_rss(mm);  	unmap_vmas(&tlb, vma, start, end);  	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, -				 next ? next->vm_start : 0); +				 next ? 
next->vm_start : USER_PGTABLES_CEILING);  	tlb_finish_mmu(&tlb, start, end);  } @@ -2685,7 +2685,7 @@ void exit_mmap(struct mm_struct *mm)  	/* Use -1 here to ensure all VMAs in the mm are unmapped */  	unmap_vmas(&tlb, vma, 0, -1); -	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); +	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);  	tlb_finish_mmu(&tlb, 0, -1);  	/* diff --git a/mm/nommu.c b/mm/nommu.c index e1932808753..2f3ea749c31 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)  	struct vm_area_struct *vma;  	/* check the cache first */ -	vma = mm->mmap_cache; +	vma = ACCESS_ONCE(mm->mmap_cache);  	if (vma && vma->vm_start <= addr && vma->vm_end > addr)  		return vma; diff --git a/mm/vmscan.c b/mm/vmscan.c index 88c5fed8b9a..669fba39be1 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3188,9 +3188,9 @@ int kswapd_run(int nid)  	if (IS_ERR(pgdat->kswapd)) {  		/* failure at boot is fatal */  		BUG_ON(system_state == SYSTEM_BOOTING); -		pgdat->kswapd = NULL;  		pr_err("Failed to start kswapd on node %d\n", nid);  		ret = PTR_ERR(pgdat->kswapd); +		pgdat->kswapd = NULL;  	}  	return ret;  } diff --git a/net/802/mrp.c b/net/802/mrp.c index a4cc3229952..e085bcc754f 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c @@ -870,8 +870,12 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)  	 * all pending messages before the applicant is gone.  	 
*/  	del_timer_sync(&app->join_timer); + +	spin_lock(&app->lock);  	mrp_mad_event(app, MRP_EVENT_TX);  	mrp_pdu_queue(app); +	spin_unlock(&app->lock); +  	mrp_queue_xmit(app);  	dev_mc_del(dev, appl->group_address); diff --git a/net/atm/common.c b/net/atm/common.c index 7b491006eaf..737bef59ce8 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,  	struct sk_buff *skb;  	int copied, error = -EINVAL; +	msg->msg_namelen = 0; +  	if (sock->state != SS_CONNECTED)  		return -ENOTCONN; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 7b11f8bc507..e277e38f736 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,  		ax25_address src;  		const unsigned char *mac = skb_mac_header(skb); +		memset(sax, 0, sizeof(struct full_sockaddr_ax25));  		ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,  				&digi, NULL, NULL);  		sax->sax25_family = AF_AX25; diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 0488d70c8c3..fa563e497c4 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -169,7 +169,7 @@ void batadv_mesh_free(struct net_device *soft_iface)  	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);  } -int batadv_is_my_mac(const uint8_t *addr) +int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)  {  	const struct batadv_hard_iface *hard_iface; @@ -178,6 +178,9 @@ int batadv_is_my_mac(const uint8_t *addr)  		if (hard_iface->if_status != BATADV_IF_ACTIVE)  			continue; +		if (hard_iface->soft_iface != bat_priv->soft_iface) +			continue; +  		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {  			rcu_read_unlock();  			return 1; diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index ced08b936a9..d40910dfc8e 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -162,7 +162,7 @@ extern struct 
workqueue_struct *batadv_event_workqueue;  int batadv_mesh_init(struct net_device *soft_iface);  void batadv_mesh_free(struct net_device *soft_iface); -int batadv_is_my_mac(const uint8_t *addr); +int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);  struct batadv_hard_iface *  batadv_seq_print_text_primary_if_get(struct seq_file *seq);  int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 5ee21cebbbb..319f2906c71 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -402,7 +402,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,  		goto out;  	/* not for me */ -	if (!batadv_is_my_mac(ethhdr->h_dest)) +	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))  		goto out;  	icmp_packet = (struct batadv_icmp_packet_rr *)skb->data; @@ -416,7 +416,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,  	}  	/* packet for me */ -	if (batadv_is_my_mac(icmp_packet->dst)) +	if (batadv_is_my_mac(bat_priv, icmp_packet->dst))  		return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);  	/* TTL exceeded */ @@ -548,7 +548,8 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,  	return router;  } -static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) +static int batadv_check_unicast_packet(struct batadv_priv *bat_priv, +				       struct sk_buff *skb, int hdr_size)  {  	struct ethhdr *ethhdr; @@ -567,7 +568,7 @@ static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)  		return -1;  	/* not for me */ -	if (!batadv_is_my_mac(ethhdr->h_dest)) +	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))  		return -1;  	return 0; @@ -582,7 +583,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)  	char tt_flag;  	size_t packet_size; -	if (batadv_check_unicast_packet(skb, hdr_size) < 0) +	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)  		return NET_RX_DROP;  	/* I could 
need to modify it */ @@ -614,7 +615,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)  	case BATADV_TT_RESPONSE:  		batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX); -		if (batadv_is_my_mac(tt_query->dst)) { +		if (batadv_is_my_mac(bat_priv, tt_query->dst)) {  			/* packet needs to be linearized to access the TT  			 * changes  			 */ @@ -657,14 +658,15 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)  	struct batadv_roam_adv_packet *roam_adv_packet;  	struct batadv_orig_node *orig_node; -	if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0) +	if (batadv_check_unicast_packet(bat_priv, skb, +					sizeof(*roam_adv_packet)) < 0)  		goto out;  	batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);  	roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data; -	if (!batadv_is_my_mac(roam_adv_packet->dst)) +	if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))  		return batadv_route_unicast_packet(skb, recv_if);  	/* check if it is a backbone gateway. 
we don't accept @@ -967,7 +969,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,  	 * last time) the packet had an updated information or not  	 */  	curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); -	if (!batadv_is_my_mac(unicast_packet->dest)) { +	if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {  		orig_node = batadv_orig_hash_find(bat_priv,  						  unicast_packet->dest);  		/* if it is not possible to find the orig_node representing the @@ -1044,14 +1046,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,  	if (is4addr)  		hdr_size = sizeof(*unicast_4addr_packet); -	if (batadv_check_unicast_packet(skb, hdr_size) < 0) +	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)  		return NET_RX_DROP;  	if (!batadv_check_unicast_ttvn(bat_priv, skb))  		return NET_RX_DROP;  	/* packet for me */ -	if (batadv_is_my_mac(unicast_packet->dest)) { +	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {  		if (is4addr) {  			batadv_dat_inc_counter(bat_priv,  					       unicast_4addr_packet->subtype); @@ -1088,7 +1090,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,  	struct sk_buff *new_skb = NULL;  	int ret; -	if (batadv_check_unicast_packet(skb, hdr_size) < 0) +	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)  		return NET_RX_DROP;  	if (!batadv_check_unicast_ttvn(bat_priv, skb)) @@ -1097,7 +1099,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,  	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;  	/* packet for me */ -	if (batadv_is_my_mac(unicast_packet->dest)) { +	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {  		ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);  		if (ret == NET_RX_DROP) @@ -1151,13 +1153,13 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,  		goto out;  	/* ignore broadcasts sent by myself */ -	if (batadv_is_my_mac(ethhdr->h_source)) +	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))  		goto out;  	bcast_packet = (struct 
batadv_bcast_packet *)skb->data;  	/* ignore broadcasts originated by myself */ -	if (batadv_is_my_mac(bcast_packet->orig)) +	if (batadv_is_my_mac(bat_priv, bcast_packet->orig))  		goto out;  	if (bcast_packet->header.ttl < 2) @@ -1243,14 +1245,14 @@ int batadv_recv_vis_packet(struct sk_buff *skb,  	ethhdr = (struct ethhdr *)skb_mac_header(skb);  	/* not for me */ -	if (!batadv_is_my_mac(ethhdr->h_dest)) +	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))  		return NET_RX_DROP;  	/* ignore own packets */ -	if (batadv_is_my_mac(vis_packet->vis_orig)) +	if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))  		return NET_RX_DROP; -	if (batadv_is_my_mac(vis_packet->sender_orig)) +	if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))  		return NET_RX_DROP;  	switch (vis_packet->vis_type) { diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 98a66a021a6..7abee19567e 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1953,7 +1953,7 @@ out:  bool batadv_send_tt_response(struct batadv_priv *bat_priv,  			     struct batadv_tt_query_packet *tt_request)  { -	if (batadv_is_my_mac(tt_request->dst)) { +	if (batadv_is_my_mac(bat_priv, tt_request->dst)) {  		/* don't answer backbone gws! */  		if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))  			return true; diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index c053244b97b..6a1e646be96 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c @@ -477,7 +477,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,  	/* Are we the target for this VIS packet? 
*/  	if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC	&& -	    batadv_is_my_mac(vis_packet->target_orig)) +	    batadv_is_my_mac(bat_priv, vis_packet->target_orig))  		are_target = 1;  	spin_lock_bh(&bat_priv->vis.hash_lock); @@ -496,7 +496,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,  		batadv_send_list_add(bat_priv, info);  		/* ... we're not the recipient (and thus need to forward). */ -	} else if (!batadv_is_my_mac(packet->target_orig)) { +	} else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {  		batadv_send_list_add(bat_priv, info);  	} diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index d3ee69b35a7..0d1b08cc76e 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (flags & (MSG_OOB))  		return -EOPNOTSUPP; +	msg->msg_namelen = 0; +  	skb = skb_recv_datagram(sk, flags, noblock, &err);  	if (!skb) {  		if (sk->sk_shutdown & RCV_SHUTDOWN) @@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		return err;  	} -	msg->msg_namelen = 0; -  	copied = skb->len;  	if (len < copied) {  		msg->msg_flags |= MSG_TRUNC; diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index c23bae86263..7c9224bcce1 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {  		rfcomm_dlc_accept(d); +		msg->msg_namelen = 0;  		return 0;  	} diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index fad0302bdb3..fb6192c9812 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {  		hci_conn_accept(pi->conn->hcon, 0);  		sk->sk_state = BT_CONFIG; +		
msg->msg_namelen = 0;  		release_sock(sk);  		return 0; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index ef1b91431c6..459dab22b3f 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -67,7 +67,8 @@ void br_port_carrier_check(struct net_bridge_port *p)  	struct net_device *dev = p->dev;  	struct net_bridge *br = p->br; -	if (netif_running(dev) && netif_oper_up(dev)) +	if (!(p->flags & BR_ADMIN_COST) && +	    netif_running(dev) && netif_oper_up(dev))  		p->path_cost = port_cost(dev);  	if (!netif_running(br->dev)) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 3cbf5beb3d4..d2c043a857b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -156,6 +156,7 @@ struct net_bridge_port  #define BR_BPDU_GUARD           0x00000002  #define BR_ROOT_BLOCK		0x00000004  #define BR_MULTICAST_FAST_LEAVE	0x00000008 +#define BR_ADMIN_COST		0x00000010  #ifdef CONFIG_BRIDGE_IGMP_SNOOPING  	u32				multicast_startup_queries_sent; diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 0bdb4ebd362..d45e760141b 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -288,6 +288,7 @@ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)  	    path_cost > BR_MAX_PATH_COST)  		return -ERANGE; +	p->flags |= BR_ADMIN_COST;  	p->path_cost = path_cost;  	br_configuration_update(p->br);  	br_port_state_selection(p->br); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 095259f8390..ff2ff3ce696 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (m->msg_flags&MSG_OOB)  		goto read_error; +	m->msg_namelen = 0; +  	skb = skb_recv_datagram(sk, flags, 0 , &ret);  	if (!skb)  		goto read_error; diff --git a/net/can/gw.c b/net/can/gw.c index 2d117dc5ebe..117814a7e73 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb,  		
	if (gwj->src.dev == dev || gwj->dst.dev == dev) {  				hlist_del(&gwj->list);  				cgw_unregister_filter(gwj); -				kfree(gwj); +				kmem_cache_free(cgw_cache, gwj);  			}  		}  	} @@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void)  	hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {  		hlist_del(&gwj->list);  		cgw_unregister_filter(gwj); -		kfree(gwj); +		kmem_cache_free(cgw_cache, gwj);  	}  } @@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)  		hlist_del(&gwj->list);  		cgw_unregister_filter(gwj); -		kfree(gwj); +		kmem_cache_free(cgw_cache, gwj);  		err = 0;  		break;  	} diff --git a/net/core/dev.c b/net/core/dev.c index b13e5c766c1..b24ab0e98eb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1624,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)  	}  	skb_orphan(skb); -	nf_reset(skb);  	if (unlikely(!is_skb_forwardable(dev, skb))) {  		atomic_long_inc(&dev->rx_dropped); @@ -1640,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)  	skb->mark = 0;  	secpath_reset(skb);  	nf_reset(skb); +	nf_reset_trace(skb);  	return netif_rx(skb);  }  EXPORT_SYMBOL_GPL(dev_forward_skb); @@ -2148,6 +2148,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)  	struct net_device *dev = skb->dev;  	const char *driver = ""; +	if (!net_ratelimit()) +		return; +  	if (dev && dev->dev.parent)  		driver = dev_driver_string(dev->dev.parent); @@ -3314,6 +3317,7 @@ int netdev_rx_handler_register(struct net_device *dev,  	if (dev->rx_handler)  		return -EBUSY; +	/* Note: rx_handler_data must be set before rx_handler */  	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);  	rcu_assign_pointer(dev->rx_handler, rx_handler); @@ -3334,6 +3338,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)  	ASSERT_RTNL();  	RCU_INIT_POINTER(dev->rx_handler, NULL); +	/* a reader seeing a non NULL rx_handler in a rcu_read_lock() +	 * section has a guarantee 
to see a non NULL rx_handler_data +	 * as well. +	 */ +	synchronize_net();  	RCU_INIT_POINTER(dev->rx_handler_data, NULL);  }  EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index bd2eb9d3e36..abdc9e6ef33 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,  	ha->type = addr_type;  	ha->refcount = 1;  	ha->global_use = global; -	ha->synced = false; +	ha->synced = 0;  	list_add_tail_rcu(&ha->list, &list->list);  	list->count++; @@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,  					    addr_len, ha->type);  			if (err)  				break; -			ha->synced = true; +			ha->synced++;  			ha->refcount++;  		} else if (ha->refcount == 1) {  			__hw_addr_del(to_list, ha->addr, addr_len, ha->type); @@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,  		if (ha->synced) {  			__hw_addr_del(to_list, ha->addr,  				      addr_len, ha->type); -			ha->synced = false; +			ha->synced--;  			__hw_addr_del(from_list, ha->addr,  				      addr_len, ha->type);  		} diff --git a/net/core/flow.c b/net/core/flow.c index c56ea6f7f6c..2bfd081c59f 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)  	struct flow_flush_info *info = data;  	struct tasklet_struct *tasklet; -	tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); +	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;  	tasklet->data = (unsigned long)info;  	tasklet_schedule(tasklet);  } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5fb8d7e4729..23854b51a25 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)  	}  	if (ops->fill_info) {  		data = nla_nest_start(skb, IFLA_INFO_DATA); -		if (data == NULL) +		if (data == 
NULL) { +			err = -EMSGSIZE;  			goto err_cancel_link; +		}  		err = ops->fill_info(skb, dev);  		if (err < 0)  			goto err_cancel_data; @@ -1070,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)  	rcu_read_lock();  	cb->seq = net->dev_base_seq; -	if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, +	if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,  			ifla_policy) >= 0) {  		if (tb[IFLA_EXT_MASK]) @@ -1920,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)  	u32 ext_filter_mask = 0;  	u16 min_ifinfo_dump_size = 0; -	if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, +	if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,  			ifla_policy) >= 0) {  		if (tb[IFLA_EXT_MASK])  			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f678507bc82..c6287cd978c 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work)  {  	unsigned long now, next, next_sec, next_sched;  	struct in_ifaddr *ifa; +	struct hlist_node *n;  	int i;  	now = jiffies;  	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); -	rcu_read_lock();  	for (i = 0; i < IN4_ADDR_HSIZE; i++) { +		bool change_needed = false; + +		rcu_read_lock();  		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {  			unsigned long age; @@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work)  			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&  			    age >= ifa->ifa_valid_lft) { -				struct in_ifaddr **ifap ; - -				rtnl_lock(); -				for (ifap = &ifa->ifa_dev->ifa_list; -				     *ifap != NULL; ifap = &ifa->ifa_next) { -					if (*ifap == ifa) -						inet_del_ifa(ifa->ifa_dev, -							     ifap, 1); -				} -				rtnl_unlock(); +				change_needed = true;  			} else if (ifa->ifa_preferred_lft ==  				   INFINITY_LIFE_TIME) {  				continue; @@ -625,10 +619,8 @@ static 
void check_lifetime(struct work_struct *work)  					next = ifa->ifa_tstamp +  					       ifa->ifa_valid_lft * HZ; -				if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) { -					ifa->ifa_flags |= IFA_F_DEPRECATED; -					rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); -				} +				if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) +					change_needed = true;  			} else if (time_before(ifa->ifa_tstamp +  					       ifa->ifa_preferred_lft * HZ,  					       next)) { @@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work)  				       ifa->ifa_preferred_lft * HZ;  			}  		} +		rcu_read_unlock(); +		if (!change_needed) +			continue; +		rtnl_lock(); +		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) { +			unsigned long age; + +			if (ifa->ifa_flags & IFA_F_PERMANENT) +				continue; + +			/* We try to batch several events at once. */ +			age = (now - ifa->ifa_tstamp + +			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ; + +			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && +			    age >= ifa->ifa_valid_lft) { +				struct in_ifaddr **ifap; + +				for (ifap = &ifa->ifa_dev->ifa_list; +				     *ifap != NULL; ifap = &(*ifap)->ifa_next) { +					if (*ifap == ifa) { +						inet_del_ifa(ifa->ifa_dev, +							     ifap, 1); +						break; +					} +				} +			} else if (ifa->ifa_preferred_lft != +				   INFINITY_LIFE_TIME && +				   age >= ifa->ifa_preferred_lft && +				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) { +				ifa->ifa_flags |= IFA_F_DEPRECATED; +				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); +			} +		} +		rtnl_unlock();  	} -	rcu_read_unlock();  	next_sec = round_jiffies_up(next);  	next_sched = next; @@ -802,8 +828,12 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg  		if (nlh->nlmsg_flags & NLM_F_EXCL ||  		    !(nlh->nlmsg_flags & NLM_F_REPLACE))  			return -EEXIST; - -		set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft); +		ifa = ifa_existing; +		set_ifa_lifetime(ifa, valid_lft, prefered_lft); +		cancel_delayed_work(&check_lifetime_work); +		
schedule_delayed_work(&check_lifetime_work, 0); +		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); +		blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);  	}  	return 0;  } diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 3b4f0cd2e63..4cfe34d4cc9 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)  	/* skb is pure payload to encrypt */ -	err = -ENOMEM; -  	esp = x->data;  	aead = esp->aead;  	alen = crypto_aead_authsize(aead); @@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)  	}  	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); -	if (!tmp) +	if (!tmp) { +		err = -ENOMEM;  		goto error; +	}  	seqhi = esp_tmp_seqhi(tmp);  	iv = esp_tmp_iv(aead, tmp, seqhilen); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a6445b843ef..52c273ea05c 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -248,8 +248,7 @@ static void ip_expire(unsigned long arg)  		if (!head->dev)  			goto out_rcu_unlock; -		/* skb dst is stale, drop it, and perform route lookup again */ -		skb_dst_drop(head); +		/* skb has no dst, perform route lookup again */  		iph = ip_hdr(head);  		err = ip_route_input_noref(head, iph->daddr, iph->saddr,  					   iph->tos, head->dev); @@ -523,9 +522,16 @@ found:  		qp->q.max_size = skb->len + ihl;  	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && -	    qp->q.meat == qp->q.len) -		return ip_frag_reasm(qp, prev, dev); +	    qp->q.meat == qp->q.len) { +		unsigned long orefdst = skb->_skb_refdst; +		skb->_skb_refdst = 0UL; +		err = ip_frag_reasm(qp, prev, dev); +		skb->_skb_refdst = orefdst; +		return err; +	} + +	skb_dst_drop(skb);  	inet_frag_lru_move(&qp->q);  	return -EINPROGRESS; diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index c30130062cd..c49dcd0284a 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ 
b/net/ipv4/netfilter/ipt_rpfilter.c @@ -66,6 +66,12 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,  	return dev_match;  } +static bool rpfilter_is_local(const struct sk_buff *skb) +{ +	const struct rtable *rt = skb_rtable(skb); +	return rt && (rt->rt_flags & RTCF_LOCAL); +} +  static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)  {  	const struct xt_rpfilter_info *info; @@ -76,7 +82,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)  	info = par->matchinfo;  	invert = info->flags & XT_RPFILTER_INVERT; -	if (par->in->flags & IFF_LOOPBACK) +	if (rpfilter_is_local(skb))  		return true ^ invert;  	iph = ip_hdr(skb); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index ef54377fb11..397e0f69435 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -349,8 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,  	 * hasn't changed since we received the original syn, but I see  	 * no easy way to do this.  	 */ -	flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), -			   RT_SCOPE_UNIVERSE, IPPROTO_TCP, +	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, +			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,  			   inet_sk_flowi_flags(sk),  			   (opt && opt->srr) ? 
opt->faddr : ireq->rmt_addr,  			   ireq->loc_addr, th->source, th->dest); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3bd55bad230..13b9c08fc15 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -113,6 +113,7 @@ int sysctl_tcp_early_retrans __read_mostly = 2;  #define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */  #define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */  #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */ +#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */  #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)  #define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) @@ -3564,6 +3565,27 @@ static void tcp_send_challenge_ack(struct sock *sk)  	}  } +static void tcp_store_ts_recent(struct tcp_sock *tp) +{ +	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; +	tp->rx_opt.ts_recent_stamp = get_seconds(); +} + +static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) +{ +	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { +		/* PAWS bug workaround wrt. ACK frames, the PAWS discard +		 * extra check below makes sure this can only happen +		 * for pure ACK frames.  -DaveM +		 * +		 * Not only, also it occurs for expired timestamps. +		 */ + +		if (tcp_paws_check(&tp->rx_opt, 0)) +			tcp_store_ts_recent(tp); +	} +} +  /* This routine deals with incoming acks, but not outgoing ones. */  static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)  { @@ -3607,6 +3629,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)  	prior_fackets = tp->fackets_out;  	prior_in_flight = tcp_packets_in_flight(tp); +	/* ts_recent update must be made after we are sure that the packet +	 * is in window. +	 */ +	if (flag & FLAG_UPDATE_TS_RECENT) +		tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); +  	if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {  		/* Window is constant, pure forward advance.  		 
* No more checks are required. @@ -3927,27 +3955,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)  EXPORT_SYMBOL(tcp_parse_md5sig_option);  #endif -static inline void tcp_store_ts_recent(struct tcp_sock *tp) -{ -	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; -	tp->rx_opt.ts_recent_stamp = get_seconds(); -} - -static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) -{ -	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { -		/* PAWS bug workaround wrt. ACK frames, the PAWS discard -		 * extra check below makes sure this can only happen -		 * for pure ACK frames.  -DaveM -		 * -		 * Not only, also it occurs for expired timestamps. -		 */ - -		if (tcp_paws_check(&tp->rx_opt, 0)) -			tcp_store_ts_recent(tp); -	} -} -  /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM   *   * It is not fatal. If this ACK does _not_ change critical state (seqs, window) @@ -5543,14 +5550,9 @@ slow_path:  		return 0;  step5: -	if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) +	if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)  		goto discard; -	/* ts_recent update must be made after we are sure that the packet -	 * is in window. -	 */ -	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); -  	tcp_rcv_rtt_measure_ts(sk, skb);  	/* Process urgent data. */ @@ -5986,7 +5988,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,  	/* step 5: check the ACK field */  	if (true) { -		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; +		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | +						  FLAG_UPDATE_TS_RECENT) > 0;  		switch (sk->sk_state) {  		case TCP_SYN_RECV: @@ -6137,11 +6140,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,  		}  	} -	/* ts_recent update must be made after we are sure that the packet -	 * is in window. 
-	 */ -	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); -  	/* step 6: check the URG bit */  	tcp_urg(sk, skb, th); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5d0b4387cba..509912a5ff9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2388,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)  	 */  	TCP_SKB_CB(skb)->when = tcp_time_stamp; -	/* make sure skb->data is aligned on arches that require it */ -	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { +	/* make sure skb->data is aligned on arches that require it +	 * and check if ack-trimming & collapsing extended the headroom +	 * beyond what csum_start can cover. +	 */ +	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || +		     skb_headroom(skb) >= 0xFFFF)) {  		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,  						   GFP_ATOMIC);  		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : @@ -2709,6 +2713,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,  	skb_reserve(skb, MAX_TCP_HEADER);  	skb_dst_set(skb, dst); +	security_skb_owned_by(skb, sk);  	mss = dst_metric_advmss(dst);  	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 26512250e09..dae802c0af7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -168,8 +168,6 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,  static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,  			       struct net_device *dev); -static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); -  static struct ipv6_devconf ipv6_devconf __read_mostly = {  	.forwarding		= 0,  	.hop_limit		= IPV6_DEFAULT_HOPLIMIT, @@ -837,7 +835,7 @@ out2:  	rcu_read_unlock_bh();  	if (likely(err == 0)) -		atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa); +		inet6addr_notifier_call_chain(NETDEV_UP, ifa);  	else {  		kfree(ifa);  		ifa = ERR_PTR(err); @@ -927,7 
+925,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)  	ipv6_ifa_notify(RTM_DELADDR, ifp); -	atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp); +	inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);  	/*  	 * Purge or update corresponding prefix @@ -2529,6 +2527,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)  static void init_loopback(struct net_device *dev)  {  	struct inet6_dev  *idev; +	struct net_device *sp_dev; +	struct inet6_ifaddr *sp_ifa; +	struct rt6_info *sp_rt;  	/* ::1 */ @@ -2540,6 +2541,30 @@ static void init_loopback(struct net_device *dev)  	}  	add_addr(idev, &in6addr_loopback, 128, IFA_HOST); + +	/* Add routes to other interface's IPv6 addresses */ +	for_each_netdev(dev_net(dev), sp_dev) { +		if (!strcmp(sp_dev->name, dev->name)) +			continue; + +		idev = __in6_dev_get(sp_dev); +		if (!idev) +			continue; + +		read_lock_bh(&idev->lock); +		list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { + +			if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) +				continue; + +			sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); + +			/* Failure cases are ignored */ +			if (!IS_ERR(sp_rt)) +				ip6_ins_rt(sp_rt); +		} +		read_unlock_bh(&idev->lock); +	}  }  static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) @@ -2961,7 +2986,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)  		if (state != INET6_IFADDR_STATE_DEAD) {  			__ipv6_ifa_notify(RTM_DELADDR, ifa); -			atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); +			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);  		}  		in6_ifa_put(ifa); @@ -4842,22 +4867,6 @@ static struct pernet_operations addrconf_ops = {  	.exit = addrconf_exit_net,  }; -/* - *      Device notifier - */ - -int register_inet6addr_notifier(struct notifier_block *nb) -{ -	return atomic_notifier_chain_register(&inet6addr_chain, nb); -} -EXPORT_SYMBOL(register_inet6addr_notifier); - -int unregister_inet6addr_notifier(struct notifier_block 
*nb) -{ -	return atomic_notifier_chain_unregister(&inet6addr_chain, nb); -} -EXPORT_SYMBOL(unregister_inet6addr_notifier); -  static struct rtnl_af_ops inet6_ops = {  	.family		  = AF_INET6,  	.fill_link_af	  = inet6_fill_link_af, diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index d051e5f4bf3..72104562c86 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -78,3 +78,22 @@ int __ipv6_addr_type(const struct in6_addr *addr)  }  EXPORT_SYMBOL(__ipv6_addr_type); +static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); + +int register_inet6addr_notifier(struct notifier_block *nb) +{ +	return atomic_notifier_chain_register(&inet6addr_chain, nb); +} +EXPORT_SYMBOL(register_inet6addr_notifier); + +int unregister_inet6addr_notifier(struct notifier_block *nb) +{ +	return atomic_notifier_chain_unregister(&inet6addr_chain, nb); +} +EXPORT_SYMBOL(unregister_inet6addr_notifier); + +int inet6addr_notifier_call_chain(unsigned long val, void *v) +{ +	return atomic_notifier_call_chain(&inet6addr_chain, val, v); +} +EXPORT_SYMBOL(inet6addr_notifier_call_chain); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index e33fe0ab256..2bab2aa5974 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt  	    ipv6_addr_loopback(&hdr->daddr))  		goto err; +	/* RFC4291 Errata ID: 3480 +	 * Interface-Local scope spans only a single interface on a +	 * node and is useful only for loopback transmission of +	 * multicast.  Packets with interface-local scope received +	 * from another node must be discarded. 
+	 */ +	if (!(skb->pkt_type == PACKET_LOOPBACK || +	      dev->flags & IFF_LOOPBACK) && +	    ipv6_addr_is_multicast(&hdr->daddr) && +	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) +		goto err; +  	/* RFC4291 2.7  	 * Nodes must not originate a packet to a multicast address whose scope  	 * field contains the reserved value 0; if such a packet is received, it diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 33608c61027..cb631143721 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c @@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,  		if (pfx_len - i >= 32)  			mask = 0;  		else -			mask = htonl(~((1 << (pfx_len - i)) - 1)); +			mask = htonl((1 << (i - pfx_len + 32)) - 1);  		idx = i / 32;  		addr->s6_addr32[idx] &= mask; diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 5060d54199a..e0983f3648a 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -71,6 +71,12 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,  	return ret;  } +static bool rpfilter_is_local(const struct sk_buff *skb) +{ +	const struct rt6_info *rt = (const void *) skb_dst(skb); +	return rt && (rt->rt6i_flags & RTF_LOCAL); +} +  static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)  {  	const struct xt_rpfilter_info *info = par->matchinfo; @@ -78,7 +84,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)  	struct ipv6hdr *iph;  	bool invert = info->flags & XT_RPFILTER_INVERT; -	if (par->in->flags & IFF_LOOPBACK) +	if (rpfilter_is_local(skb))  		return true ^ invert;  	iph = ipv6_hdr(skb); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 196ab9347ad..0ba10e53a62 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -330,9 +330,17 @@ found:  	}  	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && -	    fq->q.meat == 
fq->q.len) -		return ip6_frag_reasm(fq, prev, dev); +	    fq->q.meat == fq->q.len) { +		int res; +		unsigned long orefdst = skb->_skb_refdst; +		skb->_skb_refdst = 0UL; +		res = ip6_frag_reasm(fq, prev, dev); +		skb->_skb_refdst = orefdst; +		return res; +	} + +	skb_dst_drop(skb);  	inet_frag_lru_move(&fq->q);  	return -1; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f6d629fd6ae..46a5be85be8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,  		if (dst)  			dst->ops->redirect(dst, sk, skb); +		goto out;  	}  	if (type == ICMPV6_PKT_TOOBIG) { diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index d28e7f014cc..e493b3397ae 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,  	IRDA_DEBUG(4, "%s()\n", __func__); +	msg->msg_namelen = 0; +  	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,  				flags & MSG_DONTWAIT, &err);  	if (!skb) diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 29340a9a6fb..e1b37f5a269 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -303,7 +303,8 @@ static void iriap_disconnect_indication(void *instance, void *sap,  {  	struct iriap_cb *self; -	IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); +	IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__, +		   irlmp_reason_str(reason), reason);  	self = instance; diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 6115a44c0a2..1064621da6f 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -66,8 +66,15 @@ const char *irlmp_reasons[] = {  	"LM_LAP_RESET",  	"LM_INIT_DISCONNECT",  	"ERROR, NOT USED", +	"UNKNOWN",  }; +const char *irlmp_reason_str(LM_REASON reason) +{ +	reason = min_t(size_t, reason, ARRAY_SIZE(irlmp_reasons) - 1); +	return irlmp_reasons[reason]; +} +  /*   * Function irlmp_init (void)   * @@ -747,7 +754,8 @@ void 
irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,  {  	struct lsap_cb *lsap; -	IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); +	IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__, +		   irlmp_reason_str(reason), reason);  	IRDA_ASSERT(self != NULL, return;);  	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a7d11ffe428..206ce6db2c3 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =  #define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class)) -/* macros to set/get socket control buffer at correct offset */ -#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */ -#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag)) -#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ -#define CB_TRGCLS_LEN	(TRGCLS_SIZE) -  #define __iucv_sock_wait(sk, condition, timeo, ret)			\  do {									\  	DEFINE_WAIT(__wait);						\ @@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,  	/* increment and save iucv message tag for msg_completion cbk */  	txmsg.tag = iucv->send_tag++; -	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); +	IUCV_SKB_CB(skb)->tag = txmsg.tag;  	if (iucv->transport == AF_IUCV_TRANS_HIPER) {  		atomic_inc(&iucv->msg_sent); @@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)  			return -ENOMEM;  		/* copy target class to control buffer of new skb */ -		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); +		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;  		/* copy data fragment */  		memcpy(nskb->data, skb->data + copied, size); @@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,  	/* store msg target class in the second 4 bytes of skb ctrl buffer */  	/* Note: the first 4 bytes are reserved for msg tag */ -	memcpy(CB_TRGCLS(skb), &msg->class, 
CB_TRGCLS_LEN); +	IUCV_SKB_CB(skb)->class = msg->class;  	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */  	if ((msg->flags & IUCV_IPRMDATA) && len > 7) { @@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,  		}  	} +	IUCV_SKB_CB(skb)->offset = 0;  	if (sock_queue_rcv_skb(sk, skb))  		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);  } @@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	unsigned int copied, rlen;  	struct sk_buff *skb, *rskb, *cskb;  	int err = 0; +	u32 offset; + +	msg->msg_namelen = 0;  	if ((sk->sk_state == IUCV_DISCONN) &&  	    skb_queue_empty(&iucv->backlog_skb_q) && @@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		return err;  	} -	rlen   = skb->len;		/* real length of skb */ +	offset = IUCV_SKB_CB(skb)->offset; +	rlen   = skb->len - offset;		/* real length of skb */  	copied = min_t(unsigned int, rlen, len);  	if (!rlen)  		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;  	cskb = skb; -	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { +	if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {  		if (!(flags & MSG_PEEK))  			skb_queue_head(&sk->sk_receive_queue, skb);  		return -EFAULT; @@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	 * get the trgcls from the control buffer of the skb due to  	 * fragmentation of original iucv message. 
*/  	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, -			CB_TRGCLS_LEN, CB_TRGCLS(skb)); +		       sizeof(IUCV_SKB_CB(skb)->class), +		       (void *)&IUCV_SKB_CB(skb)->class);  	if (err) {  		if (!(flags & MSG_PEEK))  			skb_queue_head(&sk->sk_receive_queue, skb); @@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		/* SOCK_STREAM: re-queue skb if it contains unreceived data */  		if (sk->sk_type == SOCK_STREAM) { -			skb_pull(skb, copied); -			if (skb->len) { -				skb_queue_head(&sk->sk_receive_queue, skb); +			if (copied < rlen) { +				IUCV_SKB_CB(skb)->offset = offset + copied;  				goto done;  			}  		} @@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		spin_lock_bh(&iucv->message_q.lock);  		rskb = skb_dequeue(&iucv->backlog_skb_q);  		while (rskb) { +			IUCV_SKB_CB(rskb)->offset = 0;  			if (sock_queue_rcv_skb(sk, rskb)) {  				skb_queue_head(&iucv->backlog_skb_q,  						rskb); @@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,  		spin_lock_irqsave(&list->lock, flags);  		while (list_skb != (struct sk_buff *)list) { -			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { +			if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {  				this = list_skb;  				break;  			} @@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)  	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));  	skb_reset_transport_header(skb);  	skb_reset_network_header(skb); +	IUCV_SKB_CB(skb)->offset = 0;  	spin_lock(&iucv->message_q.lock);  	if (skb_queue_empty(&iucv->backlog_skb_q)) {  		if (sock_queue_rcv_skb(sk, skb)) { @@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,  		/* fall through and receive zero length data */  	case 0:  		/* plain data frame */ -		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, -		       CB_TRGCLS_LEN); +		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;  		err = 
afiucv_hs_callback_rx(sk, skb);  		break;  	default: diff --git a/net/key/af_key.c b/net/key/af_key.c index 8555f331ea6..5b1e5af2571 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c)  	hdr->sadb_msg_pid = c->portid;  	hdr->sadb_msg_version = PF_KEY_V2;  	hdr->sadb_msg_errno = (uint8_t) 0; +	hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;  	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));  	pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);  	return 0; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index c74f5a91ff6..b8a6039314e 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -690,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,  		lsa->l2tp_addr = ipv6_hdr(skb)->saddr;  		lsa->l2tp_flowinfo = 0;  		lsa->l2tp_scope_id = 0; +		lsa->l2tp_conn_id = 0;  		if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)  			lsa->l2tp_scope_id = IP6CB(skb)->iif;  	} diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 88709882c46..48aaa89253e 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,  	int target;	/* Read at least this many bytes */  	long timeo; +	msg->msg_namelen = 0; +  	lock_sock(sk);  	copied = -ENOTCONN;  	if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fb306814576..a6893602f87 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2582,7 +2582,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,  			list_del(&dep->list);  			mutex_unlock(&local->mtx); -			ieee80211_roc_notify_destroy(dep); +			ieee80211_roc_notify_destroy(dep, true);  			return 0;  		} @@ -2622,7 +2622,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,  			ieee80211_start_next_roc(local);  		mutex_unlock(&local->mtx); -		
ieee80211_roc_notify_destroy(found); +		ieee80211_roc_notify_destroy(found, true);  	} else {  		/* work may be pending so use it all the time */  		found->abort = true; @@ -2632,6 +2632,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,  		/* work will clean up etc */  		flush_delayed_work(&found->work); +		WARN_ON(!found->to_be_freed); +		kfree(found);  	}  	return 0; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 78c0d90dd64..931be419ab5 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -63,6 +63,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local,  		      enum ieee80211_chanctx_mode mode)  {  	struct ieee80211_chanctx *ctx; +	u32 changed;  	int err;  	lockdep_assert_held(&local->chanctx_mtx); @@ -76,6 +77,13 @@ ieee80211_new_chanctx(struct ieee80211_local *local,  	ctx->conf.rx_chains_dynamic = 1;  	ctx->mode = mode; +	/* acquire mutex to prevent idle from changing */ +	mutex_lock(&local->mtx); +	/* turn idle off *before* setting channel -- some drivers need that */ +	changed = ieee80211_idle_off(local); +	if (changed) +		ieee80211_hw_config(local, changed); +  	if (!local->use_chanctx) {  		local->_oper_channel_type =  			cfg80211_get_chandef_type(chandef); @@ -85,14 +93,17 @@ ieee80211_new_chanctx(struct ieee80211_local *local,  		err = drv_add_chanctx(local, ctx);  		if (err) {  			kfree(ctx); -			return ERR_PTR(err); +			ctx = ERR_PTR(err); + +			ieee80211_recalc_idle(local); +			goto out;  		}  	} +	/* and keep the mutex held until the new chanctx is on the list */  	list_add_rcu(&ctx->list, &local->chanctx_list); -	mutex_lock(&local->mtx); -	ieee80211_recalc_idle(local); + out:  	mutex_unlock(&local->mtx);  	return ctx; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 388580a1bad..5672533a083 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -309,6 +309,7 @@ struct ieee80211_roc_work {  	struct ieee80211_channel *chan;  	bool started, abort, hw_begun, 
notified; +	bool to_be_freed;  	unsigned long hw_start_time; @@ -1347,7 +1348,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local);  void ieee80211_roc_setup(struct ieee80211_local *local);  void ieee80211_start_next_roc(struct ieee80211_local *local);  void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); -void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc); +void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);  void ieee80211_sw_roc_work(struct work_struct *work);  void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); @@ -1361,6 +1362,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,  			     enum nl80211_iftype type);  void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);  void ieee80211_remove_interfaces(struct ieee80211_local *local); +u32 ieee80211_idle_off(struct ieee80211_local *local);  void ieee80211_recalc_idle(struct ieee80211_local *local);  void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,  				    const int offset); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index baaa8608e52..9ed49ad0380 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)  		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);  } -static u32 ieee80211_idle_off(struct ieee80211_local *local) +static u32 __ieee80211_idle_off(struct ieee80211_local *local)  {  	if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))  		return 0; @@ -87,7 +87,7 @@ static u32 ieee80211_idle_off(struct ieee80211_local *local)  	return IEEE80211_CONF_CHANGE_IDLE;  } -static u32 ieee80211_idle_on(struct ieee80211_local *local) +static u32 __ieee80211_idle_on(struct ieee80211_local *local)  {  	if (local->hw.conf.flags & IEEE80211_CONF_IDLE)  		return 0; @@ -98,16 +98,18 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)  	return IEEE80211_CONF_CHANGE_IDLE;  
} -void ieee80211_recalc_idle(struct ieee80211_local *local) +static u32 __ieee80211_recalc_idle(struct ieee80211_local *local, +				   bool force_active)  {  	bool working = false, scanning, active;  	unsigned int led_trig_start = 0, led_trig_stop = 0;  	struct ieee80211_roc_work *roc; -	u32 change;  	lockdep_assert_held(&local->mtx); -	active = !list_empty(&local->chanctx_list) || local->monitors; +	active = force_active || +		 !list_empty(&local->chanctx_list) || +		 local->monitors;  	if (!local->ops->remain_on_channel) {  		list_for_each_entry(roc, &local->roc_list, list) { @@ -132,9 +134,18 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)  	ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);  	if (working || scanning || active) -		change = ieee80211_idle_off(local); -	else -		change = ieee80211_idle_on(local); +		return __ieee80211_idle_off(local); +	return __ieee80211_idle_on(local); +} + +u32 ieee80211_idle_off(struct ieee80211_local *local) +{ +	return __ieee80211_recalc_idle(local, true); +} + +void ieee80211_recalc_idle(struct ieee80211_local *local) +{ +	u32 change = __ieee80211_recalc_idle(local, false);  	if (change)  		ieee80211_hw_config(local, change);  } @@ -349,21 +360,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)  static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)  {  	struct ieee80211_sub_if_data *sdata; -	int ret = 0; +	int ret;  	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))  		return 0; -	mutex_lock(&local->iflist_mtx); +	ASSERT_RTNL();  	if (local->monitor_sdata) -		goto out_unlock; +		return 0;  	sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); -	if (!sdata) { -		ret = -ENOMEM; -		goto out_unlock; -	} +	if (!sdata) +		return -ENOMEM;  	/* set up data */  	sdata->local = local; @@ -377,13 +386,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)  	if (WARN_ON(ret)) {  		/* ok .. 
stupid driver, it asked for this! */  		kfree(sdata); -		goto out_unlock; +		return ret;  	}  	ret = ieee80211_check_queues(sdata);  	if (ret) {  		kfree(sdata); -		goto out_unlock; +		return ret;  	}  	ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, @@ -391,13 +400,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)  	if (ret) {  		drv_remove_interface(local, sdata);  		kfree(sdata); -		goto out_unlock; +		return ret;  	} +	mutex_lock(&local->iflist_mtx);  	rcu_assign_pointer(local->monitor_sdata, sdata); - out_unlock:  	mutex_unlock(&local->iflist_mtx); -	return ret; + +	return 0;  }  static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) @@ -407,14 +417,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)  	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))  		return; +	ASSERT_RTNL(); +  	mutex_lock(&local->iflist_mtx);  	sdata = rcu_dereference_protected(local->monitor_sdata,  					  lockdep_is_held(&local->iflist_mtx)); -	if (!sdata) -		goto out_unlock; +	if (!sdata) { +		mutex_unlock(&local->iflist_mtx); +		return; +	}  	rcu_assign_pointer(local->monitor_sdata, NULL); +	mutex_unlock(&local->iflist_mtx); +  	synchronize_net();  	ieee80211_vif_release_channel(sdata); @@ -422,8 +438,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)  	drv_remove_interface(local, sdata);  	kfree(sdata); - out_unlock: -	mutex_unlock(&local->iflist_mtx);  }  /* diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 29ce2aa87e7..4749b385869 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)  	rcu_read_lock();  	list_for_each_entry_rcu(sdata, &local->interfaces, list) -		if (ieee80211_vif_is_mesh(&sdata->vif)) +		if (ieee80211_vif_is_mesh(&sdata->vif) && +		    ieee80211_sdata_running(sdata))  			ieee80211_queue_work(&local->hw, &sdata->work);  	
rcu_read_unlock();  } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 141577412d8..346ad4cfb01 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3608,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)  	/* Restart STA timers */  	rcu_read_lock(); -	list_for_each_entry_rcu(sdata, &local->interfaces, list) -		ieee80211_restart_sta_timer(sdata); +	list_for_each_entry_rcu(sdata, &local->interfaces, list) { +		if (ieee80211_sdata_running(sdata)) +			ieee80211_restart_sta_timer(sdata); +	}  	rcu_read_unlock();  } @@ -3962,8 +3964,16 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,  	/* prep auth_data so we don't go into idle on disassoc */  	ifmgd->auth_data = auth_data; -	if (ifmgd->associated) -		ieee80211_set_disassoc(sdata, 0, 0, false, NULL); +	if (ifmgd->associated) { +		u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + +		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, +				       WLAN_REASON_UNSPECIFIED, +				       false, frame_buf); + +		__cfg80211_send_deauth(sdata->dev, frame_buf, +				       sizeof(frame_buf)); +	}  	sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@ -4023,8 +4033,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,  	mutex_lock(&ifmgd->mtx); -	if (ifmgd->associated) -		ieee80211_set_disassoc(sdata, 0, 0, false, NULL); +	if (ifmgd->associated) { +		u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + +		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, +				       WLAN_REASON_UNSPECIFIED, +				       false, frame_buf); + +		__cfg80211_send_deauth(sdata->dev, frame_buf, +				       sizeof(frame_buf)); +	}  	if (ifmgd->auth_data && !ifmgd->auth_data->done) {  		err = -EBUSY; diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index cc79b4a2e82..430bd254e49 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -297,10 +297,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)  	}  } -void 
ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) +void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)  {  	struct ieee80211_roc_work *dep, *tmp; +	if (WARN_ON(roc->to_be_freed)) +		return; +  	/* was never transmitted */  	if (roc->frame) {  		cfg80211_mgmt_tx_status(&roc->sdata->wdev, @@ -316,9 +319,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)  						   GFP_KERNEL);  	list_for_each_entry_safe(dep, tmp, &roc->dependents, list) -		ieee80211_roc_notify_destroy(dep); +		ieee80211_roc_notify_destroy(dep, true); -	kfree(roc); +	if (free) +		kfree(roc); +	else +		roc->to_be_freed = true;  }  void ieee80211_sw_roc_work(struct work_struct *work) @@ -331,6 +337,9 @@ void ieee80211_sw_roc_work(struct work_struct *work)  	mutex_lock(&local->mtx); +	if (roc->to_be_freed) +		goto out_unlock; +  	if (roc->abort)  		goto finish; @@ -370,7 +379,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)   finish:  		list_del(&roc->list);  		started = roc->started; -		ieee80211_roc_notify_destroy(roc); +		ieee80211_roc_notify_destroy(roc, !roc->abort);  		if (started) {  			drv_flush(local, false); @@ -410,7 +419,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work)  	list_del(&roc->list); -	ieee80211_roc_notify_destroy(roc); +	ieee80211_roc_notify_destroy(roc, true);  	/* if there's another roc, start it now */  	ieee80211_start_next_roc(local); @@ -460,12 +469,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)  	list_for_each_entry_safe(roc, tmp, &tmp_list, list) {  		if (local->ops->remain_on_channel) {  			list_del(&roc->list); -			ieee80211_roc_notify_destroy(roc); +			ieee80211_roc_notify_destroy(roc, true);  		} else {  			ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);  			/* work will clean up etc */  			flush_delayed_work(&roc->work); +			WARN_ON(!roc->to_be_freed); +			kfree(roc);  		}  	} diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb73ed2d20b..c6844ad080b 
100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)  		memset(nskb->cb, 0, sizeof(nskb->cb)); -		ieee80211_tx_skb(rx->sdata, nskb); +		if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { +			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); + +			info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | +				      IEEE80211_TX_INTFL_OFFCHAN_TX_OK | +				      IEEE80211_TX_CTL_NO_CCK_RATE; +			if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) +				info->hw_queue = +					local->hw.offchannel_tx_hw_queue; +		} + +		__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, +					    status->band);  	}  	dev_kfree_skb(rx->skb);  	return RX_QUEUED; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a79ce820cb5..238a0cca320 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)  	struct ieee80211_local *local;  	struct ieee80211_sub_if_data *sdata;  	int ret, i; +	bool have_key = false;  	might_sleep(); @@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta)  	list_del_rcu(&sta->list);  	mutex_lock(&local->key_mtx); -	for (i = 0; i < NUM_DEFAULT_KEYS; i++) +	for (i = 0; i < NUM_DEFAULT_KEYS; i++) {  		__ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); -	if (sta->ptk) +		have_key = true; +	} +	if (sta->ptk) {  		__ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); +		have_key = true; +	}  	mutex_unlock(&local->key_mtx); +	if (!have_key) +		synchronize_net(); +  	sta->dead = true;  	local->num_sta--; diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 0f92dc24cb8..d7df6ac2c6f 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -339,7 +339,11 @@ bitmap_ipmac_tlist(const struct ip_set *set,  nla_put_failure:  	nla_nest_cancel(skb, nested);  	
ipset_nest_end(skb, atd); -	return -EMSGSIZE; +	if (unlikely(id == first)) { +		cb->args[2] = 0; +		return -EMSGSIZE; +	} +	return 0;  }  static int diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index f2627226a08..10a30b4fc7d 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -104,6 +104,15 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)  	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  } +static inline void +hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} +  static inline int  hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)  { @@ -414,6 +423,15 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)  	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  } +static inline void +hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} +  static inline int  hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)  { diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 4b677cf6bf7..d6a59154d71 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -87,7 +87,16 @@ hash_net4_data_copy(struct hash_net4_elem *dst,  static inline void  hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)  { -	dst->nomatch = flags & IPSET_FLAG_NOMATCH; +	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); +} + +static inline void +hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	}  }  static inline int @@ -308,7 +317,16 @@ hash_net6_data_copy(struct hash_net6_elem *dst,  static inline void  
hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)  { -	dst->nomatch = flags & IPSET_FLAG_NOMATCH; +	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); +} + +static inline void +hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	}  }  static inline int diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 6ba985f1c96..f2b0a3c3013 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -198,7 +198,16 @@ hash_netiface4_data_copy(struct hash_netiface4_elem *dst,  static inline void  hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)  { -	dst->nomatch = flags & IPSET_FLAG_NOMATCH; +	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); +} + +static inline void +hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	}  }  static inline int @@ -494,7 +503,7 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,  static inline void  hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)  { -	dst->nomatch = flags & IPSET_FLAG_NOMATCH; +	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  }  static inline int @@ -504,6 +513,15 @@ hash_netiface6_data_match(const struct hash_netiface6_elem *elem)  }  static inline void +hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} + +static inline void  hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)  {  	elem->elem = 0; diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index af20c0c5ced..349deb672a2 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -104,6 +104,15 @@ 
hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)  	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  } +static inline void +hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} +  static inline int  hash_netport4_data_match(const struct hash_netport4_elem *elem)  { @@ -375,6 +384,15 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)  	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  } +static inline void +hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} +  static inline int  hash_netport6_data_match(const struct hash_netport6_elem *elem)  { diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 8371c2bac2e..09c744aa898 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,  {  	const struct set_elem *e = list_set_elem(map, i); -	if (i == map->size - 1 && e->id != IPSET_INVALID_ID) -		/* Last element replaced: e.g. 
add new,before,last */ -		ip_set_put_byindex(e->id); +	if (e->id != IPSET_INVALID_ID) { +		const struct set_elem *x = list_set_elem(map, map->size - 1); + +		/* Last element replaced or pushed off */ +		if (x->id != IPSET_INVALID_ID) +			ip_set_put_byindex(x->id); +	}  	if (with_timeout(map->timeout))  		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));  	else diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 0e7d423324c..e0c4373b474 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -1593,10 +1593,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,  		end += strlen("\r\n\r\n") + clen;  		msglen = origlen = end - dptr; -		if (msglen > datalen) { -			nf_ct_helper_log(skb, ct, "incomplete/bad SIP message"); -			return NF_DROP; -		} +		if (msglen > datalen) +			return NF_ACCEPT;  		ret = process_sip_msg(skb, ct, protoff, dataoff,  				      &dptr, &msglen); diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6bcce401fd1..fedee394366 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -568,6 +568,7 @@ static int __init nf_conntrack_standalone_init(void)  		register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);  	if (!nf_ct_netfilter_header) {  		pr_err("nf_conntrack: can't register to sysctl.\n"); +		ret = -ENOMEM;  		goto out_sysctl;  	}  #endif diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 8d5769c6d16..ad24be070e5 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -467,33 +467,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet);  struct nf_nat_proto_clean {  	u8	l3proto;  	u8	l4proto; -	bool	hash;  }; -/* Clear NAT section of all conntracks, in case we're loaded again. 
*/ -static int nf_nat_proto_clean(struct nf_conn *i, void *data) +/* kill conntracks with affected NAT section */ +static int nf_nat_proto_remove(struct nf_conn *i, void *data)  {  	const struct nf_nat_proto_clean *clean = data;  	struct nf_conn_nat *nat = nfct_nat(i);  	if (!nat)  		return 0; -	if (!(i->status & IPS_SRC_NAT_DONE)) -		return 0; +  	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||  	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))  		return 0; -	if (clean->hash) { -		spin_lock_bh(&nf_nat_lock); -		hlist_del_rcu(&nat->bysource); -		spin_unlock_bh(&nf_nat_lock); -	} else { -		memset(nat, 0, sizeof(*nat)); -		i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | -			       IPS_SEQ_ADJUST); -	} -	return 0; +	return i->status & IPS_NAT_MASK ? 1 : 0;  }  static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) @@ -505,16 +494,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)  	struct net *net;  	rtnl_lock(); -	/* Step 1 - remove from bysource hash */ -	clean.hash = true;  	for_each_net(net) -		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); -	synchronize_rcu(); - -	/* Step 2 - clean NAT section */ -	clean.hash = false; -	for_each_net(net) -		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); +		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);  	rtnl_unlock();  } @@ -526,16 +507,9 @@ static void nf_nat_l3proto_clean(u8 l3proto)  	struct net *net;  	rtnl_lock(); -	/* Step 1 - remove from bysource hash */ -	clean.hash = true; -	for_each_net(net) -		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); -	synchronize_rcu(); -	/* Step 2 - clean NAT section */ -	clean.hash = false;  	for_each_net(net) -		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); +		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);  	rtnl_unlock();  } @@ -773,7 +747,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)  {  	struct nf_nat_proto_clean clean = {}; -	nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, 
&clean); +	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);  	synchronize_rcu();  	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);  } diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 589d686f0b4..dc3fd5d4446 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,  		return -EINVAL;  	acct_name = nla_data(tb[NFACCT_NAME]); +	if (strlen(acct_name) == 0) +		return -EINVAL;  	list_for_each_entry(nfacct, &nfnl_acct_list, head) {  		if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 1cb48540f86..42680b2baa1 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -1062,8 +1062,10 @@ static int __init nfnetlink_queue_init(void)  #ifdef CONFIG_PROC_FS  	if (!proc_create("nfnetlink_queue", 0440, -			 proc_net_netfilter, &nfqnl_file_ops)) +			 proc_net_netfilter, &nfqnl_file_ops)) { +		status = -ENOMEM;  		goto cleanup_subsys; +	}  #endif  	register_netdevice_notifier(&nfqnl_dev_notifier); diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index d1fa1d9ffd2..103bd704b5f 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,  	}  	if (sax != NULL) { +		memset(sax, 0, sizeof(*sax));  		sax->sax25_family = AF_NETROM;  		skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,  			      AX25_ADDR_LEN); diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index b530afadd76..ee25f25f0cd 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -107,8 +107,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,  				accept_sk->sk_state_change(sk);  				bh_unlock_sock(accept_sk); - -				sock_orphan(accept_sk);  			}  			if (listen == 
true) { @@ -134,8 +132,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,  		bh_unlock_sock(sk); -		sock_orphan(sk); -  		sk_del_node_init(sk);  	} @@ -164,8 +160,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,  		bh_unlock_sock(sk); -		sock_orphan(sk); -  		sk_del_node_init(sk);  	} @@ -827,7 +821,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local,  		skb_get(skb);  	} else {  		pr_err("Receive queue is full\n"); -		kfree_skb(skb);  	}  	nfc_llcp_sock_put(llcp_sock); @@ -1028,7 +1021,6 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,  			skb_get(skb);  		} else {  			pr_err("Receive queue is full\n"); -			kfree_skb(skb);  		}  	} diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 5c7cdf3f2a8..6c94447ec41 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c @@ -270,7 +270,9 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,  		}  		if (sk->sk_state == LLCP_CONNECTED || !newsock) { -			nfc_llcp_accept_unlink(sk); +			list_del_init(&lsk->accept_queue); +			sock_put(sk); +  			if (newsock)  				sock_graft(sk, newsock); @@ -464,8 +466,6 @@ static int llcp_sock_release(struct socket *sock)  			nfc_llcp_accept_unlink(accept_sk);  			release_sock(accept_sk); - -			sock_orphan(accept_sk);  		}  	} @@ -646,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	pr_debug("%p %zu\n", sk, len); +	msg->msg_namelen = 0; +  	lock_sock(sk);  	if (sk->sk_state == LLCP_CLOSED && @@ -691,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); +		memset(sockaddr, 0, sizeof(*sockaddr));  		sockaddr->sa_family = AF_NFC;  		sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;  		sockaddr->dsap = ui_cb->dsap; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index a4b724708a1..6980c3e6f06 100644 --- a/net/openvswitch/datapath.c +++ 
b/net/openvswitch/datapath.c @@ -1593,10 +1593,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,  		return ERR_PTR(-ENOMEM);  	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); -	if (retval < 0) { -		kfree_skb(skb); -		return ERR_PTR(retval); -	} +	BUG_ON(retval < 0); +  	return skb;  } @@ -1726,24 +1724,32 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)  	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)  		err = -EINVAL; +	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); +	if (!reply) { +		err = -ENOMEM; +		goto exit_unlock; +	} +  	if (!err && a[OVS_VPORT_ATTR_OPTIONS])  		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);  	if (err) -		goto exit_unlock; +		goto exit_free; +  	if (a[OVS_VPORT_ATTR_UPCALL_PID])  		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); -	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, -					 OVS_VPORT_CMD_NEW); -	if (IS_ERR(reply)) { -		netlink_set_err(sock_net(skb->sk)->genl_sock, 0, -				ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); -		goto exit_unlock; -	} +	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, +				      info->snd_seq, 0, OVS_VPORT_CMD_NEW); +	BUG_ON(err < 0);  	genl_notify(reply, genl_info_net(info), info->snd_portid,  		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); +	rtnl_unlock(); +	return 0; + +exit_free: +	kfree_skb(reply);  exit_unlock:  	rtnl_unlock();  	return err; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index fe0e4215c73..67a2b783fe7 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -795,9 +795,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)  void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)  { +	BUG_ON(table->count == 0);  	hlist_del_rcu(&flow->hash_node[table->node_ver]);  	table->count--; -	BUG_ON(table->count < 0);  }  /* The size of the 
argument for each %OVS_KEY_ATTR_* Netlink attribute.  */ diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index cf68e6e4054..9c834745159 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,  	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);  	if (srose != NULL) { +		memset(srose, 0, msg->msg_namelen);  		srose->srose_family = AF_ROSE;  		srose->srose_addr   = rose->dest_addr;  		srose->srose_call   = rose->dest_call; diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 1135d8227f9..9b97172db84 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -204,7 +204,6 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,  	if (err < 0)  		return err; -	err = -EINVAL;  	if (tb[TCA_FW_CLASSID]) {  		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);  		tcf_bind_filter(tp, &f->res, base); @@ -218,6 +217,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,  	}  #endif /* CONFIG_NET_CLS_IND */ +	err = -EINVAL;  	if (tb[TCA_FW_MASK]) {  		mask = nla_get_u32(tb[TCA_FW_MASK]);  		if (mask != head->mask) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 13aa47aa2ff..1bc210ffcba 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch)  		cbq_update(q);  		if ((incr -= incr2) < 0)  			incr = 0; +		q->now += incr; +	} else { +		if (now > q->now) +			q->now = now;  	} -	q->now += incr;  	q->now_rt = now;  	for (;;) { diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 4e606fcb253..55786283a3d 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)  		flow->deficit = q->quantum;  		flow->dropped = 0;  	} -	if (++sch->q.qlen < sch->limit) +	if (++sch->q.qlen <= sch->limit)  		return NET_XMIT_SUCCESS;  	q->drop_overlimit++; diff --git 
a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ffad48109a2..eac7e0ee23c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)  	u64 mult;  	int shift; -	r->rate_bps = rate << 3; +	r->rate_bps = (u64)rate << 3;  	r->shift = 0;  	r->mult = 1;  	/* diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index dcc446e7fbf..d5f35f15af9 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru  	err = rpciod_up();  	if (err)  		goto out_no_rpciod; -	err = -EINVAL; -	if (!xprt) -		goto out_no_xprt; +	err = -EINVAL;  	if (args->version >= program->nrvers)  		goto out_err;  	version = program->version[args->version]; @@ -382,10 +380,9 @@ out_no_principal:  out_no_stats:  	kfree(clnt);  out_err: -	xprt_put(xprt); -out_no_xprt:  	rpciod_down();  out_no_rpciod: +	xprt_put(xprt);  	return ERR_PTR(err);  } @@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,  	new = rpc_new_client(args, xprt);  	if (IS_ERR(new)) {  		err = PTR_ERR(new); -		goto out_put; +		goto out_err;  	}  	atomic_inc(&clnt->cl_count); @@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,  	new->cl_chatty = clnt->cl_chatty;  	return new; -out_put: -	xprt_put(xprt);  out_err:  	dprintk("RPC:       %s: returned error %d\n", __func__, err);  	return ERR_PTR(err); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a9622b6cd91..515ce38e4f4 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)  	if (addr) {  		addr->family = AF_TIPC;  		addr->addrtype = TIPC_ADDR_ID; +		memset(&addr->addr, 0, sizeof(addr->addr));  		addr->addr.id.ref = msg_origport(msg);  		addr->addr.id.node = msg_orignode(msg);  		addr->addr.name.domain = 0;	/* could leave 
uninitialized */ @@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,  		goto exit;  	} +	/* will be updated in set_orig_addr() if needed */ +	m->msg_namelen = 0; +  	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);  restart: @@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,  		goto exit;  	} +	/* will be updated in set_orig_addr() if needed */ +	m->msg_namelen = 0; +  	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);  	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 971282b6f6a..2db702d82e7 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1412,8 +1412,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,  	if (UNIXCB(skb).cred)  		return;  	if (test_bit(SOCK_PASSCRED, &sock->flags) || -	    (other->sk_socket && -	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags))) { +	    !other->sk_socket || +	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {  		UNIXCB(skb).pid  = get_pid(task_tgid(current));  		UNIXCB(skb).cred = get_current_cred();  	} @@ -1993,7 +1993,7 @@ again:  			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||  			    (UNIXCB(skb).cred != siocb->scm->cred))  				break; -		} else { +		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {  			/* Copy credentials */  			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);  			check_creds = 1; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index ca511c4f388..7f93e2a42d7 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)  	struct vsock_sock *vsk;  	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) -		if (vsock_addr_equals_addr_any(addr, &vsk->local_addr)) +		if (addr->svm_port == vsk->local_addr.svm_port)  			return sk_vsock(vsk);  	return NULL; @@ -220,8 +220,8 @@ static struct sock 
*__vsock_find_connected_socket(struct sockaddr_vm *src,  	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),  			    connected_table) { -		if (vsock_addr_equals_addr(src, &vsk->remote_addr) -		    && vsock_addr_equals_addr(dst, &vsk->local_addr)) { +		if (vsock_addr_equals_addr(src, &vsk->remote_addr) && +		    dst->svm_port == vsk->local_addr.svm_port) {  			return sk_vsock(vsk);  		}  	} @@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb,  	vsk = vsock_sk(sk);  	err = 0; +	msg->msg_namelen = 0; +  	lock_sock(sk);  	if (sk->sk_state != SS_CONNECTED) { diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index a70ace83a15..5e04d3d9628 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending(  	struct vsock_sock *vlistener;  	struct vsock_sock *vpending;  	struct sock *pending; +	struct sockaddr_vm src; + +	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);  	vlistener = vsock_sk(listener);  	list_for_each_entry(vpending, &vlistener->pending_links,  			    pending_links) { -		struct sockaddr_vm src; -		struct sockaddr_vm dst; - -		vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); -		vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port); -  		if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && -		    vsock_addr_equals_addr(&dst, &vpending->local_addr)) { +		    pkt->dst_port == vpending->local_addr.svm_port) {  			pending = sk_vsock(vpending);  			sock_hold(pending);  			goto found; @@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)  	 */  	bh_lock_sock(sk); -	if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED) -		vmci_trans(vsk)->notify_ops->handle_notify_pkt( -				sk, pkt, true, &dst, &src, -				&bh_process_pkt); +	if (!sock_owned_by_user(sk)) { +		/* The local context ID may be out of date, update it. 
*/ +		vsk->local_addr.svm_cid = dst.svm_cid; + +		if (sk->sk_state == SS_CONNECTED) +			vmci_trans(vsk)->notify_ops->handle_notify_pkt( +					sk, pkt, true, &dst, &src, +					&bh_process_pkt); +	}  	bh_unlock_sock(sk); @@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work)  	lock_sock(sk); +	/* The local context ID may be out of date. */ +	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context; +  	switch (sk->sk_state) {  	case SS_LISTEN:  		vmci_transport_recv_listen(sk, pkt); @@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk,  	pending = vmci_transport_get_pending(sk, pkt);  	if (pending) {  		lock_sock(pending); + +		/* The local context ID may be out of date. */ +		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context; +  		switch (pending->sk_state) {  		case SS_CONNECTING:  			err = vmci_transport_recv_connecting_server(sk, @@ -1727,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,  	if (flags & MSG_OOB || flags & MSG_ERRQUEUE)  		return -EOPNOTSUPP; +	msg->msg_namelen = 0; +  	/* Retrieve the head sk_buff from the socket's receive queue. 
*/  	err = 0;  	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); @@ -1759,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,  	if (err)  		goto out; -	msg->msg_namelen = 0;  	if (msg->msg_name) {  		struct sockaddr_vm *vm_addr; diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c index b7df1aea7c5..ec2611b4ea0 100644 --- a/net/vmw_vsock/vsock_addr.c +++ b/net/vmw_vsock/vsock_addr.c @@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,  }  EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); -bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, -				const struct sockaddr_vm *other) -{ -	return (addr->svm_cid == VMADDR_CID_ANY || -		other->svm_cid == VMADDR_CID_ANY || -		addr->svm_cid == other->svm_cid) && -	       addr->svm_port == other->svm_port; -} -EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any); -  int vsock_addr_cast(const struct sockaddr *addr,  		    size_t len, struct sockaddr_vm **out_addr)  { diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h index cdfbcefdf84..9ccd5316eac 100644 --- a/net/vmw_vsock/vsock_addr.h +++ b/net/vmw_vsock/vsock_addr.h @@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr);  void vsock_addr_unbind(struct sockaddr_vm *addr);  bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,  			    const struct sockaddr_vm *other); -bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, -				const struct sockaddr_vm *other);  int vsock_addr_cast(const struct sockaddr *addr, size_t len,  		    struct sockaddr_vm **out_addr); diff --git a/net/wireless/core.c b/net/wireless/core.c index ea4155fe973..6ddf74f0ae1 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)  	rdev_rfkill_poll(rdev);  } +void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, +			      struct wireless_dev *wdev) +{ +	
lockdep_assert_held(&rdev->devlist_mtx); +	lockdep_assert_held(&rdev->sched_scan_mtx); + +	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) +		return; + +	if (!wdev->p2p_started) +		return; + +	rdev_stop_p2p_device(rdev, wdev); +	wdev->p2p_started = false; + +	rdev->opencount--; + +	if (rdev->scan_req && rdev->scan_req->wdev == wdev) { +		bool busy = work_busy(&rdev->scan_done_wk); + +		/* +		 * If the work isn't pending or running (in which case it would +		 * be waiting for the lock we hold) the driver didn't properly +		 * cancel the scan when the interface was removed. In this case +		 * warn and leak the scan request object to not crash later. +		 */ +		WARN_ON(!busy); + +		rdev->scan_req->aborted = true; +		___cfg80211_scan_done(rdev, !busy); +	} +} +  static int cfg80211_rfkill_set_block(void *data, bool blocked)  {  	struct cfg80211_registered_device *rdev = data; @@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)  		return 0;  	rtnl_lock(); -	mutex_lock(&rdev->devlist_mtx); + +	/* read-only iteration need not hold the devlist_mtx */  	list_for_each_entry(wdev, &rdev->wdev_list, list) {  		if (wdev->netdev) { @@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)  		/* otherwise, check iftype */  		switch (wdev->iftype) {  		case NL80211_IFTYPE_P2P_DEVICE: -			if (!wdev->p2p_started) -				break; -			rdev_stop_p2p_device(rdev, wdev); -			wdev->p2p_started = false; -			rdev->opencount--; +			/* but this requires it */ +			mutex_lock(&rdev->devlist_mtx); +			mutex_lock(&rdev->sched_scan_mtx); +			cfg80211_stop_p2p_device(rdev, wdev); +			mutex_unlock(&rdev->sched_scan_mtx); +			mutex_unlock(&rdev->devlist_mtx);  			break;  		default:  			break;  		}  	} -	mutex_unlock(&rdev->devlist_mtx);  	rtnl_unlock();  	return 0; @@ -745,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work)  	wdev = container_of(work, struct wireless_dev, cleanup_work);  	rdev = wiphy_to_dev(wdev->wiphy); 
-	cfg80211_lock_rdev(rdev); +	mutex_lock(&rdev->sched_scan_mtx);  	if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {  		rdev->scan_req->aborted = true;  		___cfg80211_scan_done(rdev, true);  	} -	cfg80211_unlock_rdev(rdev); - -	mutex_lock(&rdev->sched_scan_mtx); -  	if (WARN_ON(rdev->sched_scan_req &&  		    rdev->sched_scan_req->dev == wdev->netdev)) {  		__cfg80211_stop_sched_scan(rdev, false); @@ -781,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)  		return;  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	list_del_rcu(&wdev->list);  	rdev->devlist_generation++;  	switch (wdev->iftype) {  	case NL80211_IFTYPE_P2P_DEVICE: -		if (!wdev->p2p_started) -			break; -		rdev_stop_p2p_device(rdev, wdev); -		wdev->p2p_started = false; -		rdev->opencount--; +		cfg80211_stop_p2p_device(rdev, wdev);  		break;  	default:  		WARN_ON_ONCE(1);  		break;  	} +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  }  EXPORT_SYMBOL(cfg80211_unregister_wdev); @@ -936,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,  		cfg80211_update_iface_num(rdev, wdev->iftype, 1);  		cfg80211_lock_rdev(rdev);  		mutex_lock(&rdev->devlist_mtx); +		mutex_lock(&rdev->sched_scan_mtx);  		wdev_lock(wdev);  		switch (wdev->iftype) {  #ifdef CONFIG_CFG80211_WEXT @@ -967,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,  			break;  		}  		wdev_unlock(wdev); +		mutex_unlock(&rdev->sched_scan_mtx);  		rdev->opencount++;  		mutex_unlock(&rdev->devlist_mtx);  		cfg80211_unlock_rdev(rdev); diff --git a/net/wireless/core.h b/net/wireless/core.h index 3aec0e429d8..5845c2b37aa 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,  void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,  			       enum nl80211_iftype iftype, int num); +void 
cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, +			      struct wireless_dev *wdev); +  #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10  #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d44ab216c0e..58e13a8c95f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4702,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)  	if (!rdev->ops->scan)  		return -EOPNOTSUPP; -	if (rdev->scan_req) -		return -EBUSY; +	mutex_lock(&rdev->sched_scan_mtx); +	if (rdev->scan_req) { +		err = -EBUSY; +		goto unlock; +	}  	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {  		n_channels = validate_scan_freqs(  				info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); -		if (!n_channels) -			return -EINVAL; +		if (!n_channels) { +			err = -EINVAL; +			goto unlock; +		}  	} else {  		enum ieee80211_band band;  		n_channels = 0; @@ -4723,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)  		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp)  			n_ssids++; -	if (n_ssids > wiphy->max_scan_ssids) -		return -EINVAL; +	if (n_ssids > wiphy->max_scan_ssids) { +		err = -EINVAL; +		goto unlock; +	}  	if (info->attrs[NL80211_ATTR_IE])  		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);  	else  		ie_len = 0; -	if (ie_len > wiphy->max_scan_ie_len) -		return -EINVAL; +	if (ie_len > wiphy->max_scan_ie_len) { +		err = -EINVAL; +		goto unlock; +	}  	request = kzalloc(sizeof(*request)  			+ sizeof(*request->ssids) * n_ssids  			+ sizeof(*request->channels) * n_channels  			+ ie_len, GFP_KERNEL); -	if (!request) -		return -ENOMEM; +	if (!request) { +		err = -ENOMEM; +		goto unlock; +	}  	if (n_ssids)  		request->ssids = (void *)&request->channels[n_channels]; @@ -4876,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)  		kfree(request);  	} + unlock: +	mutex_unlock(&rdev->sched_scan_mtx);  
	return err;  } @@ -7749,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)  	if (!rdev->ops->stop_p2p_device)  		return -EOPNOTSUPP; -	if (!wdev->p2p_started) -		return 0; - -	rdev_stop_p2p_device(rdev, wdev); -	wdev->p2p_started = false; - -	mutex_lock(&rdev->devlist_mtx); -	rdev->opencount--; -	mutex_unlock(&rdev->devlist_mtx); - -	if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { -		rdev->scan_req->aborted = true; -		___cfg80211_scan_done(rdev, true); -	} +	mutex_lock(&rdev->sched_scan_mtx); +	cfg80211_stop_p2p_device(rdev, wdev); +	mutex_unlock(&rdev->sched_scan_mtx);  	return 0;  } @@ -8486,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,  	struct nlattr *nest;  	int i; -	ASSERT_RDEV_LOCK(rdev); +	lockdep_assert_held(&rdev->sched_scan_mtx);  	if (WARN_ON(!req))  		return 0; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 674aadca007..fd99ea495b7 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)  	union iwreq_data wrqu;  #endif -	ASSERT_RDEV_LOCK(rdev); +	lockdep_assert_held(&rdev->sched_scan_mtx);  	request = rdev->scan_req; @@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk)  	rdev = container_of(wk, struct cfg80211_registered_device,  			    scan_done_wk); -	cfg80211_lock_rdev(rdev); +	mutex_lock(&rdev->sched_scan_mtx);  	___cfg80211_scan_done(rdev, false); -	cfg80211_unlock_rdev(rdev); +	mutex_unlock(&rdev->sched_scan_mtx);  }  void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) @@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,  	found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);  	if (found) { -		found->pub.beacon_interval = tmp->pub.beacon_interval; -		found->pub.signal = tmp->pub.signal; -		found->pub.capability = tmp->pub.capability; -		found->ts = tmp->ts; -  		/* Update IEs */  		if 
(rcu_access_pointer(tmp->pub.proberesp_ies)) {  			const struct cfg80211_bss_ies *old; @@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,  			if (found->pub.hidden_beacon_bss &&  			    !list_empty(&found->hidden_list)) { +				const struct cfg80211_bss_ies *f; +  				/*  				 * The found BSS struct is one of the probe  				 * response members of a group, but we're @@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,  				 * SSID to showing it, which is confusing so  				 * drop this information.  				 */ + +				f = rcu_access_pointer(tmp->pub.beacon_ies); +				kfree_rcu((struct cfg80211_bss_ies *)f, +					  rcu_head);  				goto drop;  			} @@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,  				kfree_rcu((struct cfg80211_bss_ies *)old,  					  rcu_head);  		} + +		found->pub.beacon_interval = tmp->pub.beacon_interval; +		found->pub.signal = tmp->pub.signal; +		found->pub.capability = tmp->pub.capability; +		found->ts = tmp->ts;  	} else {  		struct cfg80211_internal_bss *new;  		struct cfg80211_internal_bss *hidden; @@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,  	if (IS_ERR(rdev))  		return PTR_ERR(rdev); +	mutex_lock(&rdev->sched_scan_mtx);  	if (rdev->scan_req) {  		err = -EBUSY;  		goto out; @@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,  		dev_hold(dev);  	}   out: +	mutex_unlock(&rdev->sched_scan_mtx);  	kfree(creq);  	cfg80211_unlock_rdev(rdev);  	return err; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f432bd3755b..482c70e7012 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)  	ASSERT_RTNL();  	ASSERT_RDEV_LOCK(rdev);  	ASSERT_WDEV_LOCK(wdev); +	lockdep_assert_held(&rdev->sched_scan_mtx);  	if (rdev->scan_req)  		return -EBUSY; @@ -223,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work)  	rtnl_lock();  	
cfg80211_lock_rdev(rdev);  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	list_for_each_entry(wdev, &rdev->wdev_list, list) {  		wdev_lock(wdev); @@ -247,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work)  		wdev_unlock(wdev);  	} +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	cfg80211_unlock_rdev(rdev);  	rtnl_unlock(); @@ -320,11 +323,9 @@ void cfg80211_sme_scan_done(struct net_device *dev)  {  	struct wireless_dev *wdev = dev->ieee80211_ptr; -	mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);  	wdev_lock(wdev);  	__cfg80211_sme_scan_done(dev);  	wdev_unlock(wdev); -	mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);  }  void cfg80211_sme_rx_auth(struct net_device *dev, @@ -924,9 +925,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,  	int err;  	mutex_lock(&rdev->devlist_mtx); +	/* might request scan - scan_mtx -> wdev_mtx dependency */ +	mutex_lock(&rdev->sched_scan_mtx);  	wdev_lock(dev->ieee80211_ptr);  	err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);  	wdev_unlock(dev->ieee80211_ptr); +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	return err; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b7a531380e1..7586de77a2f 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -27,7 +27,8 @@  #define WIPHY_PR_ARG	__entry->wiphy_name  #define WDEV_ENTRY	__field(u32, id) -#define WDEV_ASSIGN	(__entry->id) = (wdev ? wdev->identifier : 0) +#define WDEV_ASSIGN	(__entry->id) = (!IS_ERR_OR_NULL(wdev)	\ +					 ? 
wdev->identifier : 0)  #define WDEV_PR_FMT	"wdev(%u)"  #define WDEV_PR_ARG	(__entry->id) @@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl,  	),  	TP_fast_assign(  		WIPHY_ASSIGN; -		WIPHY_ASSIGN; +		NETDEV_ASSIGN;  		__entry->acl_policy = params->acl_policy;  	),  	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index fb9622f6d99..e79cb5c0655 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,  	cfg80211_lock_rdev(rdev);  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	wdev_lock(wdev);  	if (wdev->sme_state != CFG80211_SME_IDLE) { @@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,  	err = cfg80211_mgd_wext_connect(rdev, wdev);   out:  	wdev_unlock(wdev); +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	cfg80211_unlock_rdev(rdev);  	return err; @@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,  	cfg80211_lock_rdev(rdev);  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	wdev_lock(wdev);  	err = 0; @@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,  	err = cfg80211_mgd_wext_connect(rdev, wdev);   out:  	wdev_unlock(wdev); +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	cfg80211_unlock_rdev(rdev);  	return err; @@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,  	cfg80211_lock_rdev(rdev);  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	wdev_lock(wdev);  	if (wdev->sme_state != CFG80211_SME_IDLE) { @@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,  	err = cfg80211_mgd_wext_connect(rdev, wdev);   out:  	wdev_unlock(wdev); +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	cfg80211_unlock_rdev(rdev);  	return err; diff --git 
a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 35754cc8a9e..8dafe6d3c6e 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)  		x->xflags &= ~XFRM_TIME_DEFER;  } +static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) +{ +	u32 seq_diff, oseq_diff; +	struct km_event c; +	struct xfrm_replay_state_esn *replay_esn = x->replay_esn; +	struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; + +	/* we send notify messages in case +	 *  1. we updated on of the sequence numbers, and the seqno difference +	 *     is at least x->replay_maxdiff, in this case we also update the +	 *     timeout of our timer function +	 *  2. if x->replay_maxage has elapsed since last update, +	 *     and there were changes +	 * +	 *  The state structure must be locked! +	 */ + +	switch (event) { +	case XFRM_REPLAY_UPDATE: +		if (!x->replay_maxdiff) +			break; + +		if (replay_esn->seq_hi == preplay_esn->seq_hi) +			seq_diff = replay_esn->seq - preplay_esn->seq; +		else +			seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; + +		if (replay_esn->oseq_hi == preplay_esn->oseq_hi) +			oseq_diff = replay_esn->oseq - preplay_esn->oseq; +		else +			oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; + +		if (seq_diff < x->replay_maxdiff && +		    oseq_diff < x->replay_maxdiff) { + +			if (x->xflags & XFRM_TIME_DEFER) +				event = XFRM_REPLAY_TIMEOUT; +			else +				return; +		} + +		break; + +	case XFRM_REPLAY_TIMEOUT: +		if (memcmp(x->replay_esn, x->preplay_esn, +			   xfrm_replay_state_esn_len(replay_esn)) == 0) { +			x->xflags |= XFRM_TIME_DEFER; +			return; +		} + +		break; +	} + +	memcpy(x->preplay_esn, x->replay_esn, +	       xfrm_replay_state_esn_len(replay_esn)); +	c.event = XFRM_MSG_NEWAE; +	c.data.aevent = event; +	km_state_notify(x, &c); + +	if (x->replay_maxage && +	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) +		x->xflags &= ~XFRM_TIME_DEFER; +} +  
static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)  {  	int err = 0; @@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = {  	.advance	= xfrm_replay_advance_esn,  	.check		= xfrm_replay_check_esn,  	.recheck	= xfrm_replay_recheck_esn, -	.notify		= xfrm_replay_notify_bmp, +	.notify		= xfrm_replay_notify_esn,  	.overflow	= xfrm_replay_overflow_esn,  }; diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index b28cc384a5b..4de4bc48493 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3016,6 +3016,7 @@ sub process {  			    $dstat !~ /^'X'$/ &&					# character constants  			    $dstat !~ /$exceptions/ &&  			    $dstat !~ /^\.$Ident\s*=/ &&				# .foo = +			    $dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ &&		# stringification #foo  			    $dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ &&	# do {...} while (...); // do {...} while (...)  			    $dstat !~ /^for\s*$Constant$/ &&				# for (...)  			    $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ &&	# for (...) 
bar() diff --git a/security/capability.c b/security/capability.c index 57977508896..6783c3e6c88 100644 --- a/security/capability.c +++ b/security/capability.c @@ -737,6 +737,11 @@ static int cap_tun_dev_open(void *security)  {  	return 0;  } + +static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ +} +  #endif	/* CONFIG_SECURITY_NETWORK */  #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -1071,6 +1076,7 @@ void __init security_fixup_ops(struct security_operations *ops)  	set_to_cap_if_null(ops, tun_dev_open);  	set_to_cap_if_null(ops, tun_dev_attach_queue);  	set_to_cap_if_null(ops, tun_dev_attach); +	set_to_cap_if_null(ops, skb_owned_by);  #endif	/* CONFIG_SECURITY_NETWORK */  #ifdef CONFIG_SECURITY_NETWORK_XFRM  	set_to_cap_if_null(ops, xfrm_policy_alloc_security); diff --git a/security/security.c b/security/security.c index 7b88c6aeaed..03f248b84e9 100644 --- a/security/security.c +++ b/security/security.c @@ -1290,6 +1290,11 @@ int security_tun_dev_open(void *security)  }  EXPORT_SYMBOL(security_tun_dev_open); +void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ +	security_ops->skb_owned_by(skb, sk); +} +  #endif	/* CONFIG_SECURITY_NETWORK */  #ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 2fa28c88900..7171a957b93 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -51,6 +51,7 @@  #include <linux/tty.h>  #include <net/icmp.h>  #include <net/ip.h>		/* for local_port_range[] */ +#include <net/sock.h>  #include <net/tcp.h>		/* struct or_callable used in sock_rcv_skb */  #include <net/net_namespace.h>  #include <net/netlabel.h> @@ -4363,6 +4364,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)  	selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);  } +static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ +	skb_set_owner_w(skb, sk); +} +  static int selinux_secmark_relabel_packet(u32 sid)  {  	const struct 
task_security_struct *__tsec; @@ -5664,6 +5670,7 @@ static struct security_operations selinux_ops = {  	.tun_dev_attach_queue =		selinux_tun_dev_attach_queue,  	.tun_dev_attach =		selinux_tun_dev_attach,  	.tun_dev_open =			selinux_tun_dev_open, +	.skb_owned_by =			selinux_skb_owned_by,  #ifdef CONFIG_SECURITY_NETWORK_XFRM  	.xfrm_policy_alloc_security =	selinux_xfrm_policy_alloc, diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 71ae86ca64a..eb560fa3232 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -3222,18 +3222,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);  int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,  			   struct vm_area_struct *area)  { -	long size; -	unsigned long offset; +	struct snd_pcm_runtime *runtime = substream->runtime;;  	area->vm_page_prot = pgprot_noncached(area->vm_page_prot); -	area->vm_flags |= VM_IO; -	size = area->vm_end - area->vm_start; -	offset = area->vm_pgoff << PAGE_SHIFT; -	if (io_remap_pfn_range(area, area->vm_start, -				(substream->runtime->dma_addr + offset) >> PAGE_SHIFT, -				size, area->vm_page_prot)) -		return -EAGAIN; -	return 0; +	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);  }  EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index ecdf30eb587..4aba7646dd9 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -173,7 +173,7 @@ const char *snd_hda_get_jack_type(u32 cfg)  		"Line Out", "Speaker", "HP Out", "CD",  		"SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",  		"Line In", "Aux", "Mic", "Telephony", -		"SPDIF In", "Digitial In", "Reserved", "Other" +		"SPDIF In", "Digital In", "Reserved", "Other"  	};  	return jack_types[(cfg & AC_DEFCFG_DEVICE) diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index 7dd846380a5..d0d7ac1e99d 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c @@ -320,7 +320,7 @@ int snd_hdmi_get_eld(struct 
hda_codec *codec, hda_nid_t nid,  		     unsigned char *buf, int *eld_size)  {  	int i; -	int ret; +	int ret = 0;  	int size;  	/* diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 43c2ea53956..2dbe767be16 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -740,7 +740,7 @@ EXPORT_SYMBOL_HDA(snd_hda_activate_path);  static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path)  {  	struct hda_gen_spec *spec = codec->spec; -	bool changed; +	bool changed = false;  	int i;  	if (!spec->power_down_unused || path->active) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 418bfc0eb0a..bcd40ee488e 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -134,8 +134,8 @@ MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "   * this may give more power-saving, but will take longer time to   * wake up.   */ -static int power_save_controller = -1; -module_param(power_save_controller, bint, 0644); +static bool power_save_controller = 1; +module_param(power_save_controller, bool, 0644);  MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode.");  #endif /* CONFIG_PM */ @@ -2931,8 +2931,6 @@ static int azx_runtime_idle(struct device *dev)  	struct snd_card *card = dev_get_drvdata(dev);  	struct azx *chip = card->private_data; -	if (power_save_controller > 0) -		return 0;  	if (!power_save_controller ||  	    !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))  		return -EBUSY; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 78e1827d0a9..de8ac5c07fd 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1196,7 +1196,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)  	_snd_printd(SND_PR_VERBOSE,  		"HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", -		codec->addr, pin_nid, eld->monitor_present, eld->eld_valid); +		codec->addr, pin_nid, 
pin_eld->monitor_present, eld->eld_valid);  	if (eld->eld_valid) {  		if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 563c24df4d6..f15c36bde54 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3440,7 +3440,8 @@ static int alc662_parse_auto_config(struct hda_codec *codec)  	const hda_nid_t *ssids;  	if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 || -	    codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670) +	    codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 || +	    codec->vendor_id == 0x10ec0671)  		ssids = alc663_ssids;  	else  		ssids = alc662_ssids; @@ -3894,6 +3895,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {  	{ .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },  	{ .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },  	{ .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, +	{ .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },  	{ .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },  	{ .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },  	{ .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 }, diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index fc176044994..fc176044994 100755..100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h index 7e103f24905..7e103f24905 100755..100644 --- a/sound/soc/codecs/max98090.h +++ b/sound/soc/codecs/max98090.h diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c index f2d61a18783..566ea3256e2 100644 --- a/sound/soc/codecs/si476x.c +++ b/sound/soc/codecs/si476x.c @@ -159,6 +159,7 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream,  	switch (params_format(params)) {  	case SNDRV_PCM_FORMAT_S8:  		width = 
SI476X_PCM_FORMAT_S8; +		break;  	case SNDRV_PCM_FORMAT_S16_LE:  		width = SI476X_PCM_FORMAT_S16_LE;  		break; diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index b82bbf58414..34d0201d6a7 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c @@ -584,7 +584,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,  			    struct snd_kcontrol *kcontrol, int event)  {  	struct snd_soc_codec *codec = w->codec; -	struct arizona *arizona = dev_get_drvdata(codec->dev); +	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);  	struct regmap *regmap = codec->control_data;  	const struct reg_default *patch = NULL;  	int i, patch_size; diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 134e41c870b..f8a31ad0b20 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -1083,6 +1083,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {  	{ "ROP", NULL, "Right Speaker PGA" },  	{ "RON", NULL, "Right Speaker PGA" }, +	{ "Charge Pump", NULL, "CLK_DSP" }, +  	{ "Left Headphone Output PGA", NULL, "Charge Pump" },  	{ "Right Headphone Output PGA", NULL, "Charge Pump" },  	{ "Left Line Output PGA", NULL, "Charge Pump" }, diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index f3f7e75f862..9af1bddc4c6 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -828,7 +828,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)  						&buf_list);  			if (!buf) {  				adsp_err(dsp, "Out of memory\n"); -				return -ENOMEM; +				ret = -ENOMEM; +				goto out_fw;  			}  			adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", @@ -865,7 +866,7 @@ out_fw:  	wm_adsp_buf_free(&buf_list);  out:  	kfree(file); -	return 0; +	return ret;  }  int wm_adsp1_init(struct wm_adsp *adsp) diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 55464a5b070..810c7eeb7b0 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c @@ -496,6 +496,8 @@ static 
void imx_ssi_ac97_reset(struct snd_ac97 *ac97)  	if (imx_ssi->ac97_reset)  		imx_ssi->ac97_reset(ac97); +	/* First read sometimes fails, do a dummy read */ +	imx_ssi_ac97_read(ac97, 0);  }  static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) @@ -504,6 +506,9 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)  	if (imx_ssi->ac97_warm_reset)  		imx_ssi->ac97_warm_reset(ac97); + +	/* First read sometimes fails, do a dummy read */ +	imx_ssi_ac97_read(ac97, 0);  }  struct snd_ac97_bus_ops soc_ac97_ops = { diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c index 8e52c1485df..eb4373840bb 100644 --- a/sound/soc/fsl/pcm030-audio-fabric.c +++ b/sound/soc/fsl/pcm030-audio-fabric.c @@ -51,7 +51,7 @@ static struct snd_soc_card pcm030_card = {  	.num_links = ARRAY_SIZE(pcm030_fabric_dai),  }; -static int __init pcm030_fabric_probe(struct platform_device *op) +static int pcm030_fabric_probe(struct platform_device *op)  {  	struct device_node *np = op->dev.of_node;  	struct device_node *platform_np; diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index d7231e336a7..6bbeb0bf1a7 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c @@ -972,6 +972,7 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = {  static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)  {  	struct i2s_dai *i2s; +	int ret;  	i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL);  	if (i2s == NULL) @@ -996,15 +997,17 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)  		i2s->i2s_dai_drv.capture.channels_max = 2;  		i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES;  		i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS; +		dev_set_drvdata(&i2s->pdev->dev, i2s);  	} else {	/* Create a new platform_device for Secondary */ -		i2s->pdev = platform_device_register_resndata(NULL, -				"samsung-i2s-sec", -1, NULL, 0, NULL, 0); +		i2s->pdev = 
platform_device_alloc("samsung-i2s-sec", -1);  		if (IS_ERR(i2s->pdev))  			return NULL; -	} -	/* Pre-assign snd_soc_dai_set_drvdata */ -	dev_set_drvdata(&i2s->pdev->dev, i2s); +		platform_set_drvdata(i2s->pdev, i2s); +		ret = platform_device_add(i2s->pdev); +		if (ret < 0) +			return NULL; +	}  	return i2s;  } @@ -1107,6 +1110,10 @@ static int samsung_i2s_probe(struct platform_device *pdev)  	if (samsung_dai_type == TYPE_SEC) {  		sec_dai = dev_get_drvdata(&pdev->dev); +		if (!sec_dai) { +			dev_err(&pdev->dev, "Unable to get drvdata\n"); +			return -EFAULT; +		}  		snd_soc_register_dai(&sec_dai->pdev->dev,  			&sec_dai->i2s_dai_drv);  		asoc_dma_platform_register(&pdev->dev); diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c index 19eff8fc4fd..1a8b03e4b41 100644 --- a/sound/soc/sh/dma-sh7760.c +++ b/sound/soc/sh/dma-sh7760.c @@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)  	return 0;  } -static struct snd_soc_platform sh7760_soc_platform = { -	.pcm_ops 	= &camelot_pcm_ops, +static struct snd_soc_platform_driver sh7760_soc_platform = { +	.ops		= &camelot_pcm_ops,  	.pcm_new	= camelot_pcm_new,  	.pcm_free	= camelot_pcm_free,  }; diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index b5b3db71e25..ed0bfb0ddb9 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -211,19 +211,27 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,  	if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {  		ret = platform->driver->compr_ops->set_params(cstream, params);  		if (ret < 0) -			goto out; +			goto err;  	}  	if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) {  		ret = rtd->dai_link->compr_ops->set_params(cstream);  		if (ret < 0) -			goto out; +			goto err;  	}  	snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,  				SND_SOC_DAPM_STREAM_START); -out: +	/* cancel any delayed stream shutdown that is pending */ +	rtd->pop_wait = 
0; +	mutex_unlock(&rtd->pcm_mutex); + +	cancel_delayed_work_sync(&rtd->delayed_work); + +	return ret; + +err:  	mutex_unlock(&rtd->pcm_mutex);  	return ret;  } diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index b7e84a7cd9e..ff4b45a5d79 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2963,7 +2963,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,  	val = val << shift;  	ret = snd_soc_update_bits_locked(codec, reg, val_mask, val); -	if (ret != 0) +	if (ret < 0)  		return ret;  	if (snd_soc_volsw_is_stereo(mc)) { @@ -3140,7 +3140,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,  	if (params->mask) {  		ret = regmap_read(codec->control_data, params->base, &val);  		if (ret != 0) -			return ret; +			goto out;  		val &= params->mask; @@ -3158,13 +3158,15 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,  			((u32 *)data)[0] |= cpu_to_be32(val);  			break;  		default: -			return -EINVAL; +			ret = -EINVAL; +			goto out;  		}  	}  	ret = regmap_raw_write(codec->control_data, params->base,  			       data, len); +out:  	kfree(data);  	return ret; @@ -4197,7 +4199,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,  			dev_err(card->dev,  				"ASoC: Property '%s' index %d could not be read: %d\n",  				propname, 2 * i, ret); -			kfree(routes);  			return -EINVAL;  		}  		ret = of_property_read_string_index(np, propname, @@ -4206,7 +4207,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,  			dev_err(card->dev,  				"ASoC: Property '%s' index %d could not be read: %d\n",  				propname, (2 * i) + 1, ret); -			kfree(routes);  			return -EINVAL;  		}  	} diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 1d6a9b3ceb2..d6d9ba2e691 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -831,6 +831,9 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,  		if (path->weak)  			continue; +		if (path->walking) +			return 1; +  		if (path->walked)  			continue; 
@@ -838,6 +841,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,  		if (path->sink && path->connect) {  			path->walked = 1; +			path->walking = 1;  			/* do we need to add this widget to the list ? */  			if (list) { @@ -847,11 +851,14 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,  					dev_err(widget->dapm->dev,  						"ASoC: could not add widget %s\n",  						widget->name); +					path->walking = 0;  					return con;  				}  			}  			con += is_connected_output_ep(path->sink, list); + +			path->walking = 0;  		}  	} @@ -931,6 +938,9 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,  		if (path->weak)  			continue; +		if (path->walking) +			return 1; +  		if (path->walked)  			continue; @@ -938,6 +948,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,  		if (path->source && path->connect) {  			path->walked = 1; +			path->walking = 1;  			/* do we need to add this widget to the list ? */  			if (list) { @@ -947,11 +958,14 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,  					dev_err(widget->dapm->dev,  						"ASoC: could not add widget %s\n",  						widget->name); +					path->walking = 0;  					return con;  				}  			}  			con += is_connected_input_ep(path->source, list); + +			path->walking = 0;  		}  	} diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c index 9b76cc5a114..5e7aebe1e66 100644 --- a/sound/soc/spear/spear_pcm.c +++ b/sound/soc/spear/spear_pcm.c @@ -149,9 +149,9 @@ static void spear_pcm_free(struct snd_pcm *pcm)  static u64 spear_pcm_dmamask = DMA_BIT_MASK(32); -static int spear_pcm_new(struct snd_card *card, -		struct snd_soc_dai *dai, struct snd_pcm *pcm) +static int spear_pcm_new(struct snd_soc_pcm_runtime *rtd)  { +	struct snd_card *card = rtd->card->snd_card;  	int ret;  	if (!card->dev->dma_mask) @@ -159,16 +159,16 @@ static int spear_pcm_new(struct snd_card *card,  	if (!card->dev->coherent_dma_mask)  		
card->dev->coherent_dma_mask = DMA_BIT_MASK(32); -	if (dai->driver->playback.channels_min) { -		ret = spear_pcm_preallocate_dma_buffer(pcm, +	if (rtd->cpu_dai->driver->playback.channels_min) { +		ret = spear_pcm_preallocate_dma_buffer(rtd->pcm,  				SNDRV_PCM_STREAM_PLAYBACK,  				spear_pcm_hardware.buffer_bytes_max);  		if (ret)  			return ret;  	} -	if (dai->driver->capture.channels_min) { -		ret = spear_pcm_preallocate_dma_buffer(pcm, +	if (rtd->cpu_dai->driver->capture.channels_min) { +		ret = spear_pcm_preallocate_dma_buffer(rtd->pcm,  				SNDRV_PCM_STREAM_CAPTURE,  				spear_pcm_hardware.buffer_bytes_max);  		if (ret) diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c index c925ab0adeb..5e2c55c5b25 100644 --- a/sound/soc/tegra/tegra_pcm.c +++ b/sound/soc/tegra/tegra_pcm.c @@ -43,8 +43,6 @@  static const struct snd_pcm_hardware tegra_pcm_hardware = {  	.info			= SNDRV_PCM_INFO_MMAP |  				  SNDRV_PCM_INFO_MMAP_VALID | -				  SNDRV_PCM_INFO_PAUSE | -				  SNDRV_PCM_INFO_RESUME |  				  SNDRV_PCM_INFO_INTERLEAVED,  	.formats		= SNDRV_PCM_FMTBIT_S16_LE,  	.channels_min		= 2, @@ -127,26 +125,6 @@ static int tegra_pcm_hw_free(struct snd_pcm_substream *substream)  	return 0;  } -static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd) -{ -	switch (cmd) { -	case SNDRV_PCM_TRIGGER_START: -	case SNDRV_PCM_TRIGGER_RESUME: -	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: -		return snd_dmaengine_pcm_trigger(substream, -					SNDRV_PCM_TRIGGER_START); - -	case SNDRV_PCM_TRIGGER_STOP: -	case SNDRV_PCM_TRIGGER_SUSPEND: -	case SNDRV_PCM_TRIGGER_PAUSE_PUSH: -		return snd_dmaengine_pcm_trigger(substream, -					SNDRV_PCM_TRIGGER_STOP); -	default: -		return -EINVAL; -	} -	return 0; -} -  static int tegra_pcm_mmap(struct snd_pcm_substream *substream,  				struct vm_area_struct *vma)  { @@ -164,7 +142,7 @@ static struct snd_pcm_ops tegra_pcm_ops = {  	.ioctl		= snd_pcm_lib_ioctl,  	.hw_params	= tegra_pcm_hw_params,  	.hw_free	= tegra_pcm_hw_free, -	
.trigger	= tegra_pcm_trigger, +	.trigger	= snd_dmaengine_pcm_trigger,  	.pointer	= snd_dmaengine_pcm_pointer,  	.mmap		= tegra_pcm_mmap,  }; diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 5e634a2eb28..9e2703a2515 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -253,7 +253,7 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,  {  	struct usb_device *dev = chip->dev;  	unsigned char data[4]; -	int err, crate; +	int err, cur_rate, prev_rate;  	int clock = snd_usb_clock_find_source(chip, fmt->clock);  	if (clock < 0) @@ -266,6 +266,19 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,  		return -ENXIO;  	} +	err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, +			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, +			      UAC2_CS_CONTROL_SAM_FREQ << 8, +			      snd_usb_ctrl_intf(chip) | (clock << 8), +			      data, sizeof(data)); +	if (err < 0) { +		snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n", +			   dev->devnum, iface, fmt->altsetting); +		prev_rate = 0; +	} else { +		prev_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); +	} +  	data[0] = rate;  	data[1] = rate >> 8;  	data[2] = rate >> 16; @@ -280,19 +293,31 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,  		return err;  	} -	if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, -				   USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, -				   UAC2_CS_CONTROL_SAM_FREQ << 8, -				   snd_usb_ctrl_intf(chip) | (clock << 8), -				   data, sizeof(data))) < 0) { +	err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, +			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, +			      UAC2_CS_CONTROL_SAM_FREQ << 8, +			      snd_usb_ctrl_intf(chip) | (clock << 8), +			      data, sizeof(data)); +	if (err < 0) {  		snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n",  			   dev->devnum, iface, fmt->altsetting); -		return err; +		cur_rate = 0; +	} 
else { +		cur_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);  	} -	crate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); -	if (crate != rate) -		snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate); +	if (cur_rate != rate) { +		snd_printd(KERN_WARNING +			   "current rate %d is different from the runtime rate %d\n", +			   cur_rate, rate); +	} + +	/* Some devices doesn't respond to sample rate changes while the +	 * interface is active. */ +	if (rate != prev_rate) { +		usb_set_interface(dev, iface, 0); +		usb_set_interface(dev, iface, fmt->altsetting); +	}  	return 0;  } diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 497d2741d11..ebe91440a06 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,  	else  		ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,  				  USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, -				  0, cpu_to_le16(wIndex), +				  0, wIndex,  				  &tmp, sizeof(tmp), 1000);  	up_read(&mixer->chip->shutdown_rwsem); @@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,  	else  		ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,  				  USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, -				  cpu_to_le16(wValue), cpu_to_le16(wIndex), +				  wValue, wIndex,  				  NULL, 0, 1000);  	up_read(&mixer->chip->shutdown_rwsem); diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 5325a3869bb..9c5ab22358b 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)  {  	int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),  				  0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE, -				  cpu_to_le16(1), 0, NULL, 0, 1000); +				  1, 0, NULL, 0, 1000);  	if (ret < 0)  		return ret; diff --git 
a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 6f3214ed444..321e066a075 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1421,6 +1421,7 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)  	case 0x3C:	/* HSW */  	case 0x3F:	/* HSW */  	case 0x45:	/* HSW */ +	case 0x46:	/* HSW */  		return 1;  	case 0x2E:	/* Nehalem-EX Xeon - Beckton */  	case 0x2F:	/* Westmere-EX Xeon - Eagleton */ @@ -1515,6 +1516,7 @@ void rapl_probe(unsigned int family, unsigned int model)  	case 0x3C:	/* HSW */  	case 0x3F:	/* HSW */  	case 0x45:	/* HSW */ +	case 0x46:	/* HSW */  		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;  		break;  	case 0x2D: @@ -1754,6 +1756,7 @@ int is_snb(unsigned int family, unsigned int model)  	case 0x3C:	/* HSW */  	case 0x3F:	/* HSW */  	case 0x45:	/* HSW */ +	case 0x46:	/* HSW */  		return 1;  	}  	return 0; @@ -2276,7 +2279,7 @@ int main(int argc, char **argv)  	cmdline(argc, argv);  	if (verbose) -		fprintf(stderr, "turbostat v3.2 February 11, 2013" +		fprintf(stderr, "turbostat v3.3 March 15, 2013"  			" - Len Brown <lenb@kernel.org>\n");  	turbostat_init(); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index adc68feb5c5..f18013f09e6 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,  }  int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, -			      gpa_t gpa) +			      gpa_t gpa, unsigned long len)  {  	struct kvm_memslots *slots = kvm_memslots(kvm);  	int offset = offset_in_page(gpa); -	gfn_t gfn = gpa >> PAGE_SHIFT; +	gfn_t start_gfn = gpa >> PAGE_SHIFT; +	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; +	gfn_t nr_pages_needed = end_gfn - start_gfn + 1; +	gfn_t nr_pages_avail;  	ghc->gpa = gpa;  	ghc->generation = slots->generation; -	ghc->memslot = gfn_to_memslot(kvm, gfn); -	ghc->hva = gfn_to_hva_many(ghc->memslot, 
gfn, NULL); -	if (!kvm_is_error_hva(ghc->hva)) +	ghc->len = len; +	ghc->memslot = gfn_to_memslot(kvm, start_gfn); +	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); +	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {  		ghc->hva += offset; -	else -		return -EFAULT; - +	} else { +		/* +		 * If the requested region crosses two memslots, we still +		 * verify that the entire region is valid here. +		 */ +		while (start_gfn <= end_gfn) { +			ghc->memslot = gfn_to_memslot(kvm, start_gfn); +			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, +						   &nr_pages_avail); +			if (kvm_is_error_hva(ghc->hva)) +				return -EFAULT; +			start_gfn += nr_pages_avail; +		} +		/* Use the slow path for cross page reads and writes. */ +		ghc->memslot = NULL; +	}  	return 0;  }  EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); @@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,  	struct kvm_memslots *slots = kvm_memslots(kvm);  	int r; +	BUG_ON(len > ghc->len); +  	if (slots->generation != ghc->generation) -		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); +		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); + +	if (unlikely(!ghc->memslot)) +		return kvm_write_guest(kvm, ghc->gpa, data, len);  	if (kvm_is_error_hva(ghc->hva))  		return -EFAULT; @@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,  	struct kvm_memslots *slots = kvm_memslots(kvm);  	int r; +	BUG_ON(len > ghc->len); +  	if (slots->generation != ghc->generation) -		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); +		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); + +	if (unlikely(!ghc->memslot)) +		return kvm_read_guest(kvm, ghc->gpa, data, len);  	if (kvm_is_error_hva(ghc->hva))  		return -EFAULT;  |