diff options
660 files changed, 12416 insertions, 6323 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop index 814b01354c4..b31e782bd98 100644 --- a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop +++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop @@ -5,4 +5,15 @@ Contact:	"Ike Panhc <ike.pan@canonical.com>"  Description:  		Control the power of camera module. 1 means on, 0 means off. +What:		/sys/devices/platform/ideapad/fan_mode +Date:		June 2012 +KernelVersion:	3.6 +Contact:	"Maxim Mikityanskiy <maxtram95@gmail.com>" +Description: +		Change fan mode +		There are four available modes: +			* 0 -> Super Silent Mode +			* 1 -> Standard Mode +			* 2 -> Dust Cleaning +			* 4 -> Efficient Thermal Dissipation Mode diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 196b8b9dba1..b0300529ab1 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -6,11 +6,36 @@    <bookinfo>      <title>Linux DRM Developer's Guide</title> +    <authorgroup> +      <author> +	<firstname>Jesse</firstname> +	<surname>Barnes</surname> +	<contrib>Initial version</contrib> +	<affiliation> +	  <orgname>Intel Corporation</orgname> +	  <address> +	    <email>jesse.barnes@intel.com</email> +	  </address> +	</affiliation> +      </author> +      <author> +	<firstname>Laurent</firstname> +	<surname>Pinchart</surname> +	<contrib>Driver internals</contrib> +	<affiliation> +	  <orgname>Ideas on board SPRL</orgname> +	  <address> +	    <email>laurent.pinchart@ideasonboard.com</email> +	  </address> +	</affiliation> +      </author> +    </authorgroup> +      <copyright>        <year>2008-2009</year> -      <holder> -	Intel Corporation (Jesse Barnes <jesse.barnes@intel.com>) -      </holder> +      <year>2012</year> +      <holder>Intel Corporation</holder> +      <holder>Laurent Pinchart</holder>      </copyright>      <legalnotice> @@ -20,6 +45,17 @@  	the kernel source COPYING file.        
</para>      </legalnotice> + +    <revhistory> +      <!-- Put document revisions here, newest first. --> +      <revision> +	<revnumber>1.0</revnumber> +	<date>2012-07-13</date> +	<authorinitials>LP</authorinitials> +	<revremark>Added extensive documentation about driver internals. +	</revremark> +      </revision> +    </revhistory>    </bookinfo>  <toc></toc> @@ -72,342 +108,361 @@        submission & fencing, suspend/resume support, and DMA        services.      </para> -    <para> -      The core of every DRM driver is struct drm_driver.  Drivers -      typically statically initialize a drm_driver structure, -      then pass it to drm_init() at load time. -    </para>    <!-- Internals: driver init -->    <sect1> -    <title>Driver initialization</title> -    <para> -      Before calling the DRM initialization routines, the driver must -      first create and fill out a struct drm_driver structure. -    </para> -    <programlisting> -      static struct drm_driver driver = { -	/* Don't use MTRRs here; the Xserver or userspace app should -	 * deal with them for Intel hardware. 
-	 */ -	.driver_features = -	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | -	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET, -	.load = i915_driver_load, -	.unload = i915_driver_unload, -	.firstopen = i915_driver_firstopen, -	.lastclose = i915_driver_lastclose, -	.preclose = i915_driver_preclose, -	.save = i915_save, -	.restore = i915_restore, -	.device_is_agp = i915_driver_device_is_agp, -	.get_vblank_counter = i915_get_vblank_counter, -	.enable_vblank = i915_enable_vblank, -	.disable_vblank = i915_disable_vblank, -	.irq_preinstall = i915_driver_irq_preinstall, -	.irq_postinstall = i915_driver_irq_postinstall, -	.irq_uninstall = i915_driver_irq_uninstall, -	.irq_handler = i915_driver_irq_handler, -	.reclaim_buffers = drm_core_reclaim_buffers, -	.get_map_ofs = drm_core_get_map_ofs, -	.get_reg_ofs = drm_core_get_reg_ofs, -	.fb_probe = intelfb_probe, -	.fb_remove = intelfb_remove, -	.fb_resize = intelfb_resize, -	.master_create = i915_master_create, -	.master_destroy = i915_master_destroy, -#if defined(CONFIG_DEBUG_FS) -	.debugfs_init = i915_debugfs_init, -	.debugfs_cleanup = i915_debugfs_cleanup, -#endif -	.gem_init_object = i915_gem_init_object, -	.gem_free_object = i915_gem_free_object, -	.gem_vm_ops = &i915_gem_vm_ops, -	.ioctls = i915_ioctls, -	.fops = { -		.owner = THIS_MODULE, -		.open = drm_open, -		.release = drm_release, -		.ioctl = drm_ioctl, -		.mmap = drm_mmap, -		.poll = drm_poll, -		.fasync = drm_fasync, -#ifdef CONFIG_COMPAT -		.compat_ioctl = i915_compat_ioctl, -#endif -		.llseek = noop_llseek, -		}, -	.pci_driver = { -		.name = DRIVER_NAME, -		.id_table = pciidlist, -		.probe = probe, -		.remove = __devexit_p(drm_cleanup_pci), -		}, -	.name = DRIVER_NAME, -	.desc = DRIVER_DESC, -	.date = DRIVER_DATE, -	.major = DRIVER_MAJOR, -	.minor = DRIVER_MINOR, -	.patchlevel = DRIVER_PATCHLEVEL, -      }; -    </programlisting> +    <title>Driver Initialization</title>      <para> -      In the example above, taken from the i915 DRM driver, the driver -    
  sets several flags indicating what core features it supports; -      we go over the individual callbacks in later sections.  Since -      flags indicate which features your driver supports to the DRM -      core, you need to set most of them prior to calling drm_init().  Some, -      like DRIVER_MODESET can be set later based on user supplied parameters, -      but that's the exception rather than the rule. +      At the core of every DRM driver is a <structname>drm_driver</structname> +      structure. Drivers typically statically initialize a drm_driver structure, +      and then pass it to one of the <function>drm_*_init()</function> functions +      to register it with the DRM subsystem.      </para> -    <variablelist> -      <title>Driver flags</title> -      <varlistentry> -	<term>DRIVER_USE_AGP</term> -	<listitem><para> -	    Driver uses AGP interface -	</para></listitem> -      </varlistentry> -      <varlistentry> -	<term>DRIVER_REQUIRE_AGP</term> -	<listitem><para> -	    Driver needs AGP interface to function. -	</para></listitem> -      </varlistentry> -      <varlistentry> -	<term>DRIVER_USE_MTRR</term> -	<listitem> -	  <para> -	    Driver uses MTRR interface for mapping memory.  Deprecated. -	  </para> -	</listitem> -      </varlistentry> -      <varlistentry> -	<term>DRIVER_PCI_DMA</term> -	<listitem><para> -	    Driver is capable of PCI DMA.  Deprecated. -	</para></listitem> -      </varlistentry> -      <varlistentry> -	<term>DRIVER_SG</term> -	<listitem><para> -	    Driver can perform scatter/gather DMA.  Deprecated. -	</para></listitem> -      </varlistentry> -      <varlistentry> -	<term>DRIVER_HAVE_DMA</term> -	<listitem><para>Driver supports DMA.  Deprecated.</para></listitem> -      </varlistentry> -      <varlistentry> -	<term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term> -	<listitem> -	  <para> -	    DRIVER_HAVE_IRQ indicates whether the driver has an IRQ -	    handler.  
DRIVER_IRQ_SHARED indicates whether the device & -	    handler support shared IRQs (note that this is required of -	    PCI drivers). -	  </para> -	</listitem> -      </varlistentry> -      <varlistentry> -	<term>DRIVER_DMA_QUEUE</term> -	<listitem> -	  <para> -	    Should be set if the driver queues DMA requests and completes them -	    asynchronously.  Deprecated. -	  </para> -	</listitem> -      </varlistentry> -      <varlistentry> -	<term>DRIVER_FB_DMA</term> -	<listitem> -	  <para> -	    Driver supports DMA to/from the framebuffer.  Deprecated. -	  </para> -	</listitem> -      </varlistentry> -      <varlistentry> -	<term>DRIVER_MODESET</term> -	<listitem> -	  <para> -	    Driver supports mode setting interfaces. -	  </para> -	</listitem> -      </varlistentry> -    </variablelist>      <para> -      In this specific case, the driver requires AGP and supports -      IRQs.  DMA, as discussed later, is handled by device-specific ioctls -      in this case.  It also supports the kernel mode setting APIs, though -      unlike in the actual i915 driver source, this example unconditionally -      exports KMS capability. +      The <structname>drm_driver</structname> structure contains static +      information that describes the driver and features it supports, and +      pointers to methods that the DRM core will call to implement the DRM API. +      We will first go through the <structname>drm_driver</structname> static +      information fields, and will then describe individual operations in +      details as they get used in later sections.      </para> -  </sect1> - -  <!-- Internals: driver load --> - -  <sect1> -    <title>Driver load</title> -    <para> -      In the previous section, we saw what a typical drm_driver -      structure might look like.  One of the more important fields in -      the structure is the hook for the load function. -    </para> -    <programlisting> -      static struct drm_driver driver = { -      	... 
-      	.load = i915_driver_load, -        ... -      }; -    </programlisting> -    <para> -      The load function has many responsibilities: allocating a driver -      private structure, specifying supported performance counters, -      configuring the device (e.g. mapping registers & command -      buffers), initializing the memory manager, and setting up the -      initial output configuration. -    </para> -    <para> -      If compatibility is a concern (e.g. with drivers converted over -      to the new interfaces from the old ones), care must be taken to -      prevent device initialization and control that is incompatible with -      currently active userspace drivers.  For instance, if user -      level mode setting drivers are in use, it would be problematic -      to perform output discovery & configuration at load time. -      Likewise, if user-level drivers unaware of memory management are -      in use, memory management and command buffer setup may need to -      be omitted.  These requirements are driver-specific, and care -      needs to be taken to keep both old and new applications and -      libraries working.  The i915 driver supports the "modeset" -      module parameter to control whether advanced features are -      enabled at load time or in legacy fashion. -    </para> -      <sect2> -      <title>Driver private & performance counters</title> -      <para> -	The driver private hangs off the main drm_device structure and -	can be used for tracking various device-specific bits of -	information, like register offsets, command buffer status, -	register state for suspend/resume, etc.  At load time, a -	driver may simply allocate one and set drm_device.dev_priv -	appropriately; it should be freed and drm_device.dev_priv set -	to NULL when the driver is unloaded. -      </para> -      <para> -	The DRM supports several counters which may be used for rough -	performance characterization.  
Note that the DRM stat counter -	system is not often used by applications, and supporting -	additional counters is completely optional. -      </para> -      <para> -	These interfaces are deprecated and should not be used.  If performance -	monitoring is desired, the developer should investigate and -	potentially enhance the kernel perf and tracing infrastructure to export -	GPU related performance information for consumption by performance -	monitoring tools and applications. -      </para> +      <title>Driver Information</title> +      <sect3> +        <title>Driver Features</title> +        <para> +          Drivers inform the DRM core about their requirements and supported +          features by setting appropriate flags in the +          <structfield>driver_features</structfield> field. Since those flags +          influence the DRM core behaviour since registration time, most of them +          must be set to registering the <structname>drm_driver</structname> +          instance. +        </para> +        <synopsis>u32 driver_features;</synopsis> +        <variablelist> +          <title>Driver Feature Flags</title> +          <varlistentry> +            <term>DRIVER_USE_AGP</term> +            <listitem><para> +              Driver uses AGP interface, the DRM core will manage AGP resources. +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_REQUIRE_AGP</term> +            <listitem><para> +              Driver needs AGP interface to function. AGP initialization failure +              will become a fatal error. +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_USE_MTRR</term> +            <listitem><para> +              Driver uses MTRR interface for mapping memory, the DRM core will +              manage MTRR resources. Deprecated. 
+            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_PCI_DMA</term> +            <listitem><para> +              Driver is capable of PCI DMA, mapping of PCI DMA buffers to +              userspace will be enabled. Deprecated. +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_SG</term> +            <listitem><para> +              Driver can perform scatter/gather DMA, allocation and mapping of +              scatter/gather buffers will be enabled. Deprecated. +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_HAVE_DMA</term> +            <listitem><para> +              Driver supports DMA, the userspace DMA API will be supported. +              Deprecated. +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term> +            <listitem><para> +              DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler. The +              DRM core will automatically register an interrupt handler when the +              flag is set. DRIVER_IRQ_SHARED indicates whether the device & +              handler support shared IRQs (note that this is required of PCI +              drivers). +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_IRQ_VBL</term> +            <listitem><para>Unused. Deprecated.</para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_DMA_QUEUE</term> +            <listitem><para> +              Should be set if the driver queues DMA requests and completes them +              asynchronously.  Deprecated. 
+            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_FB_DMA</term> +            <listitem><para> +              Driver supports DMA to/from the framebuffer, mapping of frambuffer +              DMA buffers to userspace will be supported. Deprecated. +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_IRQ_VBL2</term> +            <listitem><para>Unused. Deprecated.</para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_GEM</term> +            <listitem><para> +              Driver use the GEM memory manager. +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_MODESET</term> +            <listitem><para> +              Driver supports mode setting interfaces (KMS). +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term>DRIVER_PRIME</term> +            <listitem><para> +              Driver implements DRM PRIME buffer sharing. +            </para></listitem> +          </varlistentry> +        </variablelist> +      </sect3> +      <sect3> +        <title>Major, Minor and Patchlevel</title> +        <synopsis>int major; +int minor; +int patchlevel;</synopsis> +        <para> +          The DRM core identifies driver versions by a major, minor and patch +          level triplet. The information is printed to the kernel log at +          initialization time and passed to userspace through the +          DRM_IOCTL_VERSION ioctl. +        </para> +        <para> +          The major and minor numbers are also used to verify the requested driver +          API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes +          between minor versions, applications can call DRM_IOCTL_SET_VERSION to +          select a specific version of the API. 
If the requested major isn't equal +          to the driver major, or the requested minor is larger than the driver +          minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise +          the driver's set_version() method will be called with the requested +          version. +        </para> +      </sect3> +      <sect3> +        <title>Name, Description and Date</title> +        <synopsis>char *name; +char *desc; +char *date;</synopsis> +        <para> +          The driver name is printed to the kernel log at initialization time, +          used for IRQ registration and passed to userspace through +          DRM_IOCTL_VERSION. +        </para> +        <para> +          The driver description is a purely informative string passed to +          userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by +          the kernel. +        </para> +        <para> +          The driver date, formatted as YYYYMMDD, is meant to identify the date of +          the latest modification to the driver. However, as most drivers fail to +          update it, its value is mostly useless. The DRM core prints it to the +          kernel log at initialization time and passes it to userspace through the +          DRM_IOCTL_VERSION ioctl. +        </para> +      </sect3>      </sect2> -      <sect2> -      <title>Configuring the device</title> -      <para> -	Obviously, device configuration is device-specific. -	However, there are several common operations: finding a -	device's PCI resources, mapping them, and potentially setting -	up an IRQ handler. -      </para> +      <title>Driver Load</title>        <para> -	Finding & mapping resources is fairly straightforward.  The -	DRM wrapper functions, drm_get_resource_start() and -	drm_get_resource_len(), may be used to find BARs on the given -	drm_device struct.  Once those values have been retrieved, the -	driver load function can call drm_addmap() to create a new -	mapping for the BAR in question.  
Note that you probably want a -	drm_local_map_t in your driver private structure to track any -	mappings you create. -<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* --> -<!-- !Finclude/drm/drmP.h drm_local_map_t --> +        The <methodname>load</methodname> method is the driver and device +        initialization entry point. The method is responsible for allocating and +        initializing driver private data, specifying supported performance +        counters, performing resource allocation and mapping (e.g. acquiring +        clocks, mapping registers or allocating command buffers), initializing +        the memory manager (<xref linkend="drm-memory-management"/>), installing +        the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up +        vertical blanking handling (<xref linkend="drm-vertical-blank"/>), mode +	setting (<xref linkend="drm-mode-setting"/>) and initial output +	configuration (<xref linkend="drm-kms-init"/>).        </para> +      <note><para> +        If compatibility is a concern (e.g. with drivers converted over from +        User Mode Setting to Kernel Mode Setting), care must be taken to prevent +        device initialization and control that is incompatible with currently +        active userspace drivers. For instance, if user level mode setting +        drivers are in use, it would be problematic to perform output discovery +        & configuration at load time. Likewise, if user-level drivers +        unaware of memory management are in use, memory management and command +        buffer setup may need to be omitted. These requirements are +        driver-specific, and care needs to be taken to keep both old and new +        applications and libraries working. 
+      </para></note> +      <synopsis>int (*load) (struct drm_device *, unsigned long flags);</synopsis>        <para> -	if compatibility with other operating systems isn't a concern -	(DRM drivers can run under various BSD variants and OpenSolaris), -	native Linux calls may be used for the above, e.g. pci_resource_* -	and iomap*/iounmap.  See the Linux device driver book for more -	info. -      </para> -      <para> -	Once you have a register map, you may use the DRM_READn() and -	DRM_WRITEn() macros to access the registers on your device, or -	use driver-specific versions to offset into your MMIO space -	relative to a driver-specific base pointer (see I915_READ for -	an example). -      </para> -      <para> -	If your device supports interrupt generation, you may want to -	set up an interrupt handler when the driver is loaded.  This -	is done using the drm_irq_install() function.  If your device -	supports vertical blank interrupts, it should call -	drm_vblank_init() to initialize the core vblank handling code before -	enabling interrupts on your device.  This ensures the vblank related -	structures are allocated and allows the core to handle vblank events. -      </para> -<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install--> -      <para> -	Once your interrupt handler is registered (it uses your -	drm_driver.irq_handler as the actual interrupt handling -	function), you can safely enable interrupts on your device, -	assuming any other state your interrupt handler uses is also -	initialized. -      </para> -      <para> -	Another task that may be necessary during configuration is -	mapping the video BIOS.  On many devices, the VBIOS describes -	device configuration, LCD panel timings (if any), and contains -	flags indicating device state.  
Mapping the BIOS can be done -	using the pci_map_rom() call, a convenience function that -	takes care of mapping the actual ROM, whether it has been -	shadowed into memory (typically at address 0xc0000) or exists -	on the PCI device in the ROM BAR.  Note that after the ROM -	has been mapped and any necessary information has been extracted, -	it should be unmapped; on many devices, the ROM address decoder is -	shared with other BARs, so leaving it mapped could cause -	undesired behavior like hangs or memory corruption. -<!--!Fdrivers/pci/rom.c pci_map_rom--> +        The method takes two arguments, a pointer to the newly created +	<structname>drm_device</structname> and flags. The flags are used to +	pass the <structfield>driver_data</structfield> field of the device id +	corresponding to the device passed to <function>drm_*_init()</function>. +	Only PCI devices currently use this, USB and platform DRM drivers have +	their <methodname>load</methodname> method called with flags to 0.        </para> +      <sect3> +        <title>Driver Private & Performance Counters</title> +        <para> +          The driver private hangs off the main +          <structname>drm_device</structname> structure and can be used for +          tracking various device-specific bits of information, like register +          offsets, command buffer status, register state for suspend/resume, etc. +          At load time, a driver may simply allocate one and set +          <structname>drm_device</structname>.<structfield>dev_priv</structfield> +          appropriately; it should be freed and +          <structname>drm_device</structname>.<structfield>dev_priv</structfield> +          set to NULL when the driver is unloaded. +        </para> +        <para> +          DRM supports several counters which were used for rough performance +          characterization. This stat counter system is deprecated and should not +          be used. 
If performance monitoring is desired, the developer should +          investigate and potentially enhance the kernel perf and tracing +          infrastructure to export GPU related performance information for +          consumption by performance monitoring tools and applications. +        </para> +      </sect3> +      <sect3 id="drm-irq-registration"> +        <title>IRQ Registration</title> +        <para> +          The DRM core tries to facilitate IRQ handler registration and +          unregistration by providing <function>drm_irq_install</function> and +          <function>drm_irq_uninstall</function> functions. Those functions only +          support a single interrupt per device. +        </para> +  <!--!Fdrivers/char/drm/drm_irq.c drm_irq_install--> +        <para> +          Both functions get the device IRQ by calling +          <function>drm_dev_to_irq</function>. This inline function will call a +          bus-specific operation to retrieve the IRQ number. For platform devices, +          <function>platform_get_irq</function>(..., 0) is used to retrieve the +          IRQ number. +        </para> +        <para> +          <function>drm_irq_install</function> starts by calling the +          <methodname>irq_preinstall</methodname> driver operation. The operation +          is optional and must make sure that the interrupt will not get fired by +          clearing all pending interrupt flags or disabling the interrupt. +        </para> +        <para> +          The IRQ will then be requested by a call to +          <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver +          feature flag is set, a shared (IRQF_SHARED) IRQ handler will be +          requested. +        </para> +        <para> +          The IRQ handler function must be provided as the mandatory irq_handler +          driver operation. 
It will get passed directly to +          <function>request_irq</function> and thus has the same prototype as all +          IRQ handlers. It will get called with a pointer to the DRM device as the +          second argument. +        </para> +        <para> +          Finally the function calls the optional +          <methodname>irq_postinstall</methodname> driver operation. The operation +          usually enables interrupts (excluding the vblank interrupt, which is +          enabled separately), but drivers may choose to enable/disable interrupts +          at a different time. +        </para> +        <para> +          <function>drm_irq_uninstall</function> is similarly used to uninstall an +          IRQ handler. It starts by waking up all processes waiting on a vblank +          interrupt to make sure they don't hang, and then calls the optional +          <methodname>irq_uninstall</methodname> driver operation. The operation +          must disable all hardware interrupts. Finally the function frees the IRQ +          by calling <function>free_irq</function>. +        </para> +      </sect3> +      <sect3> +        <title>Memory Manager Initialization</title> +        <para> +          Every DRM driver requires a memory manager which must be initialized at +          load time. DRM currently contains two memory managers, the Translation +          Table Manager (TTM) and the Graphics Execution Manager (GEM). +          This document describes the use of the GEM memory manager only. See +          <xref linkend="drm-memory-management"/> for details. +        </para> +      </sect3> +      <sect3> +        <title>Miscellaneous Device Configuration</title> +        <para> +          Another task that may be necessary for PCI devices during configuration +          is mapping the video BIOS. On many devices, the VBIOS describes device +          configuration, LCD panel timings (if any), and contains flags indicating +          device state. 
Mapping the BIOS can be done using the pci_map_rom() call, +          a convenience function that takes care of mapping the actual ROM, +          whether it has been shadowed into memory (typically at address 0xc0000) +          or exists on the PCI device in the ROM BAR. Note that after the ROM has +          been mapped and any necessary information has been extracted, it should +          be unmapped; on many devices, the ROM address decoder is shared with +          other BARs, so leaving it mapped could cause undesired behaviour like +          hangs or memory corruption. +  <!--!Fdrivers/pci/rom.c pci_map_rom--> +        </para> +      </sect3>      </sect2> +  </sect1> +  <!-- Internals: memory management --> + +  <sect1 id="drm-memory-management"> +    <title>Memory management</title> +    <para> +      Modern Linux systems require large amount of graphics memory to store +      frame buffers, textures, vertices and other graphics-related data. Given +      the very dynamic nature of many of that data, managing graphics memory +      efficiently is thus crucial for the graphics stack and plays a central +      role in the DRM infrastructure. +    </para> +    <para> +      The DRM core includes two memory managers, namely Translation Table Maps +      (TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory +      manager to be developed and tried to be a one-size-fits-them all +      solution. It provides a single userspace API to accomodate the need of +      all hardware, supporting both Unified Memory Architecture (UMA) devices +      and devices with dedicated video RAM (i.e. most discrete video cards). +      This resulted in a large, complex piece of code that turned out to be +      hard to use for driver development. +    </para> +    <para> +      GEM started as an Intel-sponsored project in reaction to TTM's +      complexity. 
Its design philosophy is completely different: instead of +      providing a solution to every graphics memory-related problems, GEM +      identified common code between drivers and created a support library to +      share it. GEM has simpler initialization and execution requirements than +      TTM, but has no video RAM management capabitilies and is thus limited to +      UMA devices. +    </para>      <sect2> -      <title>Memory manager initialization</title> -      <para> -	In order to allocate command buffers, cursor memory, scanout -	buffers, etc., as well as support the latest features provided -	by packages like Mesa and the X.Org X server, your driver -	should support a memory manager. -      </para> +      <title>The Translation Table Manager (TTM)</title>        <para> -	If your driver supports memory management (it should!), you -	need to set that up at load time as well.  How you initialize -	it depends on which memory manager you're using: TTM or GEM. +	TTM design background and information belongs here.        </para>        <sect3>  	<title>TTM initialization</title> -	<para> -	  TTM (for Translation Table Manager) manages video memory and -	  aperture space for graphics devices. TTM supports both UMA devices -	  and devices with dedicated video RAM (VRAM), i.e. most discrete -	  graphics devices.  If your device has dedicated RAM, supporting -	  TTM is desirable.  TTM also integrates tightly with your -	  driver-specific buffer execution function.  See the radeon -	  driver for examples. -	</para> -	<para> -	  The core TTM structure is the ttm_bo_driver struct.  It contains -	  several fields with function pointers for initializing the TTM, -	  allocating and freeing memory, waiting for command completion -	  and fence synchronization, and memory migration.  See the -	  radeon_ttm.c file for an example of usage. 
+        <warning><para>This section is outdated.</para></warning> +        <para> +          Drivers wishing to support TTM must fill out a drm_bo_driver +          structure. The structure contains several fields with function +          pointers for initializing the TTM, allocating and freeing memory, +          waiting for command completion and fence synchronization, and memory +          migration. See the radeon_ttm.c file for an example of usage.  	</para>  	<para>  	  The ttm_global_reference structure is made up of several fields: @@ -445,82 +500,1081 @@  	  count for the TTM, which will call your initialization function.  	</para>        </sect3> +    </sect2> +    <sect2 id="drm-gem"> +      <title>The Graphics Execution Manager (GEM)</title> +      <para> +        The GEM design approach has resulted in a memory manager that doesn't +        provide full coverage of all (or even all common) use cases in its +        userspace or kernel API. GEM exposes a set of standard memory-related +        operations to userspace and a set of helper functions to drivers, and let +        drivers implement hardware-specific operations with their own private API. +      </para> +      <para> +        The GEM userspace API is described in the +        <ulink url="http://lwn.net/Articles/283798/"><citetitle>GEM - the Graphics +        Execution Manager</citetitle></ulink> article on LWN. While slightly +        outdated, the document provides a good overview of the GEM API principles. +        Buffer allocation and read and write operations, described as part of the +        common GEM API, are currently implemented using driver-specific ioctls. +      </para> +      <para> +        GEM is data-agnostic. It manages abstract buffer objects without knowing +        what individual buffers contain. 
APIs that require knowledge of buffer +        contents or purpose, such as buffer allocation or synchronization +        primitives, are thus outside of the scope of GEM and must be implemented +        using driver-specific ioctls. +      </para> +      <para> +	On a fundamental level, GEM involves several operations: +	<itemizedlist> +	  <listitem>Memory allocation and freeing</listitem> +	  <listitem>Command execution</listitem> +	  <listitem>Aperture management at command execution time</listitem> +	</itemizedlist> +	Buffer object allocation is relatively straightforward and largely +        provided by Linux's shmem layer, which provides memory to back each +        object. +      </para> +      <para> +        Device-specific operations, such as command execution, pinning, buffer +	read & write, mapping, and domain ownership transfers are left to +        driver-specific ioctls. +      </para>        <sect3> -	<title>GEM initialization</title> -	<para> -	  GEM is an alternative to TTM, designed specifically for UMA -	  devices.  It has simpler initialization and execution requirements -	  than TTM, but has no VRAM management capability.  Core GEM -	  is initialized by calling drm_mm_init() to create -	  a GTT DRM MM object, which provides an address space pool for -	  object allocation.  In a KMS configuration, the driver -	  needs to allocate and initialize a command ring buffer following -	  core GEM initialization.  A UMA device usually has what is called a -	  "stolen" memory region, which provides space for the initial -	  framebuffer and large, contiguous memory regions required by the -	  device.  This space is not typically managed by GEM, and it must -	  be initialized separately into its own DRM MM object. -	</para> -	<para> -	  Initialization is driver-specific. In the case of Intel -	  integrated graphics chips like 965GM, GEM initialization can -	  be done by calling the internal GEM init function, -	  i915_gem_do_init().  
Since the 965GM is a UMA device -	  (i.e. it doesn't have dedicated VRAM), GEM manages -	  making regular RAM available for GPU operations.  Memory set -	  aside by the BIOS (called "stolen" memory by the i915 -	  driver) is managed by the DRM memrange allocator; the -	  rest of the aperture is managed by GEM. -	  <programlisting> -	    /* Basic memrange allocator for stolen space (aka vram) */ -	    drm_memrange_init(&dev_priv->vram, 0, prealloc_size); -	    /* Let GEM Manage from end of prealloc space to end of aperture */ -	    i915_gem_do_init(dev, prealloc_size, agp_size); -	  </programlisting> -<!--!Edrivers/char/drm/drm_memrange.c--> -	</para> -	<para> -	  Once the memory manager has been set up, we may allocate the -	  command buffer.  In the i915 case, this is also done with a -	  GEM function, i915_gem_init_ringbuffer(). -	</para> +        <title>GEM Initialization</title> +        <para> +          Drivers that use GEM must set the DRIVER_GEM bit in the struct +          <structname>drm_driver</structname> +          <structfield>driver_features</structfield> field. The DRM core will +          then automatically initialize the GEM core before calling the +          <methodname>load</methodname> operation. Behind the scene, this will +          create a DRM Memory Manager object which provides an address space +          pool for object allocation. +        </para> +        <para> +          In a KMS configuration, drivers need to allocate and initialize a +          command ring buffer following core GEM initialization if required by +          the hardware. UMA devices usually have what is called a "stolen" +          memory region, which provides space for the initial framebuffer and +          large, contiguous memory regions required by the device. This space is +          typically not managed by GEM, and must be initialized separately into +          its own DRM MM object. 
+        </para> +      </sect3> +      <sect3> +        <title>GEM Objects Creation</title> +        <para> +          GEM splits creation of GEM objects and allocation of the memory that +          backs them in two distinct operations. +        </para> +        <para> +          GEM objects are represented by an instance of struct +          <structname>drm_gem_object</structname>. Drivers usually need to extend +          GEM objects with private information and thus create a driver-specific +          GEM object structure type that embeds an instance of struct +          <structname>drm_gem_object</structname>. +        </para> +        <para> +          To create a GEM object, a driver allocates memory for an instance of its +          specific GEM object type and initializes the embedded struct +          <structname>drm_gem_object</structname> with a call to +          <function>drm_gem_object_init</function>. The function takes a pointer to +          the DRM device, a pointer to the GEM object and the buffer object size +          in bytes. +        </para> +        <para> +          GEM uses shmem to allocate anonymous pageable memory. +          <function>drm_gem_object_init</function> will create an shmfs file of +          the requested size and store it into the struct +          <structname>drm_gem_object</structname> <structfield>filp</structfield> +          field. The memory is used as either main storage for the object when the +          graphics hardware uses system memory directly or as a backing store +          otherwise. +        </para> +        <para> +          Drivers are responsible for the actual physical pages allocation by +          calling <function>shmem_read_mapping_page_gfp</function> for each page. 
+          Note that they can decide to allocate pages when initializing the GEM +          object, or to delay allocation until the memory is needed (for instance +          when a page fault occurs as a result of a userspace memory access or +          when the driver needs to start a DMA transfer involving the memory). +        </para> +        <para> +          Anonymous pageable memory allocation is not always desired, for instance +          when the hardware requires physically contiguous system memory as is +          often the case in embedded devices. Drivers can create GEM objects with +          no shmfs backing (called private GEM objects) by initializing them with +          a call to <function>drm_gem_private_object_init</function> instead of +          <function>drm_gem_object_init</function>. Storage for private GEM +          objects must be managed by drivers. +        </para> +        <para> +          Drivers that do not need to extend GEM objects with private information +          can call the <function>drm_gem_object_alloc</function> function to +          allocate and initialize a struct <structname>drm_gem_object</structname> +          instance. The GEM core will call the optional driver +          <methodname>gem_init_object</methodname> operation after initializing +          the GEM object with <function>drm_gem_object_init</function>. +          <synopsis>int (*gem_init_object) (struct drm_gem_object *obj);</synopsis> +        </para> +        <para> +          No alloc-and-init function exists for private GEM objects. +        </para> +      </sect3> +      <sect3> +        <title>GEM Objects Lifetime</title> +        <para> +          All GEM objects are reference-counted by the GEM core. References can be +          acquired and released by calling <function>drm_gem_object_reference</function> +          and <function>drm_gem_object_unreference</function> respectively. 
The +          caller must hold the <structname>drm_device</structname> +          <structfield>struct_mutex</structfield> lock. As a convenience, GEM +          provides the <function>drm_gem_object_reference_unlocked</function> and +          <function>drm_gem_object_unreference_unlocked</function> functions that +          can be called without holding the lock. +        </para> +        <para> +          When the last reference to a GEM object is released the GEM core calls +          the <structname>drm_driver</structname> +          <methodname>gem_free_object</methodname> operation. That operation is +          mandatory for GEM-enabled drivers and must free the GEM object and all +          associated resources. +        </para> +        <para> +          <synopsis>void (*gem_free_object) (struct drm_gem_object *obj);</synopsis> +          Drivers are responsible for freeing all GEM object resources, including +          the resources created by the GEM core. If an mmap offset has been +          created for the object (in which case +          <structname>drm_gem_object</structname>::<structfield>map_list</structfield>::<structfield>map</structfield> +          is not NULL) it must be freed by a call to +          <function>drm_gem_free_mmap_offset</function>. The shmfs backing store +          must be released by calling <function>drm_gem_object_release</function> +          (that function can safely be called if no shmfs backing store has been +          created). +        </para> +      </sect3> +      <sect3> +        <title>GEM Objects Naming</title> +        <para> +          Communication between userspace and the kernel refers to GEM objects +          using local handles, global names or, more recently, file descriptors. +          All of those are 32-bit integer values; the usual Linux kernel limits +          apply to the file descriptors. +        </para> +        <para> +          GEM handles are local to a DRM file. 
Applications get a handle to a GEM +          object through a driver-specific ioctl, and can use that handle to refer +          to the GEM object in other standard or driver-specific ioctls. Closing a +          DRM file handle frees all its GEM handles and dereferences the +          associated GEM objects. +        </para> +        <para> +          To create a handle for a GEM object drivers call +          <function>drm_gem_handle_create</function>. The function takes a pointer +          to the DRM file and the GEM object and returns a locally unique handle. +          When the handle is no longer needed drivers delete it with a call to +          <function>drm_gem_handle_delete</function>. Finally the GEM object +          associated with a handle can be retrieved by a call to +          <function>drm_gem_object_lookup</function>. +        </para> +        <para> +          Handles don't take ownership of GEM objects, they only take a reference +          to the object that will be dropped when the handle is destroyed. To +          avoid leaking GEM objects, drivers must make sure they drop the +          reference(s) they own (such as the initial reference taken at object +          creation time) as appropriate, without any special consideration for the +          handle. For example, in the particular case of combined GEM object and +          handle creation in the implementation of the +          <methodname>dumb_create</methodname> operation, drivers must drop the +          initial reference to the GEM object before returning the handle. +        </para> +        <para> +          GEM names are similar in purpose to handles but are not local to DRM +          files. They can be passed between processes to reference a GEM object +          globally. 
Names can't be used directly to refer to objects in the DRM +          API, applications must convert handles to names and names to handles +          using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls +          respectively. The conversion is handled by the DRM core without any +          driver-specific support. +        </para> +        <para> +          Similar to global names, GEM file descriptors are also used to share GEM +          objects across processes. They offer additional security: as file +          descriptors must be explicitly sent over UNIX domain sockets to be shared +          between applications, they can't be guessed like the globally unique GEM +          names. +        </para> +        <para> +          Drivers that support GEM file descriptors, also known as the DRM PRIME +          API, must set the DRIVER_PRIME bit in the struct +          <structname>drm_driver</structname> +          <structfield>driver_features</structfield> field, and implement the +          <methodname>prime_handle_to_fd</methodname> and +          <methodname>prime_fd_to_handle</methodname> operations. +        </para> +        <para> +          <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev, +                            struct drm_file *file_priv, uint32_t handle, +                            uint32_t flags, int *prime_fd); +  int (*prime_fd_to_handle)(struct drm_device *dev, +                            struct drm_file *file_priv, int prime_fd, +                            uint32_t *handle);</synopsis> +          Those two operations convert a handle to a PRIME file descriptor and +          vice versa. Drivers must use the kernel dma-buf buffer sharing framework +          to manage the PRIME file descriptors. 
+        </para> +        <para> +          While non-GEM drivers must implement the operations themselves, GEM +          drivers must use the <function>drm_gem_prime_handle_to_fd</function> +          and <function>drm_gem_prime_fd_to_handle</function> helper functions. +          Those helpers rely on the driver +          <methodname>gem_prime_export</methodname> and +          <methodname>gem_prime_import</methodname> operations to create a dma-buf +          instance from a GEM object (dma-buf exporter role) and to create a GEM +          object from a dma-buf instance (dma-buf importer role). +        </para> +        <para> +          <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev, +                                       struct drm_gem_object *obj, +                                       int flags); +  struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, +                                              struct dma_buf *dma_buf);</synopsis> +          These two operations are mandatory for GEM drivers that support DRM +          PRIME. +        </para> +      </sect3> +      <sect3 id="drm-gem-objects-mapping"> +        <title>GEM Objects Mapping</title> +        <para> +          Because mapping operations are fairly heavyweight GEM favours +          read/write-like access to buffers, implemented through driver-specific +          ioctls, over mapping buffers to userspace. However, when random access +          to the buffer is needed (to perform software rendering for instance), +          direct access to the object can be more efficient. +        </para> +        <para> +          The mmap system call can't be used directly to map GEM objects, as they +          don't have their own file handle. Two alternative methods currently +          co-exist to map GEM objects to userspace. 
The first method uses a +          driver-specific ioctl to perform the mapping operation, calling +          <function>do_mmap</function> under the hood. This is often considered +          dubious, seems to be discouraged for new GEM-enabled drivers, and will +          thus not be described here. +        </para> +        <para> +          The second method uses the mmap system call on the DRM file handle. +          <synopsis>void *mmap(void *addr, size_t length, int prot, int flags, int fd, +             off_t offset);</synopsis> +          DRM identifies the GEM object to be mapped by a fake offset passed +          through the mmap offset argument. Prior to being mapped, a GEM object +          must thus be associated with a fake offset. To do so, drivers must call +          <function>drm_gem_create_mmap_offset</function> on the object. The +          function allocates a fake offset range from a pool and stores the +          offset divided by PAGE_SIZE in +          <literal>obj->map_list.hash.key</literal>. Care must be taken not to +          call <function>drm_gem_create_mmap_offset</function> if a fake offset +          has already been allocated for the object. This can be tested by +          <literal>obj->map_list.map</literal> being non-NULL. +        </para> +        <para> +          Once allocated, the fake offset value +          (<literal>obj->map_list.hash.key << PAGE_SHIFT</literal>) +          must be passed to the application in a driver-specific way and can then +          be used as the mmap offset argument. +        </para> +        <para> +          The GEM core provides a helper method <function>drm_gem_mmap</function> +          to handle object mapping. The method can be set directly as the mmap +          file operation handler. It will look up the GEM object based on the +          offset value and set the VMA operations to the +          <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield> +          field. 
Note that <function>drm_gem_mmap</function> doesn't map memory to +          userspace, but relies on the driver-provided fault handler to map pages +          individually. +        </para> +        <para> +          To use <function>drm_gem_mmap</function>, drivers must fill the struct +          <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield> +          field with a pointer to VM operations. +        </para> +        <para> +          <synopsis>struct vm_operations_struct *gem_vm_ops + +  struct vm_operations_struct { +          void (*open)(struct vm_area_struct * area); +          void (*close)(struct vm_area_struct * area); +          int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); +  };</synopsis> +        </para> +        <para> +          The <methodname>open</methodname> and <methodname>close</methodname> +          operations must update the GEM object reference count. Drivers can use +          the <function>drm_gem_vm_open</function> and +          <function>drm_gem_vm_close</function> helper functions directly as open +          and close handlers. +        </para> +        <para> +          The fault operation handler is responsible for mapping individual pages +          to userspace when a page fault occurs. Depending on the memory +          allocation scheme, drivers can allocate pages at fault time, or can +          decide to allocate memory for the GEM object at the time the object is +          created. +        </para> +        <para> +          Drivers that want to map the GEM object upfront instead of handling page +          faults can implement their own mmap file operation handler. +        </para> +      </sect3> +      <sect3> +        <title>Dumb GEM Objects</title> +        <para> +          The GEM API doesn't standardize GEM objects creation and leaves it to +          driver-specific ioctls. 
While not an issue for full-fledged graphics +          stacks that include device-specific userspace components (in libdrm for +          instance), this limit makes DRM-based early boot graphics unnecessarily +          complex. +        </para> +        <para> +          Dumb GEM objects partly alleviate the problem by providing a standard +          API to create dumb buffers suitable for scanout, which can then be used +          to create KMS frame buffers. +        </para> +        <para> +          To support dumb GEM objects drivers must implement the +          <methodname>dumb_create</methodname>, +          <methodname>dumb_destroy</methodname> and +          <methodname>dumb_map_offset</methodname> operations. +        </para> +        <itemizedlist> +          <listitem> +            <synopsis>int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, +                     struct drm_mode_create_dumb *args);</synopsis> +            <para> +              The <methodname>dumb_create</methodname> operation creates a GEM +              object suitable for scanout based on the width, height and depth +              from the struct <structname>drm_mode_create_dumb</structname> +              argument. It fills the argument's <structfield>handle</structfield>, +              <structfield>pitch</structfield> and <structfield>size</structfield> +              fields with a handle for the newly created GEM object and its line +              pitch and size in bytes. +            </para> +          </listitem> +          <listitem> +            <synopsis>int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev, +                      uint32_t handle);</synopsis> +            <para> +              The <methodname>dumb_destroy</methodname> operation destroys a dumb +              GEM object created by <methodname>dumb_create</methodname>. 
+            </para> +          </listitem> +          <listitem> +            <synopsis>int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, +                         uint32_t handle, uint64_t *offset);</synopsis> +            <para> +              The <methodname>dumb_map_offset</methodname> operation associates an +              mmap fake offset with the GEM object given by the handle and returns +              it. Drivers must use the +              <function>drm_gem_create_mmap_offset</function> function to +              associate the fake offset as described in +              <xref linkend="drm-gem-objects-mapping"/>. +            </para> +          </listitem> +        </itemizedlist> +      </sect3> +      <sect3> +        <title>Memory Coherency</title> +        <para> +          When mapped to the device or used in a command buffer, backing pages +          for an object are flushed to memory and marked write combined so as to +          be coherent with the GPU. Likewise, if the CPU accesses an object +          after the GPU has finished rendering to the object, then the object +          must be made coherent with the CPU's view of memory, usually involving +          GPU cache flushing of various kinds. This core CPU<->GPU +          coherency management is provided by a device-specific ioctl, which +          evaluates an object's current domain and performs any necessary +          flushing or synchronization to put the object into the desired +          coherency domain (note that the object may be busy, i.e. an active +          render target; in that case, setting the domain blocks the client and +          waits for rendering to complete before performing any necessary +          flushing operations). 
+        </para> +      </sect3> +      <sect3> +        <title>Command Execution</title> +        <para> +	  Perhaps the most important GEM function for GPU devices is providing a +          command execution interface to clients. Client programs construct +          command buffers containing references to previously allocated memory +          objects, and then submit them to GEM. At that point, GEM takes care to +          bind all the objects into the GTT, execute the buffer, and provide +          necessary synchronization between clients accessing the same buffers. +          This often involves evicting some objects from the GTT and re-binding +          others (a fairly expensive operation), and providing relocation +          support which hides fixed GTT offsets from clients. Clients must take +          care not to submit command buffers that reference more objects than +          can fit in the GTT; otherwise, GEM will reject them and no rendering +          will occur. Similarly, if several objects in the buffer require fence +          registers to be allocated for correct rendering (e.g. 2D blits on +          pre-965 chips), care must be taken not to require more fence registers +          than are available to the client. Such resource management should be +          abstracted from the client in libdrm. +        </para>        </sect3>      </sect2> +  </sect1> +  <!-- Internals: mode setting --> + +  <sect1 id="drm-mode-setting"> +    <title>Mode Setting</title> +    <para> +      Drivers must initialize the mode setting core by calling +      <function>drm_mode_config_init</function> on the DRM device. The function +      initializes the <structname>drm_device</structname> +      <structfield>mode_config</structfield> field and never fails. Once done, +      mode configuration must be setup by initializing the following fields. 
+    </para> +    <itemizedlist> +      <listitem> +        <synopsis>int min_width, min_height; +int max_width, max_height;</synopsis> +        <para> +	  Minimum and maximum width and height of the frame buffers in pixel +	  units. +	</para> +      </listitem> +      <listitem> +        <synopsis>struct drm_mode_config_funcs *funcs;</synopsis> +	<para>Mode setting functions.</para> +      </listitem> +    </itemizedlist>      <sect2> -      <title>Output configuration</title> +      <title>Frame Buffer Creation</title> +      <synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev, +				     struct drm_file *file_priv, +				     struct drm_mode_fb_cmd2 *mode_cmd);</synopsis>        <para> -	The final initialization task is output configuration.  This involves: -	<itemizedlist> -	  <listitem> -	    Finding and initializing the CRTCs, encoders, and connectors -	    for the device. -	  </listitem> -	  <listitem> -	    Creating an initial configuration. -	  </listitem> -	  <listitem> -	    Registering a framebuffer console driver. -	  </listitem> -	</itemizedlist> +        Frame buffers are abstract memory objects that provide a source of +        pixels to scanout to a CRTC. Applications explicitly request the +        creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and +        receive an opaque handle that can be passed to the KMS CRTC control, +        plane configuration and page flip functions. +      </para> +      <para> +        Frame buffers rely on the underneath memory manager for low-level memory +        operations. When creating a frame buffer applications pass a memory +        handle (or a list of memory handles for multi-planar formats) through +        the <parameter>drm_mode_fb_cmd2</parameter> argument. This document +        assumes that the driver uses GEM, those handles thus reference GEM +        objects. 
+      </para> +      <para> +        Drivers must first validate the requested frame buffer parameters passed +        through the mode_cmd argument. In particular this is where invalid +        sizes, pixel formats or pitches can be caught. +      </para> +      <para> +        If the parameters are deemed valid, drivers then create, initialize and +        return an instance of struct <structname>drm_framebuffer</structname>. +        If desired the instance can be embedded in a larger driver-specific +        structure. The new instance is initialized with a call to +        <function>drm_framebuffer_init</function> which takes a pointer to DRM +        frame buffer operations (struct +        <structname>drm_framebuffer_funcs</structname>). Frame buffer operations are +        <itemizedlist> +          <listitem> +            <synopsis>int (*create_handle)(struct drm_framebuffer *fb, +		     struct drm_file *file_priv, unsigned int *handle);</synopsis> +            <para> +              Create a handle to the frame buffer underlying memory object. If +              the frame buffer uses a multi-plane format, the handle will +              reference the memory object associated with the first plane. +            </para> +            <para> +              Drivers call <function>drm_gem_handle_create</function> to create +              the handle. +            </para> +          </listitem> +          <listitem> +            <synopsis>void (*destroy)(struct drm_framebuffer *framebuffer);</synopsis> +            <para> +              Destroy the frame buffer object and frees all associated +              resources. Drivers must call +              <function>drm_framebuffer_cleanup</function> to free resources +              allocated by the DRM core for the frame buffer object, and must +              make sure to unreference all memory objects associated with the +              frame buffer. 
Handles created by the +              <methodname>create_handle</methodname> operation are released by +              the DRM core. +            </para> +          </listitem> +          <listitem> +            <synopsis>int (*dirty)(struct drm_framebuffer *framebuffer, +	     struct drm_file *file_priv, unsigned flags, unsigned color, +	     struct drm_clip_rect *clips, unsigned num_clips);</synopsis> +            <para> +              This optional operation notifies the driver that a region of the +              frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB +              ioctl call. +            </para> +          </listitem> +        </itemizedlist> +      </para> +      <para> +        After initializing the <structname>drm_framebuffer</structname> +        instance drivers must fill its <structfield>width</structfield>, +        <structfield>height</structfield>, <structfield>pitches</structfield>, +        <structfield>offsets</structfield>, <structfield>depth</structfield>, +        <structfield>bits_per_pixel</structfield> and +        <structfield>pixel_format</structfield> fields from the values passed +        through the <parameter>drm_mode_fb_cmd2</parameter> argument. They +        should call the <function>drm_helper_mode_fill_fb_struct</function> +        helper function to do so. +      </para> +    </sect2> +    <sect2> +      <title>Output Polling</title> +      <synopsis>void (*output_poll_changed)(struct drm_device *dev);</synopsis> +      <para> +        This operation notifies the driver that the status of one or more +        connectors has changed. Drivers that use the fb helper can just call the +        <function>drm_fb_helper_hotplug_event</function> function to handle this +        operation. 
+      </para> +    </sect2> +  </sect1> + +  <!-- Internals: kms initialization and cleanup --> + +  <sect1 id="drm-kms-init"> +    <title>KMS Initialization and Cleanup</title> +    <para> +      A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders +      and connectors. KMS drivers must thus create and initialize all those +      objects at load time after initializing mode setting. +    </para> +    <sect2> +      <title>CRTCs (struct <structname>drm_crtc</structname>)</title> +      <para> +        A CRTC is an abstraction representing a part of the chip that contains a +	pointer to a scanout buffer. Therefore, the number of CRTCs available +	determines how many independent scanout buffers can be active at any +	given time. The CRTC structure contains several fields to support this: +	a pointer to some video memory (abstracted as a frame buffer object), a +	display mode, and an (x, y) offset into the video memory to support +	panning or configurations where one piece of video memory spans multiple +	CRTCs.        </para>        <sect3> -	<title>Output discovery and initialization</title> -	<para> -	  Several core functions exist to create CRTCs, encoders, and -	  connectors, namely: drm_crtc_init(), drm_connector_init(), and -	  drm_encoder_init(), along with several "helper" functions to -	  perform common tasks. -	</para> -	<para> -	  Connectors should be registered with sysfs once they've been -	  detected and initialized, using the -	  drm_sysfs_connector_add() function.  Likewise, when they're -	  removed from the system, they should be destroyed with -	  drm_sysfs_connector_remove(). -	</para> -	<programlisting> -<![CDATA[ +        <title>CRTC Initialization</title> +        <para> +          A KMS device must create and register at least one struct +          <structname>drm_crtc</structname> instance. 
The instance is allocated +          and zeroed by the driver, possibly as part of a larger structure, and +          registered with a call to <function>drm_crtc_init</function> with a +          pointer to CRTC functions. +        </para> +      </sect3> +      <sect3> +        <title>CRTC Operations</title> +        <sect4> +          <title>Set Configuration</title> +          <synopsis>int (*set_config)(struct drm_mode_set *set);</synopsis> +          <para> +            Apply a new CRTC configuration to the device. The configuration +            specifies a CRTC, a frame buffer to scan out from, a (x,y) position in +            the frame buffer, a display mode and an array of connectors to drive +            with the CRTC if possible. +          </para> +          <para> +            If the frame buffer specified in the configuration is NULL, the driver +            must detach all encoders connected to the CRTC and all connectors +            attached to those encoders and disable them. +          </para> +          <para> +            This operation is called with the mode config lock held. +          </para> +          <note><para> +            FIXME: How should set_config interact with DPMS? If the CRTC is +            suspended, should it be resumed? +          </para></note> +        </sect4> +        <sect4> +          <title>Page Flipping</title> +          <synopsis>int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, +                   struct drm_pending_vblank_event *event);</synopsis> +          <para> +            Schedule a page flip to the given frame buffer for the CRTC. This +            operation is called with the mode config mutex held. +          </para> +          <para> +            Page flipping is a synchronization mechanism that replaces the frame +            buffer being scanned out by the CRTC with a new frame buffer during +            vertical blanking, avoiding tearing. 
When an application requests a page +            flip the DRM core verifies that the new frame buffer is large enough to +            be scanned out by  the CRTC in the currently configured mode and then +            calls the CRTC <methodname>page_flip</methodname> operation with a +            pointer to the new frame buffer. +          </para> +          <para> +            The <methodname>page_flip</methodname> operation schedules a page flip. +            Once any pending rendering targetting the new frame buffer has +            completed, the CRTC will be reprogrammed to display that frame buffer +            after the next vertical refresh. The operation must return immediately +            without waiting for rendering or page flip to complete and must block +            any new rendering to the frame buffer until the page flip completes. +          </para> +          <para> +            If a page flip is already pending, the +            <methodname>page_flip</methodname> operation must return +            -<errorname>EBUSY</errorname>. +          </para> +          <para> +            To synchronize page flip to vertical blanking the driver will likely +            need to enable vertical blanking interrupts. It should call +            <function>drm_vblank_get</function> for that purpose, and call +            <function>drm_vblank_put</function> after the page flip completes. +          </para> +          <para> +            If the application has requested to be notified when page flip completes +            the <methodname>page_flip</methodname> operation will be called with a +            non-NULL <parameter>event</parameter> argument pointing to a +            <structname>drm_pending_vblank_event</structname> instance. 
Upon page +            flip completion the driver must fill the +            <parameter>event</parameter>::<structfield>event</structfield> +            <structfield>sequence</structfield>, <structfield>tv_sec</structfield> +            and <structfield>tv_usec</structfield> fields with the associated +            vertical blanking count and timestamp, add the event to the +            <parameter>drm_file</parameter> list of events to be signaled, and wake +            up any waiting process. This can be performed with +            <programlisting><![CDATA[ +            struct timeval now; + +            event->event.sequence = drm_vblank_count_and_time(..., &now); +            event->event.tv_sec = now.tv_sec; +            event->event.tv_usec = now.tv_usec; + +            spin_lock_irqsave(&dev->event_lock, flags); +            list_add_tail(&event->base.link, &event->base.file_priv->event_list); +            wake_up_interruptible(&event->base.file_priv->event_wait); +            spin_unlock_irqrestore(&dev->event_lock, flags); +            ]]></programlisting> +          </para> +          <note><para> +            FIXME: Could drivers that don't need to wait for rendering to complete +            just add the event to <literal>dev->vblank_event_list</literal> and +            let the DRM core handle everything, as for "normal" vertical blanking +            events? +          </para></note> +          <para> +            While waiting for the page flip to complete, the +            <literal>event->base.link</literal> list head can be used freely by +            the driver to store the pending event in a driver-specific list. +          </para> +          <para> +            If the file handle is closed before the event is signaled, drivers must +            take care to destroy the event in their +            <methodname>preclose</methodname> operation (and, if needed, call +            <function>drm_vblank_put</function>). 
+          </para> +        </sect4> +        <sect4> +          <title>Miscellaneous</title> +          <itemizedlist> +            <listitem> +              <synopsis>void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, +                        uint32_t start, uint32_t size);</synopsis> +              <para> +                Apply a gamma table to the device. The operation is optional. +              </para> +            </listitem> +            <listitem> +              <synopsis>void (*destroy)(struct drm_crtc *crtc);</synopsis> +              <para> +                Destroy the CRTC when not needed anymore. See +                <xref linkend="drm-kms-init"/>. +              </para> +            </listitem> +          </itemizedlist> +        </sect4> +      </sect3> +    </sect2> +    <sect2> +      <title>Planes (struct <structname>drm_plane</structname>)</title> +      <para> +        A plane represents an image source that can be blended with or overlayed +	on top of a CRTC during the scanout process. Planes are associated with +	a frame buffer to crop a portion of the image memory (source) and +	optionally scale it to a destination size. The result is then blended +	with or overlayed on top of a CRTC. +      </para> +      <sect3> +        <title>Plane Initialization</title> +        <para> +          Planes are optional. To create a plane, a KMS drivers allocates and +          zeroes an instances of struct <structname>drm_plane</structname> +          (possibly as part of a larger structure) and registers it with a call +          to <function>drm_plane_init</function>. The function takes a bitmask +          of the CRTCs that can be associated with the plane, a pointer to the +          plane functions and a list of format supported formats. 
+        </para> +      </sect3> +      <sect3> +        <title>Plane Operations</title> +        <itemizedlist> +          <listitem> +            <synopsis>int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, +                        struct drm_framebuffer *fb, int crtc_x, int crtc_y, +                        unsigned int crtc_w, unsigned int crtc_h, +                        uint32_t src_x, uint32_t src_y, +                        uint32_t src_w, uint32_t src_h);</synopsis> +            <para> +              Enable and configure the plane to use the given CRTC and frame buffer. +            </para> +            <para> +              The source rectangle in frame buffer memory coordinates is given by +              the <parameter>src_x</parameter>, <parameter>src_y</parameter>, +              <parameter>src_w</parameter> and <parameter>src_h</parameter> +              parameters (as 16.16 fixed point values). Devices that don't support +              subpixel plane coordinates can ignore the fractional part. +            </para> +            <para> +              The destination rectangle in CRTC coordinates is given by the +              <parameter>crtc_x</parameter>, <parameter>crtc_y</parameter>, +              <parameter>crtc_w</parameter> and <parameter>crtc_h</parameter> +              parameters (as integer values). Devices scale the source rectangle to +              the destination rectangle. If scaling is not supported, and the source +              rectangle size doesn't match the destination rectangle size, the +              driver must return a -<errorname>EINVAL</errorname> error. +            </para> +          </listitem> +          <listitem> +            <synopsis>int (*disable_plane)(struct drm_plane *plane);</synopsis> +            <para> +              Disable the plane. The DRM core calls this method in response to a +              DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0. 
+              Disabled planes must not be processed by the CRTC. +            </para> +          </listitem> +          <listitem> +            <synopsis>void (*destroy)(struct drm_plane *plane);</synopsis> +            <para> +              Destroy the plane when not needed anymore. See +              <xref linkend="drm-kms-init"/>. +            </para> +          </listitem> +        </itemizedlist> +      </sect3> +    </sect2> +    <sect2> +      <title>Encoders (struct <structname>drm_encoder</structname>)</title> +      <para> +        An encoder takes pixel data from a CRTC and converts it to a format +	suitable for any attached connectors. On some devices, it may be +	possible to have a CRTC send data to more than one encoder. In that +	case, both encoders would receive data from the same scanout buffer, +	resulting in a "cloned" display configuration across the connectors +	attached to each encoder. +      </para> +      <sect3> +        <title>Encoder Initialization</title> +        <para> +          As for CRTCs, a KMS driver must create, initialize and register at +          least one struct <structname>drm_encoder</structname> instance. The +          instance is allocated and zeroed by the driver, possibly as part of a +          larger structure. +        </para> +        <para> +          Drivers must initialize the struct <structname>drm_encoder</structname> +          <structfield>possible_crtcs</structfield> and +          <structfield>possible_clones</structfield> fields before registering the +          encoder. Both fields are bitmasks of respectively the CRTCs that the +          encoder can be connected to, and sibling encoders candidate for cloning. +        </para> +        <para> +          After being initialized, the encoder must be registered with a call to +          <function>drm_encoder_init</function>. The function takes a pointer to +          the encoder functions and an encoder type. 
Supported types are +          <itemizedlist> +            <listitem> +              DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A +              </listitem> +            <listitem> +              DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort +            </listitem> +            <listitem> +              DRM_MODE_ENCODER_LVDS for display panels +            </listitem> +            <listitem> +              DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component, +              SCART) +            </listitem> +            <listitem> +              DRM_MODE_ENCODER_VIRTUAL for virtual machine displays +            </listitem> +          </itemizedlist> +        </para> +        <para> +          Encoders must be attached to a CRTC to be used. DRM drivers leave +          encoders unattached at initialization time. Applications (or the fbdev +          compatibility layer when implemented) are responsible for attaching the +          encoders they want to use to a CRTC. +        </para> +      </sect3> +      <sect3> +        <title>Encoder Operations</title> +        <itemizedlist> +          <listitem> +            <synopsis>void (*destroy)(struct drm_encoder *encoder);</synopsis> +            <para> +              Called to destroy the encoder when not needed anymore. See +              <xref linkend="drm-kms-init"/>. +            </para> +          </listitem> +        </itemizedlist> +      </sect3> +    </sect2> +    <sect2> +      <title>Connectors (struct <structname>drm_connector</structname>)</title> +      <para> +        A connector is the final destination for pixel data on a device, and +	usually connects directly to an external display device like a monitor +	or laptop panel. A connector can only be attached to one encoder at a +	time. 
The connector is also the structure where information about the +	attached display is kept, so it contains fields for display data, EDID +	data, DPMS & connection status, and information about modes +	supported on the attached displays. +      </para> +      <sect3> +        <title>Connector Initialization</title> +        <para> +          Finally a KMS driver must create, initialize, register and attach at +          least one struct <structname>drm_connector</structname> instance. The +          instance is created as other KMS objects and initialized by setting the +          following fields. +        </para> +        <variablelist> +          <varlistentry> +            <term><structfield>interlace_allowed</structfield></term> +            <listitem><para> +              Whether the connector can handle interlaced modes. +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term><structfield>doublescan_allowed</structfield></term> +            <listitem><para> +              Whether the connector can handle doublescan. +            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term><structfield>display_info +            </structfield></term> +            <listitem><para> +              Display information is filled from EDID information when a display +              is detected. For non hot-pluggable displays such as flat panels in +              embedded systems, the driver should initialize the +              <structfield>display_info</structfield>.<structfield>width_mm</structfield> +              and +              <structfield>display_info</structfield>.<structfield>height_mm</structfield> +              fields with the physical size of the display. 
+            </para></listitem> +          </varlistentry> +          <varlistentry> +            <term id="drm-kms-connector-polled"><structfield>polled</structfield></term> +            <listitem><para> +              Connector polling mode, a combination of +              <variablelist> +                <varlistentry> +                  <term>DRM_CONNECTOR_POLL_HPD</term> +                  <listitem><para> +                    The connector generates hotplug events and doesn't need to be +                    periodically polled. The CONNECT and DISCONNECT flags must not +                    be set together with the HPD flag. +                  </para></listitem> +                </varlistentry> +                <varlistentry> +                  <term>DRM_CONNECTOR_POLL_CONNECT</term> +                  <listitem><para> +                    Periodically poll the connector for connection. +                  </para></listitem> +                </varlistentry> +                <varlistentry> +                  <term>DRM_CONNECTOR_POLL_DISCONNECT</term> +                  <listitem><para> +                    Periodically poll the connector for disconnection. +                  </para></listitem> +                </varlistentry> +              </variablelist> +              Set to 0 for connectors that don't support connection status +              discovery. +            </para></listitem> +          </varlistentry> +        </variablelist> +        <para> +          The connector is then registered with a call to +          <function>drm_connector_init</function> with a pointer to the connector +          functions and a connector type, and exposed through sysfs with a call to +          <function>drm_sysfs_connector_add</function>. 
+        </para> +        <para> +          Supported connector types are +          <itemizedlist> +            <listitem>DRM_MODE_CONNECTOR_VGA</listitem> +            <listitem>DRM_MODE_CONNECTOR_DVII</listitem> +            <listitem>DRM_MODE_CONNECTOR_DVID</listitem> +            <listitem>DRM_MODE_CONNECTOR_DVIA</listitem> +            <listitem>DRM_MODE_CONNECTOR_Composite</listitem> +            <listitem>DRM_MODE_CONNECTOR_SVIDEO</listitem> +            <listitem>DRM_MODE_CONNECTOR_LVDS</listitem> +            <listitem>DRM_MODE_CONNECTOR_Component</listitem> +            <listitem>DRM_MODE_CONNECTOR_9PinDIN</listitem> +            <listitem>DRM_MODE_CONNECTOR_DisplayPort</listitem> +            <listitem>DRM_MODE_CONNECTOR_HDMIA</listitem> +            <listitem>DRM_MODE_CONNECTOR_HDMIB</listitem> +            <listitem>DRM_MODE_CONNECTOR_TV</listitem> +            <listitem>DRM_MODE_CONNECTOR_eDP</listitem> +            <listitem>DRM_MODE_CONNECTOR_VIRTUAL</listitem> +          </itemizedlist> +        </para> +        <para> +          Connectors must be attached to an encoder to be used. For devices that +          map connectors to encoders 1:1, the connector should be attached at +          initialization time with a call to +          <function>drm_mode_connector_attach_encoder</function>. The driver must +          also set the <structname>drm_connector</structname> +          <structfield>encoder</structfield> field to point to the attached +          encoder. +        </para> +        <para> +          Finally, drivers must initialize the connectors state change detection +          with a call to <function>drm_kms_helper_poll_init</function>. If at +          least one connector is pollable but can't generate hotplug interrupts +          (indicated by the DRM_CONNECTOR_POLL_CONNECT and +          DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will +          automatically be queued to periodically poll for changes. 
Connectors +          that can generate hotplug interrupts must be marked with the +          DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must +          call <function>drm_helper_hpd_irq_event</function>. The function will +          queue a delayed work to check the state of all connectors, but no +          periodic polling will be done. +        </para> +      </sect3> +      <sect3> +        <title>Connector Operations</title> +        <note><para> +          Unless otherwise state, all operations are mandatory. +        </para></note> +        <sect4> +          <title>DPMS</title> +          <synopsis>void (*dpms)(struct drm_connector *connector, int mode);</synopsis> +          <para> +            The DPMS operation sets the power state of a connector. The mode +            argument is one of +            <itemizedlist> +              <listitem><para>DRM_MODE_DPMS_ON</para></listitem> +              <listitem><para>DRM_MODE_DPMS_STANDBY</para></listitem> +              <listitem><para>DRM_MODE_DPMS_SUSPEND</para></listitem> +              <listitem><para>DRM_MODE_DPMS_OFF</para></listitem> +            </itemizedlist> +          </para> +          <para> +            In all but DPMS_ON mode the encoder to which the connector is attached +            should put the display in low-power mode by driving its signals +            appropriately. If more than one connector is attached to the encoder +            care should be taken not to change the power state of other displays as +            a side effect. Low-power mode should be propagated to the encoders and +            CRTCs when all related connectors are put in low-power mode. 
+          </para> +        </sect4> +        <sect4> +          <title>Modes</title> +          <synopsis>int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, +                      uint32_t max_height);</synopsis> +          <para> +            Fill the mode list with all supported modes for the connector. If the +            <parameter>max_width</parameter> and <parameter>max_height</parameter> +            arguments are non-zero, the implementation must ignore all modes wider +            than <parameter>max_width</parameter> or higher than +            <parameter>max_height</parameter>. +          </para> +          <para> +            The connector must also fill in this operation its +            <structfield>display_info</structfield> +            <structfield>width_mm</structfield> and +            <structfield>height_mm</structfield> fields with the connected display +            physical size in millimeters. The fields should be set to 0 if the value +            isn't known or is not applicable (for instance for projector devices). +          </para> +        </sect4> +        <sect4> +          <title>Connection Status</title> +          <para> +            The connection status is updated through polling or hotplug events when +            supported (see <xref linkend="drm-kms-connector-polled"/>). The status +            value is reported to userspace through ioctls and must not be used +            inside the driver, as it only gets initialized by a call to +            <function>drm_mode_getconnector</function> from userspace. +          </para> +          <synopsis>enum drm_connector_status (*detect)(struct drm_connector *connector, +                                        bool force);</synopsis> +          <para> +            Check to see if anything is attached to the connector. 
The +            <parameter>force</parameter> parameter is set to false whilst polling or +            to true when checking the connector due to user request. +            <parameter>force</parameter> can be used by the driver to avoid +            expensive, destructive operations during automated probing. +          </para> +          <para> +            Return connector_status_connected if something is connected to the +            connector, connector_status_disconnected if nothing is connected and +            connector_status_unknown if the connection state isn't known. +          </para> +          <para> +            Drivers should only return connector_status_connected if the connection +            status has really been probed as connected. Connectors that can't detect +            the connection status, or failed connection status probes, should return +            connector_status_unknown. +          </para> +        </sect4> +        <sect4> +          <title>Miscellaneous</title> +          <itemizedlist> +            <listitem> +              <synopsis>void (*destroy)(struct drm_connector *connector);</synopsis> +              <para> +                Destroy the connector when not needed anymore. See +                <xref linkend="drm-kms-init"/>. +              </para> +            </listitem> +          </itemizedlist> +        </sect4> +      </sect3> +    </sect2> +    <sect2> +      <title>Cleanup</title> +      <para> +        The DRM core manages its objects' lifetime. When an object is not needed +	anymore the core calls its destroy function, which must clean up and +	free every resource allocated for the object. 
Every +	<function>drm_*_init</function> call must be matched with a +	corresponding <function>drm_*_cleanup</function> call to cleanup CRTCs +	(<function>drm_crtc_cleanup</function>), planes +	(<function>drm_plane_cleanup</function>), encoders +	(<function>drm_encoder_cleanup</function>) and connectors +	(<function>drm_connector_cleanup</function>). Furthermore, connectors +	that have been added to sysfs must be removed by a call to +	<function>drm_sysfs_connector_remove</function> before calling +	<function>drm_connector_cleanup</function>. +      </para> +      <para> +        Connectors state change detection must be cleanup up with a call to +	<function>drm_kms_helper_poll_fini</function>. +      </para> +    </sect2> +    <sect2> +      <title>Output discovery and initialization example</title> +      <programlisting><![CDATA[  void intel_crt_init(struct drm_device *dev)  {  	struct drm_connector *connector; @@ -556,252 +1610,741 @@ void intel_crt_init(struct drm_device *dev)  	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);  	drm_sysfs_connector_add(connector); -} -]]> -	</programlisting> -	<para> -	  In the example above (again, taken from the i915 driver), a -	  CRT connector and encoder combination is created.  A device-specific -	  i2c bus is also created for fetching EDID data and -	  performing monitor detection.  Once the process is complete, -	  the new connector is registered with sysfs to make its -	  properties available to applications. -	</para> -	<sect4> -	  <title>Helper functions and core functions</title> -	  <para> -	    Since many PC-class graphics devices have similar display output -	    designs, the DRM provides a set of helper functions to make -	    output management easier.  The core helper routines handle -	    encoder re-routing and the disabling of unused functions following -	    mode setting.  Using the helpers is optional, but recommended for -	    devices with PC-style architectures (i.e. 
a set of display planes -	    for feeding pixels to encoders which are in turn routed to -	    connectors).  Devices with more complex requirements needing -	    finer grained management may opt to use the core callbacks -	    directly. -	  </para> -	  <para> -	    [Insert typical diagram here.]  [Insert OMAP style config here.] -	  </para> -	</sect4> -	<para> -	  Each encoder object needs to provide: -	  <itemizedlist> -	    <listitem> -	      A DPMS (basically on/off) function. -	    </listitem> -	    <listitem> -	      A mode-fixup function (for converting requested modes into -	      native hardware timings). -	    </listitem> -	    <listitem> -	      Functions (prepare, set, and commit) for use by the core DRM -	      helper functions. -	    </listitem> -	  </itemizedlist> -	  Connector helpers need to provide functions (mode-fetch, validity, -	  and encoder-matching) for returning an ideal encoder for a given -	  connector.  The core connector functions include a DPMS callback, -	  save/restore routines (deprecated), detection, mode probing, -	  property handling, and cleanup functions. -	</para> -<!--!Edrivers/char/drm/drm_crtc.h--> -<!--!Edrivers/char/drm/drm_crtc.c--> -<!--!Edrivers/char/drm/drm_crtc_helper.c--> -      </sect3> +}]]></programlisting> +      <para> +        In the example above (taken from the i915 driver), a CRTC, connector and +        encoder combination is created. A device-specific i2c bus is also +        created for fetching EDID data and performing monitor detection. Once +        the process is complete, the new connector is registered with sysfs to +        make its properties available to applications. 
+      </para>      </sect2>    </sect1> -  <!-- Internals: vblank handling --> +  <!-- Internals: mid-layer helper functions -->    <sect1> -    <title>VBlank event handling</title> +    <title>Mid-layer Helper Functions</title>      <para> -      The DRM core exposes two vertical blank related ioctls: -      <variablelist> -        <varlistentry> -          <term>DRM_IOCTL_WAIT_VBLANK</term> -          <listitem> -            <para> -              This takes a struct drm_wait_vblank structure as its argument, -              and it is used to block or request a signal when a specified -              vblank event occurs. -            </para> -          </listitem> -        </varlistentry> -        <varlistentry> -          <term>DRM_IOCTL_MODESET_CTL</term> -          <listitem> -            <para> -              This should be called by application level drivers before and -              after mode setting, since on many devices the vertical blank -              counter is reset at that time.  Internally, the DRM snapshots -              the last vblank count when the ioctl is called with the -              _DRM_PRE_MODESET command, so that the counter won't go backwards -              (which is dealt with when _DRM_POST_MODESET is used). -            </para> -          </listitem> -        </varlistentry> -      </variablelist> -<!--!Edrivers/char/drm/drm_irq.c--> +      The CRTC, encoder and connector functions provided by the drivers +      implement the DRM API. They're called by the DRM core and ioctl handlers +      to handle device state changes and configuration request. As implementing +      those functions often requires logic not specific to drivers, mid-layer +      helper functions are available to avoid duplicating boilerplate code.      
</para>      <para> -      To support the functions above, the DRM core provides several -      helper functions for tracking vertical blank counters, and -      requires drivers to provide several callbacks: -      get_vblank_counter(), enable_vblank() and disable_vblank().  The -      core uses get_vblank_counter() to keep the counter accurate -      across interrupt disable periods.  It should return the current -      vertical blank event count, which is often tracked in a device -      register.  The enable and disable vblank callbacks should enable -      and disable vertical blank interrupts, respectively.  In the -      absence of DRM clients waiting on vblank events, the core DRM -      code uses the disable_vblank() function to disable -      interrupts, which saves power.  They are re-enabled again when -      a client calls the vblank wait ioctl above. +      The DRM core contains one mid-layer implementation. The mid-layer provides +      implementations of several CRTC, encoder and connector functions (called +      from the top of the mid-layer) that pre-process requests and call +      lower-level functions provided by the driver (at the bottom of the +      mid-layer). For instance, the +      <function>drm_crtc_helper_set_config</function> function can be used to +      fill the struct <structname>drm_crtc_funcs</structname> +      <structfield>set_config</structfield> field. When called, it will split +      the <methodname>set_config</methodname> operation in smaller, simpler +      operations and call the driver to handle them.      </para>      <para> -      A device that doesn't provide a count register may simply use an -      internal atomic counter incremented on every vertical blank -      interrupt (and then treat the enable_vblank() and disable_vblank() -      callbacks as no-ops). 
+      To use the mid-layer, drivers call <function>drm_crtc_helper_add</function>, +      <function>drm_encoder_helper_add</function> and +      <function>drm_connector_helper_add</function> functions to install their +      mid-layer bottom operations handlers, and fill the +      <structname>drm_crtc_funcs</structname>, +      <structname>drm_encoder_funcs</structname> and +      <structname>drm_connector_funcs</structname> structures with pointers to +      the mid-layer top API functions. Installing the mid-layer bottom operation +      handlers is best done right after registering the corresponding KMS object.      </para> +    <para> +      The mid-layer is not split between CRTC, encoder and connector operations. +      To use it, a driver must provide bottom functions for all of the three KMS +      entities. +    </para> +    <sect2> +      <title>Helper Functions</title> +      <itemizedlist> +        <listitem> +          <synopsis>int drm_crtc_helper_set_config(struct drm_mode_set *set);</synopsis> +          <para> +            The <function>drm_crtc_helper_set_config</function> helper function +            is a CRTC <methodname>set_config</methodname> implementation. It +            first tries to locate the best encoder for each connector by calling +            the connector <methodname>best_encoder</methodname> helper +            operation. +          </para> +          <para> +            After locating the appropriate encoders, the helper function will +            call the <methodname>mode_fixup</methodname> encoder and CRTC helper +            operations to adjust the requested mode, or reject it completely in +            which case an error will be returned to the application. If the new +            configuration after mode adjustment is identical to the current +            configuration the helper function will return without performing any +            other operation. 
+          </para> +          <para> +            If the adjusted mode is identical to the current mode but changes to +            the frame buffer need to be applied, the +            <function>drm_crtc_helper_set_config</function> function will call +            the CRTC <methodname>mode_set_base</methodname> helper operation. If +            the adjusted mode differs from the current mode, or if the +            <methodname>mode_set_base</methodname> helper operation is not +            provided, the helper function performs a full mode set sequence by +            calling the <methodname>prepare</methodname>, +            <methodname>mode_set</methodname> and +            <methodname>commit</methodname> CRTC and encoder helper operations, +            in that order. +          </para> +        </listitem> +        <listitem> +          <synopsis>void drm_helper_connector_dpms(struct drm_connector *connector, int mode);</synopsis> +          <para> +            The <function>drm_helper_connector_dpms</function> helper function +            is a connector <methodname>dpms</methodname> implementation that +            tracks power state of connectors. To use the function, drivers must +            provide <methodname>dpms</methodname> helper operations for CRTCs +            and encoders to apply the DPMS state to the device. +          </para> +          <para> +            The mid-layer doesn't track the power state of CRTCs and encoders. +            The <methodname>dpms</methodname> helper operations can thus be +            called with a mode identical to the currently active mode. 
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+                                            uint32_t maxX, uint32_t maxY);</synopsis>
+          <para>
+            The <function>drm_helper_probe_single_connector_modes</function> helper
+            function is a connector <methodname>fill_modes</methodname>
+            implementation that updates the connection status for the connector
+            and then retrieves a list of modes by calling the connector
+            <methodname>get_modes</methodname> helper operation.
+          </para>
+          <para>
+            The function filters out modes larger than
+            <parameter>maxX</parameter> and <parameter>maxY</parameter>
+            if specified. It then calls the connector
+            <methodname>mode_valid</methodname> helper operation for each mode in
+            the probed list to check whether the mode is valid for the connector.
+          </para>
+        </listitem>
+      </itemizedlist>
+    </sect2>
+    <sect2>
+      <title>CRTC Helper Operations</title>
+      <itemizedlist>
+        <listitem id="drm-helper-crtc-mode-fixup">
+          <synopsis>bool (*mode_fixup)(struct drm_crtc *crtc,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);</synopsis>
+          <para>
+            Let CRTCs adjust the requested mode or reject it completely. This
+            operation returns true if the mode is accepted (possibly after being
+            adjusted) or false if it is rejected.
+          </para>
+          <para>
+            The <methodname>mode_fixup</methodname> operation should reject the
+            mode if it can't reasonably use it. The definition of "reasonable"
+            is currently fuzzy in this context. 
One possible behaviour would be +            to set the adjusted mode to the panel timings when a fixed-mode +            panel is used with hardware capable of scaling. Another behaviour +            would be to accept any input mode and adjust it to the closest mode +            supported by the hardware (FIXME: This needs to be clarified). +          </para> +        </listitem> +        <listitem> +          <synopsis>int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, +                     struct drm_framebuffer *old_fb)</synopsis> +          <para> +            Move the CRTC on the current frame buffer (stored in +            <literal>crtc->fb</literal>) to position (x,y). Any of the frame +            buffer, x position or y position may have been modified. +          </para> +          <para> +            This helper operation is optional. If not provided, the +            <function>drm_crtc_helper_set_config</function> function will fall +            back to the <methodname>mode_set</methodname> helper operation. +          </para> +          <note><para> +            FIXME: Why are x and y passed as arguments, as they can be accessed +            through <literal>crtc->x</literal> and +            <literal>crtc->y</literal>? +          </para></note> +        </listitem> +        <listitem> +          <synopsis>void (*prepare)(struct drm_crtc *crtc);</synopsis> +          <para> +            Prepare the CRTC for mode setting. This operation is called after +            validating the requested mode. Drivers use it to perform +            device-specific operations required before setting the new mode. 
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                struct drm_display_mode *adjusted_mode, int x, int y,
+                struct drm_framebuffer *old_fb);</synopsis>
+          <para>
+            Set a new mode, position and frame buffer. Depending on the device
+            requirements, the mode can be stored internally by the driver and
+            applied in the <methodname>commit</methodname> operation, or
+            programmed to the hardware immediately.
+          </para>
+          <para>
+            The <methodname>mode_set</methodname> operation returns 0 on success
+	    or a negative error code if an error occurs.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*commit)(struct drm_crtc *crtc);</synopsis>
+          <para>
+            Commit a mode. This operation is called after setting the new mode.
+            Upon return the device must use the new mode and be fully
+            operational.
+          </para>
+        </listitem>
+      </itemizedlist>
+    </sect2>
+    <sect2>
+      <title>Encoder Helper Operations</title>
+      <itemizedlist>
+        <listitem>
+          <synopsis>bool (*mode_fixup)(struct drm_encoder *encoder,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);</synopsis>
+          <note><para>
+            FIXME: The mode argument should be const, but the i915 driver modifies
+            mode->clock in <function>intel_dp_mode_fixup</function>.
+          </para></note>
+          <para>
+            Let encoders adjust the requested mode or reject it completely. This
+            operation returns true if the mode is accepted (possibly after being
+            adjusted) or false if it is rejected. 
See the
+            <link linkend="drm-helper-crtc-mode-fixup">mode_fixup CRTC helper
+            operation</link> for an explanation of the allowed adjustments.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*prepare)(struct drm_encoder *encoder);</synopsis>
+          <para>
+            Prepare the encoder for mode setting. This operation is called after
+            validating the requested mode. Drivers use it to perform
+            device-specific operations required before setting the new mode.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*mode_set)(struct drm_encoder *encoder,
+                 struct drm_display_mode *mode,
+                 struct drm_display_mode *adjusted_mode);</synopsis>
+          <para>
+            Set a new mode. Depending on the device requirements, the mode can
+            be stored internally by the driver and applied in the
+            <methodname>commit</methodname> operation, or programmed to the
+            hardware immediately.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*commit)(struct drm_encoder *encoder);</synopsis>
+          <para>
+            Commit a mode. This operation is called after setting the new mode.
+            Upon return the device must use the new mode and be fully
+            operational.
+          </para>
+        </listitem>
+      </itemizedlist>
+    </sect2>
+    <sect2>
+      <title>Connector Helper Operations</title>
+      <itemizedlist>
+        <listitem>
+          <synopsis>struct drm_encoder *(*best_encoder)(struct drm_connector *connector);</synopsis>
+          <para>
+            Return a pointer to the best encoder for the connector. Devices that
+            map connectors to encoders 1:1 simply return the pointer to the
+            associated encoder. This operation is mandatory. 
+          </para> +        </listitem> +        <listitem> +          <synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis> +          <para> +            Fill the connector's <structfield>probed_modes</structfield> list +            by parsing EDID data with <function>drm_add_edid_modes</function> or +            calling <function>drm_mode_probed_add</function> directly for every +            supported mode and return the number of modes it has detected. This +            operation is mandatory. +          </para> +          <para> +            When adding modes manually the driver creates each mode with a call to +            <function>drm_mode_create</function> and must fill the following fields. +            <itemizedlist> +              <listitem> +                <synopsis>__u32 type;</synopsis> +                <para> +                  Mode type bitmask, a combination of +                  <variablelist> +                    <varlistentry> +                      <term>DRM_MODE_TYPE_BUILTIN</term> +                      <listitem><para>not used?</para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_TYPE_CLOCK_C</term> +                      <listitem><para>not used?</para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_TYPE_CRTC_C</term> +                      <listitem><para>not used?</para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term> +        DRM_MODE_TYPE_PREFERRED - The preferred mode for the connector +                      </term> +                      <listitem> +                        <para>not used?</para> +                      </listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_TYPE_DEFAULT</term> +                      <listitem><para>not 
used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_USERDEF</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_DRIVER</term>
+                      <listitem>
+                        <para>
+                          The mode has been created by the driver (as opposed
+                          to user-created modes).
+                        </para>
+                      </listitem>
+                    </varlistentry>
+                  </variablelist>
+                  Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they
+                  create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred
+                  mode.
+                </para>
+              </listitem>
+              <listitem>
+                <synopsis>__u32 clock;</synopsis>
+                <para>Pixel clock frequency in kHz unit</para>
+              </listitem>
+              <listitem>
+                <synopsis>__u16 hdisplay, hsync_start, hsync_end, htotal;
+    __u16 vdisplay, vsync_start, vsync_end, vtotal;</synopsis>
+                <para>Horizontal and vertical timing information</para>
+                <screen><![CDATA[
+             Active                 Front           Sync           Back
+             Region                 Porch                          Porch
+    <-----------------------><----------------><-------------><-------------->
+
+      //////////////////////|
+     ////////////////////// |
+    //////////////////////  |..................               ................ 
+                                               _______________ + +    <----- [hv]display -----> +    <------------- [hv]sync_start ------------> +    <--------------------- [hv]sync_end ---------------------> +    <-------------------------------- [hv]total -----------------------------> +]]></screen> +              </listitem> +              <listitem> +                <synopsis>__u16 hskew; +    __u16 vscan;</synopsis> +                <para>Unknown</para> +              </listitem> +              <listitem> +                <synopsis>__u32 flags;</synopsis> +                <para> +                  Mode flags, a combination of +                  <variablelist> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_PHSYNC</term> +                      <listitem><para> +                        Horizontal sync is active high +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_NHSYNC</term> +                      <listitem><para> +                        Horizontal sync is active low +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_PVSYNC</term> +                      <listitem><para> +                        Vertical sync is active high +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_NVSYNC</term> +                      <listitem><para> +                        Vertical sync is active low +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_INTERLACE</term> +                      <listitem><para> +                        Mode is interlaced +                      </para></listitem> +                    </varlistentry> 
+                    <varlistentry> +                      <term>DRM_MODE_FLAG_DBLSCAN</term> +                      <listitem><para> +                        Mode uses doublescan +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_CSYNC</term> +                      <listitem><para> +                        Mode uses composite sync +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_PCSYNC</term> +                      <listitem><para> +                        Composite sync is active high +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_NCSYNC</term> +                      <listitem><para> +                        Composite sync is active low +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_HSKEW</term> +                      <listitem><para> +                        hskew provided (not used?) +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_BCAST</term> +                      <listitem><para> +                        not used? +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_PIXMUX</term> +                      <listitem><para> +                        not used? +                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_DBLCLK</term> +                      <listitem><para> +                        not used? 
+                      </para></listitem> +                    </varlistentry> +                    <varlistentry> +                      <term>DRM_MODE_FLAG_CLKDIV2</term> +                      <listitem><para> +                        ? +                      </para></listitem> +                    </varlistentry> +                  </variablelist> +                </para> +                <para> +                  Note that modes marked with the INTERLACE or DBLSCAN flags will be +                  filtered out by +                  <function>drm_helper_probe_single_connector_modes</function> if +                  the connector's <structfield>interlace_allowed</structfield> or +                  <structfield>doublescan_allowed</structfield> field is set to 0. +                </para> +              </listitem> +              <listitem> +                <synopsis>char name[DRM_DISPLAY_MODE_LEN];</synopsis> +                <para> +                  Mode name. The driver must call +                  <function>drm_mode_set_name</function> to fill the mode name from +                  <structfield>hdisplay</structfield>, +                  <structfield>vdisplay</structfield> and interlace flag after +                  filling the corresponding fields. +                </para> +              </listitem> +            </itemizedlist> +          </para> +          <para> +            The <structfield>vrefresh</structfield> value is computed by +            <function>drm_helper_probe_single_connector_modes</function>. +          </para> +          <para> +            When parsing EDID data, <function>drm_add_edid_modes</function> fill the +            connector <structfield>display_info</structfield> +            <structfield>width_mm</structfield> and +            <structfield>height_mm</structfield> fields. 
When creating modes
+            manually the <methodname>get_modes</methodname> helper operation must
+            set the <structfield>display_info</structfield>
+            <structfield>width_mm</structfield> and
+            <structfield>height_mm</structfield> fields if they haven't been set
+            already (for instance at initialization time when a fixed-size panel is
+            attached to the connector). The mode <structfield>width_mm</structfield>
+            and <structfield>height_mm</structfield> fields are only used internally
+            during EDID parsing and should not be set when creating modes manually.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*mode_valid)(struct drm_connector *connector,
+		  struct drm_display_mode *mode);</synopsis>
+          <para>
+            Verify whether a mode is valid for the connector. Return MODE_OK for
+            supported modes and one of the enum drm_mode_status values (MODE_*)
+            for unsupported modes. This operation is mandatory.
+          </para>
+          <para>
+            As the mode rejection reason is currently not used except for
+            immediately removing the unsupported mode, an implementation can
+            return MODE_BAD regardless of the exact reason why the mode is not
+            valid.
+          </para>
+          <note><para>
+            Note that the <methodname>mode_valid</methodname> helper operation is
+            only called for modes detected by the device, and
+            <emphasis>not</emphasis> for modes set by the user through the CRTC
+            <methodname>set_config</methodname> operation. 
+          </para></note> +        </listitem> +      </itemizedlist> +    </sect2>    </sect1> -  <sect1> -    <title>Memory management</title> +  <!-- Internals: vertical blanking --> + +  <sect1 id="drm-vertical-blank"> +    <title>Vertical Blanking</title>      <para> -      The memory manager lies at the heart of many DRM operations; it -      is required to support advanced client features like OpenGL -      pbuffers.  The DRM currently contains two memory managers: TTM -      and GEM. +      Vertical blanking plays a major role in graphics rendering. To achieve +      tear-free display, users must synchronize page flips and/or rendering to +      vertical blanking. The DRM API offers ioctls to perform page flips +      synchronized to vertical blanking and wait for vertical blanking.      </para> +    <para> +      The DRM core handles most of the vertical blanking management logic, which +      involves filtering out spurious interrupts, keeping race-free blanking +      counters, coping with counter wrap-around and resets and keeping use +      counts. It relies on the driver to generate vertical blanking interrupts +      and optionally provide a hardware vertical blanking counter. Drivers must +      implement the following operations. +    </para> +    <itemizedlist> +      <listitem> +        <synopsis>int (*enable_vblank) (struct drm_device *dev, int crtc); +void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis> +        <para> +	  Enable or disable vertical blanking interrupts for the given CRTC. +	</para> +      </listitem> +      <listitem> +        <synopsis>u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);</synopsis> +        <para> +	  Retrieve the value of the vertical blanking counter for the given +	  CRTC. If the hardware maintains a vertical blanking counter its value +	  should be returned. Otherwise drivers can use the +	  <function>drm_vblank_count</function> helper function to handle this +	  operation. 
+	</para>
+      </listitem>
+    </itemizedlist>
+    <para>
+      Drivers must initialize the vertical blanking handling core with a call to
+      <function>drm_vblank_init</function> in their
+      <methodname>load</methodname> operation. The function will set the struct
+      <structname>drm_device</structname>
+      <structfield>vblank_disable_allowed</structfield> field to 0. This will
+      keep vertical blanking interrupts enabled permanently until the first mode
+      set operation, where <structfield>vblank_disable_allowed</structfield> is
+      set to 1. The reason behind this is not clear. Drivers can set the field
+      to 1 after calling <function>drm_vblank_init</function> to make vertical
+      blanking interrupts dynamically managed from the beginning.
+    </para>
+    <para>
+      Vertical blanking interrupts can be enabled by the DRM core or by drivers
+      themselves (for instance to handle page flipping operations). The DRM core
+      maintains a vertical blanking use count to ensure that the interrupts are
+      not disabled while a user still needs them. To increment the use count,
+      drivers call <function>drm_vblank_get</function>. Upon return vertical
+      blanking interrupts are guaranteed to be enabled.
+    </para>
+    <para>
+      To decrement the use count drivers call
+      <function>drm_vblank_put</function>. Only when the use count drops to zero
+      will the DRM core disable the vertical blanking interrupts after a delay
+      by scheduling a timer. The delay is accessible through the vblankoffdelay
+      module parameter or the <varname>drm_vblank_offdelay</varname> global
+      variable and expressed in milliseconds. Its default value is 5000 ms.
+    </para>
+    <para>
+      When a vertical blanking interrupt occurs drivers only need to call the
+      <function>drm_handle_vblank</function> function to account for the
+      interrupt. 
+    </para> +    <para> +      Resources allocated by <function>drm_vblank_init</function> must be freed +      with a call to <function>drm_vblank_cleanup</function> in the driver +      <methodname>unload</methodname> operation handler. +    </para> +  </sect1> + +  <!-- Internals: open/close, file operations and ioctls --> +  <sect1> +    <title>Open/Close, File Operations and IOCTLs</title>      <sect2> -      <title>The Translation Table Manager (TTM)</title> +      <title>Open and Close</title> +      <synopsis>int (*firstopen) (struct drm_device *); +void (*lastclose) (struct drm_device *); +int (*open) (struct drm_device *, struct drm_file *); +void (*preclose) (struct drm_device *, struct drm_file *); +void (*postclose) (struct drm_device *, struct drm_file *);</synopsis> +      <abstract>Open and close handlers. None of those methods are mandatory. +      </abstract>        <para> -	TTM was developed by Tungsten Graphics, primarily by Thomas -	Hellström, and is intended to be a flexible, high performance -	graphics memory manager. +        The <methodname>firstopen</methodname> method is called by the DRM core +	when an application opens a device that has no other opened file handle. +	Similarly the <methodname>lastclose</methodname> method is called when +	the last application holding a file handle opened on the device closes +	it. Both methods are mostly used for UMS (User Mode Setting) drivers to +	acquire and release device resources which should be done in the +	<methodname>load</methodname> and <methodname>unload</methodname> +	methods for KMS drivers.        </para>        <para> -	Drivers wishing to support TTM must fill out a drm_bo_driver -	structure. +        Note that the <methodname>lastclose</methodname> method is also called +	at module unload time or, for hot-pluggable devices, when the device is +	unplugged. The <methodname>firstopen</methodname> and +	<methodname>lastclose</methodname> calls can thus be unbalanced.        
</para>        <para> -	TTM design background and information belongs here. +        The <methodname>open</methodname> method is called every time the device +	is opened by an application. Drivers can allocate per-file private data +	in this method and store them in the struct +	<structname>drm_file</structname> <structfield>driver_priv</structfield> +	field. Note that the <methodname>open</methodname> method is called +	before <methodname>firstopen</methodname>. +      </para> +      <para> +        The close operation is split into <methodname>preclose</methodname> and +	<methodname>postclose</methodname> methods. Drivers must stop and +	cleanup all per-file operations in the <methodname>preclose</methodname> +	method. For instance pending vertical blanking and page flip events must +	be cancelled. No per-file operation is allowed on the file handle after +	returning from the <methodname>preclose</methodname> method. +      </para> +      <para> +        Finally the <methodname>postclose</methodname> method is called as the +	last step of the close operation, right before calling the +	<methodname>lastclose</methodname> method if no other open file handle +	exists for the device. Drivers that have allocated per-file private data +	in the <methodname>open</methodname> method should free it here. +      </para> +      <para> +        The <methodname>lastclose</methodname> method should restore CRTC and +	plane properties to default value, so that a subsequent open of the +	device will not inherit state from the previous user.        </para>      </sect2> -      <sect2> -      <title>The Graphics Execution Manager (GEM)</title> +      <title>File Operations</title> +      <synopsis>const struct file_operations *fops</synopsis> +      <abstract>File operations for the DRM device node.</abstract>        <para> -	GEM is an Intel project, authored by Eric Anholt and Keith -	Packard.  It provides simpler interfaces than TTM, and is well -	suited for UMA devices. 
+        Drivers must define the file operations structure that forms the DRM +	userspace API entry point, even though most of those operations are +	implemented in the DRM core. The <methodname>open</methodname>, +	<methodname>release</methodname> and <methodname>ioctl</methodname> +	operations are handled by +	<programlisting> +	.owner = THIS_MODULE, +	.open = drm_open, +	.release = drm_release, +	.unlocked_ioctl = drm_ioctl, +  #ifdef CONFIG_COMPAT +	.compat_ioctl = drm_compat_ioctl, +  #endif +        </programlisting>        </para>        <para> -	GEM-enabled drivers must provide gem_init_object() and -	gem_free_object() callbacks to support the core memory -	allocation routines.  They should also provide several driver-specific -	ioctls to support command execution, pinning, buffer -	read & write, mapping, and domain ownership transfers. +        Drivers that implement private ioctls that requires 32/64bit +	compatibility support must provide their own +	<methodname>compat_ioctl</methodname> handler that processes private +	ioctls and calls <function>drm_compat_ioctl</function> for core ioctls.        </para>        <para> -	On a fundamental level, GEM involves several operations: -	<itemizedlist> -	  <listitem>Memory allocation and freeing</listitem> -	  <listitem>Command execution</listitem> -	  <listitem>Aperture management at command execution time</listitem> -	</itemizedlist> -	Buffer object allocation is relatively -	straightforward and largely provided by Linux's shmem layer, which -	provides memory to back each object.  When mapped into the GTT -	or used in a command buffer, the backing pages for an object are -	flushed to memory and marked write combined so as to be coherent -	with the GPU.  Likewise, if the CPU accesses an object after the GPU -	has finished rendering to the object, then the object must be made -	coherent with the CPU's view -	of memory, usually involving GPU cache flushing of various kinds. 
-	This core CPU<->GPU coherency management is provided by a -	device-specific ioctl, which evaluates an object's current domain and -	performs any necessary flushing or synchronization to put the object -	into the desired coherency domain (note that the object may be busy, -	i.e. an active render target; in that case, setting the domain -	blocks the client and waits for rendering to complete before -	performing any necessary flushing operations). +        The <methodname>read</methodname> and <methodname>poll</methodname> +	operations provide support for reading DRM events and polling them. They +	are implemented by +	<programlisting> +	.poll = drm_poll, +	.read = drm_read, +	.fasync = drm_fasync, +	.llseek = no_llseek, +	</programlisting> +      </para> +      <para> +        The memory mapping implementation varies depending on how the driver +	manages memory. Pre-GEM drivers will use <function>drm_mmap</function>, +	while GEM-aware drivers will use <function>drm_gem_mmap</function>. See +	<xref linkend="drm-gem"/>. +	<programlisting> +	.mmap = drm_gem_mmap, +	</programlisting>        </para>        <para> -	Perhaps the most important GEM function is providing a command -	execution interface to clients.  Client programs construct command -	buffers containing references to previously allocated memory objects, -	and then submit them to GEM.  At that point, GEM takes care to bind -	all the objects into the GTT, execute the buffer, and provide -	necessary synchronization between clients accessing the same buffers. -	This often involves evicting some objects from the GTT and re-binding -	others (a fairly expensive operation), and providing relocation -	support which hides fixed GTT offsets from clients.  Clients must -	take care not to submit command buffers that reference more objects -	than can fit in the GTT; otherwise, GEM will reject them and no rendering -	will occur.  
Similarly, if several objects in the buffer require -	fence registers to be allocated for correct rendering (e.g. 2D blits -	on pre-965 chips), care must be taken not to require more fence -	registers than are available to the client.  Such resource management -	should be abstracted from the client in libdrm. +        No other file operation is supported by the DRM API. +      </para> +    </sect2> +    <sect2> +      <title>IOCTLs</title> +      <synopsis>struct drm_ioctl_desc *ioctls; +int num_ioctls;</synopsis> +      <abstract>Driver-specific ioctls descriptors table.</abstract> +      <para> +        Driver-specific ioctls numbers start at DRM_COMMAND_BASE. The ioctls +	descriptors table is indexed by the ioctl number offset from the base +	value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the +	table entries. +      </para> +      <para> +        <programlisting>DRM_IOCTL_DEF_DRV(ioctl, func, flags)</programlisting> +	<para> +	  <parameter>ioctl</parameter> is the ioctl name. Drivers must define +	  the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number +	  offset from DRM_COMMAND_BASE and the ioctl number respectively. The +	  first macro is private to the device while the second must be exposed +	  to userspace in a public header. +	</para> +	<para> +	  <parameter>func</parameter> is a pointer to the ioctl handler function +	  compatible with the <type>drm_ioctl_t</type> type. +	  <programlisting>typedef int drm_ioctl_t(struct drm_device *dev, void *data, +		struct drm_file *file_priv);</programlisting> +	</para> +	<para> +	  <parameter>flags</parameter> is a bitmask combination of the following +	  values. It restricts how the ioctl is allowed to be called. 
+	  <itemizedlist> +	    <listitem><para> +	      DRM_AUTH - Only authenticated callers allowed +	    </para></listitem> +	    <listitem><para> +	      DRM_MASTER - The ioctl can only be called on the master file +	      handle +	    </para></listitem> +            <listitem><para> +	      DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed +	    </para></listitem> +            <listitem><para> +	      DRM_CONTROL_ALLOW - The ioctl can only be called on a control +	      device +	    </para></listitem> +            <listitem><para> +	      DRM_UNLOCKED - The ioctl handler will be called without locking +	      the DRM global mutex +	    </para></listitem> +	  </itemizedlist> +	</para>        </para>      </sect2> - -  </sect1> - -  <!-- Output management --> -  <sect1> -    <title>Output management</title> -    <para> -      At the core of the DRM output management code is a set of -      structures representing CRTCs, encoders, and connectors. -    </para> -    <para> -      A CRTC is an abstraction representing a part of the chip that -      contains a pointer to a scanout buffer.  Therefore, the number -      of CRTCs available determines how many independent scanout -      buffers can be active at any given time.  The CRTC structure -      contains several fields to support this: a pointer to some video -      memory, a display mode, and an (x, y) offset into the video -      memory to support panning or configurations where one piece of -      video memory spans multiple CRTCs. -    </para> -    <para> -      An encoder takes pixel data from a CRTC and converts it to a -      format suitable for any attached connectors.  On some devices, -      it may be possible to have a CRTC send data to more than one -      encoder.  In that case, both encoders would receive data from -      the same scanout buffer, resulting in a "cloned" display -      configuration across the connectors attached to each encoder. 
-    </para> -    <para> -      A connector is the final destination for pixel data on a device, -      and usually connects directly to an external display device like -      a monitor or laptop panel.  A connector can only be attached to -      one encoder at a time.  The connector is also the structure -      where information about the attached display is kept, so it -      contains fields for display data, EDID data, DPMS & -      connection status, and information about modes supported on the -      attached displays. -    </para> -<!--!Edrivers/char/drm/drm_crtc.c--> -  </sect1> - -  <sect1> -    <title>Framebuffer management</title> -    <para> -      Clients need to provide a framebuffer object which provides a source -      of pixels for a CRTC to deliver to the encoder(s) and ultimately the -      connector(s). A framebuffer is fundamentally a driver-specific memory -      object, made into an opaque handle by the DRM's addfb() function. -      Once a framebuffer has been created this way, it may be passed to the -      KMS mode setting routines for use in a completed configuration. -    </para>    </sect1>    <sect1> @@ -812,15 +2355,24 @@ void intel_crt_init(struct drm_device *dev)      </para>    </sect1> +  <!-- Internals: suspend/resume --> +    <sect1> -    <title>Suspend/resume</title> +    <title>Suspend/Resume</title> +    <para> +      The DRM core provides some suspend/resume code, but drivers wanting full +      suspend/resume support should provide save() and restore() functions. +      These are called at suspend, hibernate, or resume time, and should perform +      any state save or restore required by your device across suspend or +      hibernate states. 
+    </para> +    <synopsis>int (*suspend) (struct drm_device *, pm_message_t state); +int (*resume) (struct drm_device *);</synopsis>      <para> -      The DRM core provides some suspend/resume code, but drivers -      wanting full suspend/resume support should provide save() and -      restore() functions.  These are called at suspend, -      hibernate, or resume time, and should perform any state save or -      restore required by your device across suspend or hibernate -      states. +      Those are legacy suspend and resume methods. New driver should use the +      power management interface provided by their bus type (usually through +      the struct <structname>device_driver</structname> dev_pm_ops) and set +      these methods to NULL.      </para>    </sect1> @@ -833,6 +2385,35 @@ void intel_crt_init(struct drm_device *dev)    </sect1>    </chapter> +<!-- TODO + +- Add a glossary +- Document the struct_mutex catch-all lock +- Document connector properties + +- Why is the load method optional? +- What are drivers supposed to set the initial display state to, and how? +  Connector's DPMS states are not initialized and are thus equal to +  DRM_MODE_DPMS_ON. The fbcon compatibility layer calls +  drm_helper_disable_unused_functions(), which disables unused encoders and +  CRTCs, but doesn't touch the connectors' DPMS state, and +  drm_helper_connector_dpms() in reaction to fbdev blanking events. Do drivers +  that don't implement (or just don't use) fbcon compatibility need to call +  those functions themselves? +- KMS drivers must call drm_vblank_pre_modeset() and drm_vblank_post_modeset() +  around mode setting. Should this be done in the DRM core? +- vblank_disable_allowed is set to 1 in the first drm_vblank_post_modeset() +  call and never set back to 0. It seems to be safe to permanently set it to 1 +  in drm_vblank_init() for KMS driver, and it might be safe for UMS drivers as +  well. This should be investigated. 
+- crtc and connector .save and .restore operations are only used internally in +  drivers, should they be removed from the core? +- encoder mid-layer .save and .restore operations are only used internally in +  drivers, should they be removed from the core? +- encoder mid-layer .detect operation is only used internally in drivers, +  should it be removed from the core? +--> +    <!-- External interfaces -->    <chapter id="drmExternals"> @@ -853,6 +2434,42 @@ void intel_crt_init(struct drm_device *dev)        Cover generic ioctls and sysfs layout here.  We only need high-level        info, since man pages should cover the rest.      </para> + +  <!-- External: vblank handling --> + +    <sect1> +      <title>VBlank event handling</title> +      <para> +        The DRM core exposes two vertical blank related ioctls: +        <variablelist> +          <varlistentry> +            <term>DRM_IOCTL_WAIT_VBLANK</term> +            <listitem> +              <para> +                This takes a struct drm_wait_vblank structure as its argument, +                and it is used to block or request a signal when a specified +                vblank event occurs. +              </para> +            </listitem> +          </varlistentry> +          <varlistentry> +            <term>DRM_IOCTL_MODESET_CTL</term> +            <listitem> +              <para> +                This should be called by application level drivers before and +                after mode setting, since on many devices the vertical blank +                counter is reset at that time.  Internally, the DRM snapshots +                the last vblank count when the ioctl is called with the +                _DRM_PRE_MODESET command, so that the counter won't go backwards +                (which is dealt with when _DRM_POST_MODESET is used). 
+              </para> +            </listitem> +          </varlistentry> +        </variablelist> +<!--!Edrivers/char/drm/drm_irq.c--> +      </para> +    </sect1> +    </chapter>    <!-- API reference --> diff --git a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml index 72039512790..701138f1209 100644 --- a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml +++ b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml @@ -125,7 +125,7 @@ the structure refers to a radio tuner the  <constant>V4L2_TUNER_CAP_NORM</constant> flags can't be used.</para>  <para>If multiple frequency bands are supported, then  <structfield>capability</structfield> is the union of all -<structfield>capability></structfield> fields of each &v4l2-frequency-band;. +<structfield>capability</structfield> fields of each &v4l2-frequency-band;.  </para></entry>  	  </row>  	  <row> diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX index d111e3b23db..d18ecd827c4 100644 --- a/Documentation/block/00-INDEX +++ b/Documentation/block/00-INDEX @@ -3,15 +3,21 @@  biodoc.txt  	- Notes on the Generic Block Layer Rewrite in Linux 2.5  capability.txt -	- Generic Block Device Capability (/sys/block/<disk>/capability) +	- Generic Block Device Capability (/sys/block/<device>/capability) +cfq-iosched.txt +	- CFQ IO scheduler tunables +data-integrity.txt +	- Block data integrity  deadline-iosched.txt  	- Deadline IO scheduler tunables  ioprio.txt  	- Block io priorities (in CFQ scheduler) +queue-sysfs.txt +	- Queue's sysfs entries  request.txt  	- The members of struct request (in include/linux/blkdev.h)  stat.txt -	- Block layer statistics in /sys/block/<dev>/stat +	- Block layer statistics in /sys/block/<device>/stat  switching-sched.txt  	- Switching I/O schedulers at runtime  writeback_cache_control.txt diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt index 6d670f57045..d89b4fe724d 100644 --- 
a/Documentation/block/cfq-iosched.txt +++ b/Documentation/block/cfq-iosched.txt @@ -1,3 +1,14 @@ +CFQ (Complete Fairness Queueing) +=============================== + +The main aim of CFQ scheduler is to provide a fair allocation of the disk +I/O bandwidth for all the processes which requests an I/O operation. + +CFQ maintains the per process queue for the processes which request I/O +operation(syncronous requests). In case of asynchronous requests, all the +requests from all the processes are batched together according to their +process's I/O priority. +  CFQ ioscheduler tunables  ======================== @@ -25,6 +36,72 @@ there are multiple spindles behind single LUN (Host based hardware RAID  controller or for storage arrays), setting slice_idle=0 might end up in better  throughput and acceptable latencies. +back_seek_max +------------- +This specifies, given in Kbytes, the maximum "distance" for backward seeking. +The distance is the amount of space from the current head location to the +sectors that are backward in terms of distance. + +This parameter allows the scheduler to anticipate requests in the "backward" +direction and consider them as being the "next" if they are within this +distance from the current head location. + +back_seek_penalty +----------------- +This parameter is used to compute the cost of backward seeking. If the +backward distance of request is just 1/back_seek_penalty from a "front" +request, then the seeking cost of two requests is considered equivalent. + +So scheduler will not bias toward one or the other request (otherwise scheduler +will bias toward front request). Default value of back_seek_penalty is 2. + +fifo_expire_async +----------------- +This parameter is used to set the timeout of asynchronous requests. Default +value of this is 248ms. + +fifo_expire_sync +---------------- +This parameter is used to set the timeout of synchronous requests. Default +value of this is 124ms. 
In case to favor synchronous requests over asynchronous +one, this value should be decreased relative to fifo_expire_async. + +slice_async +----------- +This parameter is same as of slice_sync but for asynchronous queue. The +default value is 40ms. + +slice_async_rq +-------------- +This parameter is used to limit the dispatching of asynchronous request to +device request queue in queue's slice time. The maximum number of request that +are allowed to be dispatched also depends upon the io priority. Default value +for this is 2. + +slice_sync +---------- +When a queue is selected for execution, the queues IO requests are only +executed for a certain amount of time(time_slice) before switching to another +queue. This parameter is used to calculate the time slice of synchronous +queue. + +time_slice is computed using the below equation:- +time_slice = slice_sync + (slice_sync/5 * (4 - prio)). To increase the +time_slice of synchronous queue, increase the value of slice_sync. Default +value is 100ms. + +quantum +------- +This specifies the number of request dispatched to the device queue. In a +queue's time slice, a request will not be dispatched if the number of request +in the device exceeds this parameter. This parameter is used for synchronous +request. + +In case of storage with several disk, this setting can limit the parallel +processing of request. Therefore, increasing the value can imporve the +performace although this can cause the latency of some I/O to increase due +to more number of requests. +  CFQ IOPS Mode for group scheduling  ===================================  Basic CFQ design is to provide priority based time slices. Higher priority diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt index 6518a55273e..e54ac1d5340 100644 --- a/Documentation/block/queue-sysfs.txt +++ b/Documentation/block/queue-sysfs.txt @@ -9,20 +9,71 @@ These files are the ones found in the /sys/block/xxx/queue/ directory.  
Files denoted with a RO postfix are readonly and the RW postfix means  read-write. +add_random (RW) +---------------- +This file allows to trun off the disk entropy contribution. Default +value of this file is '1'(on). + +discard_granularity (RO) +----------------------- +This shows the size of internal allocation of the device in bytes, if +reported by the device. A value of '0' means device does not support +the discard functionality. + +discard_max_bytes (RO) +---------------------- +Devices that support discard functionality may have internal limits on +the number of bytes that can be trimmed or unmapped in a single operation. +The discard_max_bytes parameter is set by the device driver to the maximum +number of bytes that can be discarded in a single operation. Discard +requests issued to the device must not exceed this limit. A discard_max_bytes +value of 0 means that the device does not support discard functionality. + +discard_zeroes_data (RO) +------------------------ +When read, this file will show if the discarded block are zeroed by the +device or not. If its value is '1' the blocks are zeroed otherwise not. +  hw_sector_size (RO)  -------------------  This is the hardware sector size of the device, in bytes. +iostats (RW) +------------- +This file is used to control (on/off) the iostats accounting of the +disk. + +logical_block_size (RO) +----------------------- +This is the logcal block size of the device, in bytes. +  max_hw_sectors_kb (RO)  ----------------------  This is the maximum number of kilobytes supported in a single data transfer. +max_integrity_segments (RO) +--------------------------- +When read, this file shows the max limit of integrity segments as +set by block layer which a hardware controller can handle. +  max_sectors_kb (RW)  -------------------  This is the maximum number of kilobytes that the block layer will allow  for a filesystem request. Must be smaller than or equal to the maximum  size allowed by the hardware. 
+max_segments (RO) +----------------- +Maximum number of segments of the device. + +max_segment_size (RO) +--------------------- +Maximum segment size of the device. + +minimum_io_size (RO) +-------------------- +This is the smallest preferred io size reported by the device. +  nomerges (RW)  -------------  This enables the user to disable the lookup logic involved with IO @@ -45,11 +96,24 @@ per-block-cgroup request pool.  IOW, if there are N block cgroups,  each request queue may have upto N request pools, each independently  regulated by nr_requests. +optimal_io_size (RO) +-------------------- +This is the optimal io size reported by the device. + +physical_block_size (RO) +------------------------ +This is the physical block size of device, in bytes. +  read_ahead_kb (RW)  ------------------  Maximum number of kilobytes to read-ahead for filesystems on this block  device. +rotational (RW) +--------------- +This file is used to stat if the device is of rotational type or +non-rotational type. 
+  rq_affinity (RW)  ----------------  If this option is '1', the block layer will migrate request completions to the diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt index 70cd49b1caa..1dd622546d0 100644 --- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt +++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt @@ -10,8 +10,8 @@ Required properties:  - compatible : Should be "fsl,<chip>-esdhc"  Optional properties: -- fsl,cd-internal : Indicate to use controller internal card detection -- fsl,wp-internal : Indicate to use controller internal write protection +- fsl,cd-controller : Indicate to use controller internal card detection +- fsl,wp-controller : Indicate to use controller internal write protection  Examples: @@ -19,8 +19,8 @@ esdhc@70004000 {  	compatible = "fsl,imx51-esdhc";  	reg = <0x70004000 0x4000>;  	interrupts = <1>; -	fsl,cd-internal; -	fsl,wp-internal; +	fsl,cd-controller; +	fsl,wp-controller;  };  esdhc@70008000 { diff --git a/Documentation/devicetree/bindings/regulator/tps6586x.txt b/Documentation/devicetree/bindings/regulator/tps6586x.txt index d156e1b5db1..da80c2ae091 100644 --- a/Documentation/devicetree/bindings/regulator/tps6586x.txt +++ b/Documentation/devicetree/bindings/regulator/tps6586x.txt @@ -9,9 +9,9 @@ Required properties:  - regulators: list of regulators provided by this controller, must have    property "regulator-compatible" to match their hardware counterparts:    sm[0-2], ldo[0-9] and ldo_rtc -- sm0-supply: The input supply for the SM0. -- sm1-supply: The input supply for the SM1. -- sm2-supply: The input supply for the SM2. +- vin-sm0-supply: The input supply for the SM0. +- vin-sm1-supply: The input supply for the SM1. +- vin-sm2-supply: The input supply for the SM2.  
- vinldo01-supply: The input supply for the LDO1 and LDO2  - vinldo23-supply: The input supply for the LDO2 and LDO3  - vinldo4-supply: The input supply for the LDO4 @@ -30,9 +30,9 @@ Example:  		#gpio-cells = <2>;  		gpio-controller; -		sm0-supply = <&some_reg>; -		sm1-supply = <&some_reg>; -		sm2-supply = <&some_reg>; +		vin-sm0-supply = <&some_reg>; +		vin-sm1-supply = <&some_reg>; +		vin-sm2-supply = <&some_reg>;  		vinldo01-supply = <...>;  		vinldo23-supply = <...>;  		vinldo4-supply = <...>; diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt index ead764b2728..de1e6c4dccf 100644 --- a/Documentation/filesystems/vfat.txt +++ b/Documentation/filesystems/vfat.txt @@ -137,6 +137,17 @@ errors=panic|continue|remount-ro  		 without doing anything or remount the partition in  		 read-only mode (default behavior). +discard       -- If set, issues discard/TRIM commands to the block +		 device when blocks are freed. This is useful for SSD devices +		 and sparse/thinly-provisoned LUNs. + +nfs           -- This option maintains an index (cache) of directory +		 inodes by i_logstart which is used by the nfs-related code to +		 improve look-ups. + +		 Enable this only if you want to export the FAT filesystem +		 over NFS +  <bool>: 0,1,yes,no,true,false  TODO diff --git a/Documentation/networking/netconsole.txt b/Documentation/networking/netconsole.txt index 8d022073e3e..2e9e0ae2cd4 100644 --- a/Documentation/networking/netconsole.txt +++ b/Documentation/networking/netconsole.txt @@ -51,8 +51,23 @@ Built-in netconsole starts immediately after the TCP stack is  initialized and attempts to bring up the supplied dev at the supplied  address. -The remote host can run either 'netcat -u -l -p <port>', -'nc -l -u <port>' or syslogd. +The remote host has several options to receive the kernel messages, +for example: + +1) syslogd + +2) netcat + +   On distributions using a BSD-based netcat version (e.g. 
Fedora, +   openSUSE and Ubuntu) the listening port must be specified without +   the -p switch: + +   'nc -u -l -p <port>' / 'nc -u -l <port>' or +   'netcat -u -l -p <port>' / 'netcat -u -l <port>' + +3) socat + +   'socat udp-recv:<port> -'  Dynamic reconfiguration:  ======================== diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt index e40f4b4e197..1479aca2374 100644 --- a/Documentation/pinctrl.txt +++ b/Documentation/pinctrl.txt @@ -840,9 +840,9 @@ static unsigned long i2c_pin_configs[] = {  static struct pinctrl_map __initdata mapping[] = {  	PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"), -	PIN_MAP_MUX_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs), -	PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs), -	PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs), +	PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs), +	PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs), +	PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs),  };  Finally, some devices expect the mapping table to contain certain specific diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt index f8551b3879f..4ac359b7aa1 100644 --- a/Documentation/vm/hugetlbpage.txt +++ b/Documentation/vm/hugetlbpage.txt @@ -299,11 +299,17 @@ map_hugetlb.c.  
*******************************************************************  /* - * hugepage-shm:  see Documentation/vm/hugepage-shm.c + * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c   */  *******************************************************************  /* - * hugepage-mmap:  see Documentation/vm/hugepage-mmap.c + * hugepage-shm:  see tools/testing/selftests/vm/hugepage-shm.c + */ + +******************************************************************* + +/* + * hugepage-mmap:  see tools/testing/selftests/vm/hugepage-mmap.c   */ diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm index 0403aaaba87..874a8ca93fe 100644 --- a/Documentation/w1/slaves/w1_therm +++ b/Documentation/w1/slaves/w1_therm @@ -3,6 +3,7 @@ Kernel driver w1_therm  Supported chips:    * Maxim ds18*20 based temperature sensors. +  * Maxim ds1825 based temperature sensors.  Author: Evgeniy Polyakov <johnpol@2ka.mipt.ru> @@ -15,6 +16,7 @@ supported family codes:  W1_THERM_DS18S20	0x10  W1_THERM_DS1822		0x22  W1_THERM_DS18B20	0x28 +W1_THERM_DS1825		0x3B  Support is provided through the sysfs w1_slave file.  
Each open and  read sequence will initiate a temperature conversion then provide two diff --git a/MAINTAINERS b/MAINTAINERS index 61ad79ea2b0..fdc0119963e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2217,7 +2217,7 @@ S:	Maintained  F:	drivers/scsi/tmscsim.*  DC395x SCSI driver -M:	Oliver Neukum <oliver@neukum.name> +M:	Oliver Neukum <oliver@neukum.org>  M:	Ali Akcaagac <aliakc@web.de>  M:	Jamie Lenehan <lenehan@twibble.org>  W:	http://twibble.org/dist/dc395x/ @@ -4537,7 +4537,7 @@ S:	Supported  F:	arch/microblaze/  MICROTEK X6 SCANNER -M:	Oliver Neukum <oliver@neukum.name> +M:	Oliver Neukum <oliver@neukum.org>  S:	Maintained  F:	drivers/usb/image/microtek.* @@ -7076,7 +7076,7 @@ F:	include/linux/mtd/ubi.h  F:	include/mtd/ubi-user.h  USB ACM DRIVER -M:	Oliver Neukum <oliver@neukum.name> +M:	Oliver Neukum <oliver@neukum.org>  L:	linux-usb@vger.kernel.org  S:	Maintained  F:	Documentation/usb/acm.txt @@ -7097,7 +7097,7 @@ S:	Supported  F:	drivers/block/ub.c  USB CDC ETHERNET DRIVER -M:	Oliver Neukum <oliver@neukum.name> +M:	Oliver Neukum <oliver@neukum.org>  L:	linux-usb@vger.kernel.org  S:	Maintained  F:	drivers/net/usb/cdc_*.c @@ -7170,7 +7170,7 @@ F:	drivers/usb/host/isp116x*  F:	include/linux/usb/isp116x.h  USB KAWASAKI LSI DRIVER -M:	Oliver Neukum <oliver@neukum.name> +M:	Oliver Neukum <oliver@neukum.org>  L:	linux-usb@vger.kernel.org  S:	Maintained  F:	drivers/usb/serial/kl5kusb105.* @@ -7288,6 +7288,12 @@ W:	http://www.connecttech.com  S:	Supported  F:	drivers/usb/serial/whiteheat* +USB SMSC75XX ETHERNET DRIVER +M:	Steve Glendinning <steve.glendinning@shawell.net> +L:	netdev@vger.kernel.org +S:	Maintained +F:	drivers/net/usb/smsc75xx.* +  USB SMSC95XX ETHERNET DRIVER  M:	Steve Glendinning <steve.glendinning@shawell.net>  L:	netdev@vger.kernel.org @@ -7670,23 +7676,28 @@ S:	Supported  F:	Documentation/hwmon/wm83??  
F:	arch/arm/mach-s3c64xx/mach-crag6410*  F:	drivers/clk/clk-wm83*.c +F:	drivers/extcon/extcon-arizona.c  F:	drivers/leds/leds-wm83*.c  F:	drivers/gpio/gpio-*wm*.c +F:	drivers/gpio/gpio-arizona.c  F:	drivers/hwmon/wm83??-hwmon.c  F:	drivers/input/misc/wm831x-on.c  F:	drivers/input/touchscreen/wm831x-ts.c  F:	drivers/input/touchscreen/wm97*.c -F:	drivers/mfd/wm8*.c +F:	drivers/mfd/arizona* +F:	drivers/mfd/wm*.c  F:	drivers/power/wm83*.c  F:	drivers/rtc/rtc-wm83*.c  F:	drivers/regulator/wm8*.c  F:	drivers/video/backlight/wm83*_bl.c  F:	drivers/watchdog/wm83*_wdt.c +F:	include/linux/mfd/arizona/  F:	include/linux/mfd/wm831x/  F:	include/linux/mfd/wm8350/  F:	include/linux/mfd/wm8400*  F:	include/linux/wm97xx.h  F:	include/sound/wm????.h +F:	sound/soc/codecs/arizona.?  F:	sound/soc/codecs/wm*  WORKQUEUE @@ -1,7 +1,7 @@  VERSION = 3  PATCHLEVEL = 6  SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3  NAME = Saber-toothed Squirrel  # *DOCUMENTATION* diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index d5b9b5e645c..9944dedee5b 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -18,6 +18,8 @@ config ALPHA  	select ARCH_HAVE_NMI_SAFE_CMPXCHG  	select GENERIC_SMP_IDLE_THREAD  	select GENERIC_CMOS_UPDATE +	select GENERIC_STRNCPY_FROM_USER +	select GENERIC_STRNLEN_USER  	help  	  The Alpha is a 64-bit general-purpose processor designed and  	  marketed by the Digital Equipment Corporation of blessed memory, diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 3bb7ffeae3b..c2cbe4fc391 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -14,8 +14,8 @@   */ -#define ATOMIC_INIT(i)		( (atomic_t) { (i) } ) -#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } ) +#define ATOMIC_INIT(i)		{ (i) } +#define ATOMIC64_INIT(i)	{ (i) }  #define atomic_read(v)		(*(volatile int *)&(v)->counter)  #define atomic64_read(v)	(*(volatile long *)&(v)->counter) diff --git a/arch/alpha/include/asm/fpu.h 
b/arch/alpha/include/asm/fpu.h index db00f7885fa..e477bcd5b94 100644 --- a/arch/alpha/include/asm/fpu.h +++ b/arch/alpha/include/asm/fpu.h @@ -1,7 +1,9 @@  #ifndef __ASM_ALPHA_FPU_H  #define __ASM_ALPHA_FPU_H +#ifdef __KERNEL__  #include <asm/special_insns.h> +#endif  /*   * Alpha floating-point control register defines: diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h index fd698a174f2..b87755a1955 100644 --- a/arch/alpha/include/asm/ptrace.h +++ b/arch/alpha/include/asm/ptrace.h @@ -76,7 +76,10 @@ struct switch_stack {  #define task_pt_regs(task) \    ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) -#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0) +#define current_pt_regs() \ +  ((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1) + +#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)  #endif diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h index dcb221a4b5b..7d2f75be932 100644 --- a/arch/alpha/include/asm/socket.h +++ b/arch/alpha/include/asm/socket.h @@ -76,9 +76,11 @@  /* Instruct lower device to use last 4-bytes of skb data as FCS */  #define SO_NOFCS		43 +#ifdef __KERNEL__  /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we   * have to define SOCK_NONBLOCK to a different value here.   */  #define SOCK_NONBLOCK	0x40000000 +#endif /* __KERNEL__ */  #endif /* _ASM_SOCKET_H */ diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index b49ec2f8d6e..766fdfde2b7 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -433,36 +433,12 @@ clear_user(void __user *to, long len)  #undef __module_address  #undef __module_call -/* Returns: -EFAULT if exception before terminator, N if the entire -   buffer filled, else strlen.  */ +#define user_addr_max() \ +        (segment_eq(get_fs(), USER_DS) ? 
TASK_SIZE : ~0UL) -extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len); - -extern inline long -strncpy_from_user(char *to, const char __user *from, long n) -{ -	long ret = -EFAULT; -	if (__access_ok((unsigned long)from, 0, get_fs())) -		ret = __strncpy_from_user(to, from, n); -	return ret; -} - -/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ -extern long __strlen_user(const char __user *); - -extern inline long strlen_user(const char __user *str) -{ -	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0; -} - -/* Returns: 0 if exception before NUL or reaching the supplied limit (N), - * a value greater than N if the limit would be exceeded, else strlen.  */ -extern long __strnlen_user(const char __user *, long); - -extern inline long strnlen_user(const char __user *str, long n) -{ -	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0; -} +extern long strncpy_from_user(char *dest, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n);  /*   * About the exception table: diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index 633b23b0664..a31a78eac9b 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -465,10 +465,12 @@  #define __NR_setns			501  #define __NR_accept4			502  #define __NR_sendmmsg			503 +#define __NR_process_vm_readv		504 +#define __NR_process_vm_writev		505  #ifdef __KERNEL__ -#define NR_SYSCALLS			504 +#define NR_SYSCALLS			506  #define __ARCH_WANT_OLD_READDIR  #define __ARCH_WANT_STAT64 diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h new file mode 100644 index 00000000000..6b340d0f152 --- /dev/null +++ b/arch/alpha/include/asm/word-at-a-time.h @@ -0,0 +1,55 @@ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +#include <asm/compiler.h> 
+ +/* + * word-at-a-time interface for Alpha. + */ + +/* + * We do not use the word_at_a_time struct on Alpha, but it needs to be + * implemented to humour the generic code. + */ +struct word_at_a_time { +	const unsigned long unused; +}; + +#define WORD_AT_A_TIME_CONSTANTS { 0 } + +/* Return nonzero if val has a zero */ +static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c) +{ +	unsigned long zero_locations = __kernel_cmpbge(0, val); +	*bits = zero_locations; +	return zero_locations; +} + +static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c) +{ +	return bits; +} + +#define create_zero_mask(bits) (bits) + +static inline unsigned long find_zero(unsigned long bits) +{ +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) +	/* Simple if have CIX instructions */ +	return __kernel_cttz(bits); +#else +	unsigned long t1, t2, t3; +	/* Retain lowest set bit only */ +	bits &= -bits; +	/* Binary search for lowest set bit */ +	t1 = bits & 0xf0; +	t2 = bits & 0xcc; +	t3 = bits & 0xaa; +	if (t1) t1 = 4; +	if (t2) t2 = 2; +	if (t3) t3 = 1; +	return t1 + t2 + t3; +#endif +} + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c index d96e742d4dc..15fa821d09c 100644 --- a/arch/alpha/kernel/alpha_ksyms.c +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -52,7 +52,6 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s);  /* entry.S */  EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(kernel_execve);  /* Networking helper routines. */  EXPORT_SYMBOL(csum_tcpudp_magic); @@ -74,8 +73,6 @@ EXPORT_SYMBOL(alpha_fp_emul);   */  EXPORT_SYMBOL(__copy_user);  EXPORT_SYMBOL(__do_clear_user); -EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(__strnlen_user);  /*    * SMP-specific symbols. 
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index 6d159cee5f2..ec0da0567ab 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -663,58 +663,6 @@ kernel_thread:  	br	ret_to_kernel  .end kernel_thread -/* - * kernel_execve(path, argv, envp) - */ -	.align	4 -	.globl	kernel_execve -	.ent	kernel_execve -kernel_execve: -	/* We can be called from a module.  */ -	ldgp	$gp, 0($27) -	lda	$sp, -(32+SIZEOF_PT_REGS+8)($sp) -	.frame	$sp, 32+SIZEOF_PT_REGS+8, $26, 0 -	stq	$26, 0($sp) -	stq	$16, 8($sp) -	stq	$17, 16($sp) -	stq	$18, 24($sp) -	.prologue 1 - -	lda	$16, 32($sp) -	lda	$17, 0 -	lda	$18, SIZEOF_PT_REGS -	bsr	$26, memset		!samegp - -	/* Avoid the HAE being gratuitously wrong, which would cause us -	   to do the whole turn off interrupts thing and restore it.  */ -	ldq	$2, alpha_mv+HAE_CACHE -	stq	$2, 152+32($sp) - -	ldq	$16, 8($sp) -	ldq	$17, 16($sp) -	ldq	$18, 24($sp) -	lda	$19, 32($sp) -	bsr	$26, do_execve		!samegp - -	ldq	$26, 0($sp) -	bne	$0, 1f			/* error! */ - -	/* Move the temporary pt_regs struct from its current location -	   to the top of the kernel stack frame.  See copy_thread for -	   details for a normal process.  */ -	lda	$16, 0x4000 - SIZEOF_PT_REGS($8) -	lda	$17, 32($sp) -	lda	$18, SIZEOF_PT_REGS -	bsr	$26, memmove		!samegp - -	/* Take that over as our new stack frame and visit userland!  */ -	lda	$sp, 0x4000 - SIZEOF_PT_REGS($8) -	br	$31, ret_from_sys_call - -1:	lda	$sp, 32+SIZEOF_PT_REGS+8($sp) -	ret -.end kernel_execve -  /*   * Special system calls.  
Most of these are special in that they either @@ -797,115 +745,6 @@ sys_rt_sigreturn:  .end sys_rt_sigreturn  	.align	4 -	.globl	sys_sethae -	.ent	sys_sethae -sys_sethae: -	.prologue 0 -	stq	$16, 152($sp) -	ret -.end sys_sethae - -	.align	4 -	.globl	osf_getpriority -	.ent	osf_getpriority -osf_getpriority: -	lda	$sp, -16($sp) -	stq	$26, 0($sp) -	.prologue 0 - -	jsr	$26, sys_getpriority - -	ldq	$26, 0($sp) -	blt	$0, 1f - -	/* Return value is the unbiased priority, i.e. 20 - prio. -	   This does result in negative return values, so signal -	   no error by writing into the R0 slot.  */ -	lda	$1, 20 -	stq	$31, 16($sp) -	subl	$1, $0, $0 -	unop - -1:	lda	$sp, 16($sp) -	ret -.end osf_getpriority - -	.align	4 -	.globl	sys_getxuid -	.ent	sys_getxuid -sys_getxuid: -	.prologue 0 -	ldq	$2, TI_TASK($8) -	ldq	$3, TASK_CRED($2) -	ldl	$0, CRED_UID($3) -	ldl	$1, CRED_EUID($3) -	stq	$1, 80($sp) -	ret -.end sys_getxuid - -	.align	4 -	.globl	sys_getxgid -	.ent	sys_getxgid -sys_getxgid: -	.prologue 0 -	ldq	$2, TI_TASK($8) -	ldq	$3, TASK_CRED($2) -	ldl	$0, CRED_GID($3) -	ldl	$1, CRED_EGID($3) -	stq	$1, 80($sp) -	ret -.end sys_getxgid - -	.align	4 -	.globl	sys_getxpid -	.ent	sys_getxpid -sys_getxpid: -	.prologue 0 -	ldq	$2, TI_TASK($8) - -	/* See linux/kernel/timer.c sys_getppid for discussion -	   about this loop.  */ -	ldq	$3, TASK_GROUP_LEADER($2) -	ldq	$4, TASK_REAL_PARENT($3) -	ldl	$0, TASK_TGID($2) -1:	ldl	$1, TASK_TGID($4) -#ifdef CONFIG_SMP -	mov	$4, $5 -	mb -	ldq	$3, TASK_GROUP_LEADER($2) -	ldq	$4, TASK_REAL_PARENT($3) -	cmpeq	$4, $5, $5 -	beq	$5, 1b -#endif -	stq	$1, 80($sp) -	ret -.end sys_getxpid - -	.align	4 -	.globl	sys_alpha_pipe -	.ent	sys_alpha_pipe -sys_alpha_pipe: -	lda	$sp, -16($sp) -	stq	$26, 0($sp) -	.prologue 0 - -	mov	$31, $17 -	lda	$16, 8($sp) -	jsr	$26, do_pipe_flags - -	ldq	$26, 0($sp) -	bne	$0, 1f - -	/* The return values are in $0 and $20.  
*/ -	ldl	$1, 12($sp) -	ldl	$0, 8($sp) - -	stq	$1, 80+16($sp) -1:	lda	$sp, 16($sp) -	ret -.end sys_alpha_pipe - -	.align	4  	.globl	sys_execve  	.ent	sys_execve  sys_execve: diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 98a103621af..bc1acdda7a5 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1404,3 +1404,52 @@ SYSCALL_DEFINE3(osf_writev, unsigned long, fd,  }  #endif + +SYSCALL_DEFINE2(osf_getpriority, int, which, int, who) +{ +	int prio = sys_getpriority(which, who); +	if (prio >= 0) { +		/* Return value is the unbiased priority, i.e. 20 - prio. +		   This does result in negative return values, so signal +		   no error */ +		force_successful_syscall_return(); +		prio = 20 - prio; +	} +	return prio; +} + +SYSCALL_DEFINE0(getxuid) +{ +	current_pt_regs()->r20 = sys_geteuid(); +	return sys_getuid(); +} + +SYSCALL_DEFINE0(getxgid) +{ +	current_pt_regs()->r20 = sys_getegid(); +	return sys_getgid(); +} + +SYSCALL_DEFINE0(getxpid) +{ +	current_pt_regs()->r20 = sys_getppid(); +	return sys_getpid(); +} + +SYSCALL_DEFINE0(alpha_pipe) +{ +	int fd[2]; +	int res = do_pipe_flags(fd, 0); +	if (!res) { +		/* The return values are in $0 and $20.  */ +		current_pt_regs()->r20 = fd[1]; +		res = fd[0]; +	} +	return res; +} + +SYSCALL_DEFINE1(sethae, unsigned long, val) +{ +	current_pt_regs()->hae = val; +	return 0; +} diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 153d3fce3e8..d6fde98b74b 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -455,3 +455,22 @@ get_wchan(struct task_struct *p)  	}  	return pc;  } + +int kernel_execve(const char *path, const char *const argv[], const char *const envp[]) +{ +	/* Avoid the HAE being gratuitously wrong, which would cause us +	   to do the whole turn off interrupts thing and restore it.  
*/ +	struct pt_regs regs = {.hae = alpha_mv.hae_cache}; +	int err = do_execve(path, argv, envp, ®s); +	if (!err) { +		struct pt_regs *p = current_pt_regs(); +		/* copy regs to normal position and off to userland we go... */ +		*p = regs; +		__asm__ __volatile__ ( +			"mov	%0, $sp;" +			"br	$31, ret_from_sys_call" +			: : "r"(p)); +	} +	return err; +} +EXPORT_SYMBOL(kernel_execve); diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 87835235f11..2ac6b45c3e0 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -111,7 +111,7 @@ sys_call_table:  	.quad sys_socket  	.quad sys_connect  	.quad sys_accept -	.quad osf_getpriority			/* 100 */ +	.quad sys_osf_getpriority			/* 100 */  	.quad sys_send  	.quad sys_recv  	.quad sys_sigreturn @@ -522,6 +522,8 @@ sys_call_table:  	.quad sys_setns  	.quad sys_accept4  	.quad sys_sendmmsg +	.quad sys_process_vm_readv +	.quad sys_process_vm_writev		/* 505 */  	.size sys_call_table, . - sys_call_table  	.type sys_call_table, @object diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile index c0a83ab62b7..59660743237 100644 --- a/arch/alpha/lib/Makefile +++ b/arch/alpha/lib/Makefile @@ -31,8 +31,6 @@ lib-y =	__divqu.o __remqu.o __divlu.o __remlu.o \  	$(ev6-y)memchr.o \  	$(ev6-y)copy_user.o \  	$(ev6-y)clear_user.o \ -	$(ev6-y)strncpy_from_user.o \ -	$(ev67-y)strlen_user.o \  	$(ev6-y)csum_ipv6_magic.o \  	$(ev6-y)clear_page.o \  	$(ev6-y)copy_page.o \ diff --git a/arch/alpha/lib/ev6-strncpy_from_user.S b/arch/alpha/lib/ev6-strncpy_from_user.S deleted file mode 100644 index d2e28178cac..00000000000 --- a/arch/alpha/lib/ev6-strncpy_from_user.S +++ /dev/null @@ -1,424 +0,0 @@ -/* - * arch/alpha/lib/ev6-strncpy_from_user.S - * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> - * - * Just like strncpy except in the return value: - * - * -EFAULT       if an exception occurs before the terminator is copied. - * N             if the buffer filled. 
- * - * Otherwise the length of the string is returned. - * - * Much of the information about 21264 scheduling/coding comes from: - *	Compiler Writer's Guide for the Alpha 21264 - *	abbreviated as 'CWG' in other comments here - *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html - * Scheduling notation: - *	E	- either cluster - *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1 - *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1 - * A bunch of instructions got moved and temp registers were changed - * to aid in scheduling.  Control flow was also re-arranged to eliminate - * branches, and to provide longer code sequences to enable better scheduling. - * A total rewrite (using byte load/stores for start & tail sequences) - * is desirable, but very difficult to do without a from-scratch rewrite. - * Save that for the future. - */ - - -#include <asm/errno.h> -#include <asm/regdef.h> - - -/* Allow an exception for an insn; exit if we get one.  */ -#define EX(x,y...)			\ -	99: x,##y;			\ -	.section __ex_table,"a";	\ -	.long 99b - .;			\ -	lda $31, $exception-99b($0); 	\ -	.previous - - -	.set noat -	.set noreorder -	.text - -	.globl __strncpy_from_user -	.ent __strncpy_from_user -	.frame $30, 0, $26 -	.prologue 0 - -	.align 4 -__strncpy_from_user: -	and	a0, 7, t3	# E : find dest misalignment -	beq	a2, $zerolength	# U : - -	/* Are source and destination co-aligned?  
*/ -	mov	a0, v0		# E : save the string start -	xor	a0, a1, t4	# E : -	EX( ldq_u t1, 0(a1) )	# L : Latency=3 load first quadword -	ldq_u	t0, 0(a0)	# L : load first (partial) aligned dest quadword - -	addq	a2, t3, a2	# E : bias count by dest misalignment -	subq	a2, 1, a3	# E : -	addq	zero, 1, t10	# E : -	and	t4, 7, t4	# E : misalignment between the two - -	and	a3, 7, t6	# E : number of tail bytes -	sll	t10, t6, t10	# E : t10 = bitmask of last count byte -	bne	t4, $unaligned	# U : -	lda	t2, -1		# E : build a mask against false zero - -	/* -	 * We are co-aligned; take care of a partial first word. -	 * On entry to this basic block: -	 * t0 == the first destination word for masking back in -	 * t1 == the first source word. -	 */ - -	srl	a3, 3, a2	# E : a2 = loop counter = (count - 1)/8 -	addq	a1, 8, a1	# E : -	mskqh	t2, a1, t2	# U :   detection in the src word -	nop - -	/* Create the 1st output word and detect 0's in the 1st input word.  */ -	mskqh	t1, a1, t3	# U : -	mskql	t0, a1, t0	# U : assemble the first output word -	ornot	t1, t2, t2	# E : -	nop - -	cmpbge	zero, t2, t8	# E : bits set iff null found -	or	t0, t3, t0	# E : -	beq	a2, $a_eoc	# U : -	bne	t8, $a_eos	# U : 2nd branch in a quad.  Bad. - -	/* On entry to this basic block: -	 * t0 == a source quad not containing a null. -	 * a0 - current aligned destination address -	 * a1 - current aligned source address -	 * a2 - count of quadwords to move. -	 * NOTE: Loop improvement - unrolling this is going to be -	 *	a huge win, since we're going to stall otherwise. -	 *	Fix this later.  For _really_ large copies, look -	 *	at using wh64 on a look-ahead basis.  See the code -	 *	in clear_user.S and copy_user.S. -	 * Presumably, since (a0) and (a1) do not overlap (by C definition) -	 * Lots of nops here: -	 *	- Separate loads from stores -	 *	- Keep it to 1 branch/quadpack so the branch predictor -	 *	  can train. 
-	 */ -$a_loop: -	stq_u	t0, 0(a0)	# L : -	addq	a0, 8, a0	# E : -	nop -	subq	a2, 1, a2	# E : - -	EX( ldq_u t0, 0(a1) )	# L : -	addq	a1, 8, a1	# E : -	cmpbge	zero, t0, t8	# E : Stall 2 cycles on t0 -	beq	a2, $a_eoc      # U : - -	beq	t8, $a_loop	# U : -	nop -	nop -	nop - -	/* Take care of the final (partial) word store.  At this point -	 * the end-of-count bit is set in t8 iff it applies. -	 * -	 * On entry to this basic block we have: -	 * t0 == the source word containing the null -	 * t8 == the cmpbge mask that found it. -	 */ -$a_eos: -	negq	t8, t12		# E : find low bit set -	and	t8, t12, t12	# E :  - -	/* We're doing a partial word store and so need to combine -	   our source and original destination words.  */ -	ldq_u	t1, 0(a0)	# L : -	subq	t12, 1, t6	# E : - -	or	t12, t6, t8	# E : -	zapnot	t0, t8, t0	# U : clear src bytes > null -	zap	t1, t8, t1	# U : clear dst bytes <= null -	or	t0, t1, t0	# E : - -	stq_u	t0, 0(a0)	# L : -	br	$finish_up	# L0 : -	nop -	nop - -	/* Add the end-of-count bit to the eos detection bitmask.  */ -	.align 4 -$a_eoc: -	or	t10, t8, t8 -	br	$a_eos -	nop -	nop - - -/* The source and destination are not co-aligned.  Align the destination -   and cope.  We have to be very careful about not reading too much and -   causing a SEGV.  */ - -	.align 4 -$u_head: -	/* We know just enough now to be able to assemble the first -	   full source word.  We can still find a zero at the end of it -	   that prevents us from outputting the whole thing. 
- -	   On entry to this basic block: -	   t0 == the first dest word, unmasked -	   t1 == the shifted low bits of the first source word -	   t6 == bytemask that is -1 in dest word bytes */ - -	EX( ldq_u t2, 8(a1) )	# L : load second src word -	addq	a1, 8, a1	# E : -	mskql	t0, a0, t0	# U : mask trailing garbage in dst -	extqh	t2, a1, t4	# U : - -	or	t1, t4, t1	# E : first aligned src word complete -	mskqh	t1, a0, t1	# U : mask leading garbage in src -	or	t0, t1, t0	# E : first output word complete -	or	t0, t6, t6	# E : mask original data for zero test - -	cmpbge	zero, t6, t8	# E : -	beq	a2, $u_eocfin	# U : -	bne	t8, $u_final	# U : bad news - 2nd branch in a quad -	lda	t6, -1		# E : mask out the bits we have - -	mskql	t6, a1, t6	# U :   already seen -	stq_u	t0, 0(a0)	# L : store first output word -	or      t6, t2, t2	# E : -	cmpbge	zero, t2, t8	# E : find nulls in second partial - -	addq	a0, 8, a0		# E : -	subq	a2, 1, a2		# E : -	bne	t8, $u_late_head_exit	# U : -	nop - -	/* Finally, we've got all the stupid leading edge cases taken care -	   of and we can set up to enter the main loop.  */ - -	extql	t2, a1, t1	# U : position hi-bits of lo word -	EX( ldq_u t2, 8(a1) )	# L : read next high-order source word -	addq	a1, 8, a1	# E : -	cmpbge	zero, t2, t8	# E : - -	beq	a2, $u_eoc	# U : -	bne	t8, $u_eos	# U : -	nop -	nop - -	/* Unaligned copy main loop.  In order to avoid reading too much, -	   the loop is structured to detect zeros in aligned source words. -	   This has, unfortunately, effectively pulled half of a loop -	   iteration out into the head and half into the tail, but it does -	   prevent nastiness from accumulating in the very thing we want -	   to run as fast as possible. - -	   On entry to this basic block: -	   t1 == the shifted high-order bits from the previous source word -	   t2 == the unshifted current source word - -	   We further know that t2 does not contain a null terminator.  
*/ - -	/* -	 * Extra nops here: -	 *	separate load quads from store quads -	 *	only one branch/quad to permit predictor training -	 */ - -	.align 4 -$u_loop: -	extqh	t2, a1, t0	# U : extract high bits for current word -	addq	a1, 8, a1	# E : -	extql	t2, a1, t3	# U : extract low bits for next time -	addq	a0, 8, a0	# E : - -	or	t0, t1, t0	# E : current dst word now complete -	EX( ldq_u t2, 0(a1) )	# L : load high word for next time -	subq	a2, 1, a2	# E : -	nop - -	stq_u	t0, -8(a0)	# L : save the current word -	mov	t3, t1		# E : -	cmpbge	zero, t2, t8	# E : test new word for eos -	beq	a2, $u_eoc	# U : - -	beq	t8, $u_loop	# U : -	nop -	nop -	nop - -	/* We've found a zero somewhere in the source word we just read. -	   If it resides in the lower half, we have one (probably partial) -	   word to write out, and if it resides in the upper half, we -	   have one full and one partial word left to write out. - -	   On entry to this basic block: -	   t1 == the shifted high-order bits from the previous source word -	   t2 == the unshifted current source word.  */ -	.align 4 -$u_eos: -	extqh	t2, a1, t0	# U : -	or	t0, t1, t0	# E : first (partial) source word complete -	cmpbge	zero, t0, t8	# E : is the null in this first bit? -	nop - -	bne	t8, $u_final	# U : -	stq_u	t0, 0(a0)	# L : the null was in the high-order bits -	addq	a0, 8, a0	# E : -	subq	a2, 1, a2	# E : - -	.align 4 -$u_late_head_exit: -	extql	t2, a1, t0	# U : -	cmpbge	zero, t0, t8	# E : -	or	t8, t10, t6	# E : -	cmoveq	a2, t6, t8	# E : - -	/* Take care of a final (probably partial) result word. -	   On entry to this basic block: -	   t0 == assembled source word -	   t8 == cmpbge mask that found the null.  
*/ -	.align 4 -$u_final: -	negq	t8, t6		# E : isolate low bit set -	and	t6, t8, t12	# E : -	ldq_u	t1, 0(a0)	# L : -	subq	t12, 1, t6	# E : - -	or	t6, t12, t8	# E : -	zapnot	t0, t8, t0	# U : kill source bytes > null -	zap	t1, t8, t1	# U : kill dest bytes <= null -	or	t0, t1, t0	# E : - -	stq_u	t0, 0(a0)	# E : -	br	$finish_up	# U : -	nop -	nop - -	.align 4 -$u_eoc:				# end-of-count -	extqh	t2, a1, t0	# U : -	or	t0, t1, t0	# E : -	cmpbge	zero, t0, t8	# E : -	nop - -	.align 4 -$u_eocfin:			# end-of-count, final word -	or	t10, t8, t8	# E : -	br	$u_final	# U : -	nop -	nop - -	/* Unaligned copy entry point.  */ -	.align 4 -$unaligned: - -	srl	a3, 3, a2	# U : a2 = loop counter = (count - 1)/8 -	and	a0, 7, t4	# E : find dest misalignment -	and	a1, 7, t5	# E : find src misalignment -	mov	zero, t0	# E : - -	/* Conditionally load the first destination word and a bytemask -	   with 0xff indicating that the destination byte is sacrosanct.  */ - -	mov	zero, t6	# E : -	beq	t4, 1f		# U : -	ldq_u	t0, 0(a0)	# L : -	lda	t6, -1		# E : - -	mskql	t6, a0, t6	# E : -	nop -	nop -	nop - -	.align 4 -1: -	subq	a1, t4, a1	# E : sub dest misalignment from src addr -	/* If source misalignment is larger than dest misalignment, we need -	   extra startup checks to avoid SEGV.  */ -	cmplt	t4, t5, t12	# E : -	extql	t1, a1, t1	# U : shift src into place -	lda	t2, -1		# E : for creating masks later - -	beq	t12, $u_head	# U : -	mskqh	t2, t5, t2	# U : begin src byte validity mask -	cmpbge	zero, t1, t8	# E : is there a zero? -	nop - -	extql	t2, a1, t2	# U : -	or	t8, t10, t5	# E : test for end-of-count too -	cmpbge	zero, t2, t3	# E : -	cmoveq	a2, t5, t8	# E : Latency=2, extra map slot - -	nop			# E : goes with cmov -	andnot	t8, t3, t8	# E : -	beq	t8, $u_head	# U : -	nop - -	/* At this point we've found a zero in the first partial word of -	   the source.  We need to isolate the valid source data and mask -	   it into the original destination data.  
(Incidentally, we know -	   that we'll need at least one byte of that original dest word.) */ - -	ldq_u	t0, 0(a0)	# L : -	negq	t8, t6		# E : build bitmask of bytes <= zero -	mskqh	t1, t4, t1	# U : -	and	t6, t8, t12	# E : - -	subq	t12, 1, t6	# E : -	or	t6, t12, t8	# E : -	zapnot	t2, t8, t2	# U : prepare source word; mirror changes -	zapnot	t1, t8, t1	# U : to source validity mask - -	andnot	t0, t2, t0	# E : zero place for source to reside -	or	t0, t1, t0	# E : and put it there -	stq_u	t0, 0(a0)	# L : -	nop - -	.align 4 -$finish_up: -	zapnot	t0, t12, t4	# U : was last byte written null? -	and	t12, 0xf0, t3	# E : binary search for the address of the -	cmovne	t4, 1, t4	# E : Latency=2, extra map slot -	nop			# E : with cmovne - -	and	t12, 0xcc, t2	# E : last byte written -	and	t12, 0xaa, t1	# E : -	cmovne	t3, 4, t3	# E : Latency=2, extra map slot -	nop			# E : with cmovne - -	bic	a0, 7, t0 -	cmovne	t2, 2, t2	# E : Latency=2, extra map slot -	nop			# E : with cmovne -	nop - -	cmovne	t1, 1, t1	# E : Latency=2, extra map slot -	nop			# E : with cmovne -	addq	t0, t3, t0	# E : -	addq	t1, t2, t1	# E : - -	addq	t0, t1, t0	# E : -	addq	t0, t4, t0	# add one if we filled the buffer -	subq	t0, v0, v0	# find string length -	ret			# L0 : - -	.align 4 -$zerolength: -	nop -	nop -	nop -	clr	v0 - -$exception: -	nop -	nop -	nop -	ret - -	.end __strncpy_from_user diff --git a/arch/alpha/lib/ev67-strlen_user.S b/arch/alpha/lib/ev67-strlen_user.S deleted file mode 100644 index 57e0d77b81a..00000000000 --- a/arch/alpha/lib/ev67-strlen_user.S +++ /dev/null @@ -1,107 +0,0 @@ -/* - * arch/alpha/lib/ev67-strlen_user.S - * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com> - * - * Return the length of the string including the NULL terminator - * (strlen+1) or zero if an error occurred. - * - * In places where it is critical to limit the processing time, - * and the data is not trusted, strnlen_user() should be used. 
- * It will return a value greater than its second argument if - * that limit would be exceeded. This implementation is allowed - * to access memory beyond the limit, but will not cross a page - * boundary when doing so. - * - * Much of the information about 21264 scheduling/coding comes from: - *      Compiler Writer's Guide for the Alpha 21264 - *      abbreviated as 'CWG' in other comments here - *      ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html - * Scheduling notation: - *      E       - either cluster - *      U       - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 - *      L       - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 - * Try not to change the actual algorithm if possible for consistency. - */ - -#include <asm/regdef.h> - - -/* Allow an exception for an insn; exit if we get one.  */ -#define EX(x,y...)			\ -	99: x,##y;			\ -	.section __ex_table,"a";	\ -	.long 99b - .;			\ -	lda v0, $exception-99b(zero);	\ -	.previous - - -	.set noreorder -	.set noat -	.text - -	.globl __strlen_user -	.ent __strlen_user -	.frame sp, 0, ra - -	.align 4 -__strlen_user: -	ldah	a1, 32767(zero)	# do not use plain strlen_user() for strings -				# that might be almost 2 GB long; you should -				# be using strnlen_user() instead -	nop -	nop -	nop - -	.globl __strnlen_user - -	.align 4 -__strnlen_user: -	.prologue 0 -	EX( ldq_u t0, 0(a0) )	# L : load first quadword (a0 may be misaligned) -	lda     t1, -1(zero)	# E : - -	insqh   t1, a0, t1	# U : -	andnot  a0, 7, v0	# E : -	or      t1, t0, t0	# E : -	subq	a0, 1, a0	# E : get our +1 for the return  - -	cmpbge  zero, t0, t1	# E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0 -	subq	a1, 7, t2	# E : -	subq	a0, v0, t0	# E : -	bne     t1, $found	# U : - -	addq	t2, t0, t2	# E : -	addq	a1, 1, a1	# E : -	nop			# E : -	nop			# E : - -	.align 4 -$loop:	ble	t2, $limit	# U : -	EX( ldq t0, 8(v0) )	# L : -	nop			# E : -	nop			# E : - -	cmpbge  zero, t0, t1	# E : -	subq	t2, 8, t2	# E : -	
addq    v0, 8, v0	# E : addr += 8 -	beq     t1, $loop	# U : - -$found: cttz	t1, t2		# U0 : -	addq	v0, t2, v0	# E : -	subq    v0, a0, v0	# E : -	ret			# L0 : - -$exception: -	nop -	nop -	nop -	ret - -	.align 4		# currently redundant -$limit: -	nop -	nop -	subq	a1, t2, v0 -	ret - -	.end __strlen_user diff --git a/arch/alpha/lib/strlen_user.S b/arch/alpha/lib/strlen_user.S deleted file mode 100644 index 508a18e9647..00000000000 --- a/arch/alpha/lib/strlen_user.S +++ /dev/null @@ -1,91 +0,0 @@ -/* - * arch/alpha/lib/strlen_user.S - * - * Return the length of the string including the NUL terminator - * (strlen+1) or zero if an error occurred. - * - * In places where it is critical to limit the processing time, - * and the data is not trusted, strnlen_user() should be used. - * It will return a value greater than its second argument if - * that limit would be exceeded. This implementation is allowed - * to access memory beyond the limit, but will not cross a page - * boundary when doing so. - */ - -#include <asm/regdef.h> - - -/* Allow an exception for an insn; exit if we get one.  */ -#define EX(x,y...)			
\ -	99: x,##y;			\ -	.section __ex_table,"a";	\ -	.long 99b - .;			\ -	lda v0, $exception-99b(zero);	\ -	.previous - - -	.set noreorder -	.set noat -	.text - -	.globl __strlen_user -	.ent __strlen_user -	.frame sp, 0, ra - -	.align 3 -__strlen_user: -	ldah	a1, 32767(zero)	# do not use plain strlen_user() for strings -				# that might be almost 2 GB long; you should -				# be using strnlen_user() instead - -	.globl __strnlen_user - -	.align 3 -__strnlen_user: -	.prologue 0 - -	EX( ldq_u t0, 0(a0) )	# load first quadword (a0 may be misaligned) -	lda     t1, -1(zero) -	insqh   t1, a0, t1 -	andnot  a0, 7, v0 -	or      t1, t0, t0 -	subq	a0, 1, a0	# get our +1 for the return  -	cmpbge  zero, t0, t1	# t1 <- bitmask: bit i == 1 <==> i-th byte == 0 -	subq	a1, 7, t2 -	subq	a0, v0, t0 -	bne     t1, $found - -	addq	t2, t0, t2 -	addq	a1, 1, a1 - -	.align 3 -$loop:	ble	t2, $limit -	EX( ldq t0, 8(v0) ) -	subq	t2, 8, t2 -	addq    v0, 8, v0	# addr += 8 -	cmpbge  zero, t0, t1 -	beq     t1, $loop - -$found:	negq    t1, t2		# clear all but least set bit -	and     t1, t2, t1 - -	and     t1, 0xf0, t2	# binary search for that set bit -	and	t1, 0xcc, t3 -	and	t1, 0xaa, t4 -	cmovne	t2, 4, t2 -	cmovne	t3, 2, t3 -	cmovne	t4, 1, t4 -	addq	t2, t3, t2 -	addq	v0, t4, v0 -	addq	v0, t2, v0 -	nop			# dual issue next two on ev4 and ev5 -	subq    v0, a0, v0 -$exception: -	ret - -	.align 3		# currently redundant -$limit: -	subq	a1, t2, v0 -	ret - -	.end __strlen_user diff --git a/arch/alpha/lib/strncpy_from_user.S b/arch/alpha/lib/strncpy_from_user.S deleted file mode 100644 index 73ee21160ff..00000000000 --- a/arch/alpha/lib/strncpy_from_user.S +++ /dev/null @@ -1,339 +0,0 @@ -/* - * arch/alpha/lib/strncpy_from_user.S - * Contributed by Richard Henderson (rth@tamu.edu) - * - * Just like strncpy except in the return value: - * - * -EFAULT       if an exception occurs before the terminator is copied. - * N             if the buffer filled. - * - * Otherwise the length of the string is returned. 
- */ - - -#include <asm/errno.h> -#include <asm/regdef.h> - - -/* Allow an exception for an insn; exit if we get one.  */ -#define EX(x,y...)			\ -	99: x,##y;			\ -	.section __ex_table,"a";	\ -	.long 99b - .;			\ -	lda $31, $exception-99b($0); 	\ -	.previous - - -	.set noat -	.set noreorder -	.text - -	.globl __strncpy_from_user -	.ent __strncpy_from_user -	.frame $30, 0, $26 -	.prologue 0 - -	.align 3 -$aligned: -	/* On entry to this basic block: -	   t0 == the first destination word for masking back in -	   t1 == the first source word.  */ - -	/* Create the 1st output word and detect 0's in the 1st input word.  */ -	lda	t2, -1		# e1    : build a mask against false zero -	mskqh	t2, a1, t2	# e0    :   detection in the src word -	mskqh	t1, a1, t3	# e0    : -	ornot	t1, t2, t2	# .. e1 : -	mskql	t0, a1, t0	# e0    : assemble the first output word -	cmpbge	zero, t2, t8	# .. e1 : bits set iff null found -	or	t0, t3, t0	# e0    : -	beq	a2, $a_eoc	# .. e1 : -	bne	t8, $a_eos	# .. e1 : - -	/* On entry to this basic block: -	   t0 == a source word not containing a null.  */ - -$a_loop: -	stq_u	t0, 0(a0)	# e0    : -	addq	a0, 8, a0	# .. e1 : -	EX( ldq_u t0, 0(a1) )	# e0    : -	addq	a1, 8, a1	# .. e1 : -	subq	a2, 1, a2	# e0    : -	cmpbge	zero, t0, t8	# .. e1 (stall) -	beq	a2, $a_eoc      # e1    : -	beq	t8, $a_loop	# e1    : - -	/* Take care of the final (partial) word store.  At this point -	   the end-of-count bit is set in t8 iff it applies. - -	   On entry to this basic block we have: -	   t0 == the source word containing the null -	   t8 == the cmpbge mask that found it.  */ - -$a_eos: -	negq	t8, t12		# e0    : find low bit set -	and	t8, t12, t12	# e1 (stall) - -	/* For the sake of the cache, don't read a destination word -	   if we're not going to need it.  */ -	and	t12, 0x80, t6	# e0    : -	bne	t6, 1f		# .. e1 (zdb) - -	/* We're doing a partial word store and so need to combine -	   our source and original destination words.  
*/ -	ldq_u	t1, 0(a0)	# e0    : -	subq	t12, 1, t6	# .. e1 : -	or	t12, t6, t8	# e0    : -	unop			# -	zapnot	t0, t8, t0	# e0    : clear src bytes > null -	zap	t1, t8, t1	# .. e1 : clear dst bytes <= null -	or	t0, t1, t0	# e1    : - -1:	stq_u	t0, 0(a0) -	br	$finish_up - -	/* Add the end-of-count bit to the eos detection bitmask.  */ -$a_eoc: -	or	t10, t8, t8 -	br	$a_eos - -	/*** The Function Entry Point ***/ -	.align 3 -__strncpy_from_user: -	mov	a0, v0		# save the string start -	beq	a2, $zerolength - -	/* Are source and destination co-aligned?  */ -	xor	a0, a1, t1	# e0    : -	and	a0, 7, t0	# .. e1 : find dest misalignment -	and	t1, 7, t1	# e0    : -	addq	a2, t0, a2	# .. e1 : bias count by dest misalignment -	subq	a2, 1, a2	# e0    : -	and	a2, 7, t2	# e1    : -	srl	a2, 3, a2	# e0    : a2 = loop counter = (count - 1)/8 -	addq	zero, 1, t10	# .. e1 : -	sll	t10, t2, t10	# e0    : t10 = bitmask of last count byte -	bne	t1, $unaligned	# .. e1 : - -	/* We are co-aligned; take care of a partial first word.  */ - -	EX( ldq_u t1, 0(a1) )	# e0    : load first src word -	addq	a1, 8, a1	# .. e1 : - -	beq	t0, $aligned	# avoid loading dest word if not needed -	ldq_u	t0, 0(a0)	# e0    : -	br	$aligned	# .. e1 : - - -/* The source and destination are not co-aligned.  Align the destination -   and cope.  We have to be very careful about not reading too much and -   causing a SEGV.  */ - -	.align 3 -$u_head: -	/* We know just enough now to be able to assemble the first -	   full source word.  We can still find a zero at the end of it -	   that prevents us from outputting the whole thing. - -	   On entry to this basic block: -	   t0 == the first dest word, unmasked -	   t1 == the shifted low bits of the first source word -	   t6 == bytemask that is -1 in dest word bytes */ - -	EX( ldq_u t2, 8(a1) )	# e0    : load second src word -	addq	a1, 8, a1	# .. 
e1 : -	mskql	t0, a0, t0	# e0    : mask trailing garbage in dst -	extqh	t2, a1, t4	# e0    : -	or	t1, t4, t1	# e1    : first aligned src word complete -	mskqh	t1, a0, t1	# e0    : mask leading garbage in src -	or	t0, t1, t0	# e0    : first output word complete -	or	t0, t6, t6	# e1    : mask original data for zero test -	cmpbge	zero, t6, t8	# e0    : -	beq	a2, $u_eocfin	# .. e1 : -	bne	t8, $u_final	# e1    : - -	lda	t6, -1			# e1    : mask out the bits we have -	mskql	t6, a1, t6		# e0    :   already seen -	stq_u	t0, 0(a0)		# e0    : store first output word -	or      t6, t2, t2		# .. e1 : -	cmpbge	zero, t2, t8		# e0    : find nulls in second partial -	addq	a0, 8, a0		# .. e1 : -	subq	a2, 1, a2		# e0    : -	bne	t8, $u_late_head_exit	# .. e1 : - -	/* Finally, we've got all the stupid leading edge cases taken care -	   of and we can set up to enter the main loop.  */ - -	extql	t2, a1, t1	# e0    : position hi-bits of lo word -	EX( ldq_u t2, 8(a1) )	# .. e1 : read next high-order source word -	addq	a1, 8, a1	# e0    : -	cmpbge	zero, t2, t8	# e1 (stall) -	beq	a2, $u_eoc	# e1    : -	bne	t8, $u_eos	# e1    : - -	/* Unaligned copy main loop.  In order to avoid reading too much, -	   the loop is structured to detect zeros in aligned source words. -	   This has, unfortunately, effectively pulled half of a loop -	   iteration out into the head and half into the tail, but it does -	   prevent nastiness from accumulating in the very thing we want -	   to run as fast as possible. - -	   On entry to this basic block: -	   t1 == the shifted high-order bits from the previous source word -	   t2 == the unshifted current source word - -	   We further know that t2 does not contain a null terminator.  */ - -	.align 3 -$u_loop: -	extqh	t2, a1, t0	# e0    : extract high bits for current word -	addq	a1, 8, a1	# .. e1 : -	extql	t2, a1, t3	# e0    : extract low bits for next time -	addq	a0, 8, a0	# .. e1 : -	or	t0, t1, t0	# e0    : current dst word now complete -	EX( ldq_u t2, 0(a1) )	# .. 
e1 : load high word for next time -	stq_u	t0, -8(a0)	# e0    : save the current word -	mov	t3, t1		# .. e1 : -	subq	a2, 1, a2	# e0    : -	cmpbge	zero, t2, t8	# .. e1 : test new word for eos -	beq	a2, $u_eoc	# e1    : -	beq	t8, $u_loop	# e1    : - -	/* We've found a zero somewhere in the source word we just read. -	   If it resides in the lower half, we have one (probably partial) -	   word to write out, and if it resides in the upper half, we -	   have one full and one partial word left to write out. - -	   On entry to this basic block: -	   t1 == the shifted high-order bits from the previous source word -	   t2 == the unshifted current source word.  */ -$u_eos: -	extqh	t2, a1, t0	# e0    : -	or	t0, t1, t0	# e1    : first (partial) source word complete - -	cmpbge	zero, t0, t8	# e0    : is the null in this first bit? -	bne	t8, $u_final	# .. e1 (zdb) - -	stq_u	t0, 0(a0)	# e0    : the null was in the high-order bits -	addq	a0, 8, a0	# .. e1 : -	subq	a2, 1, a2	# e1    : - -$u_late_head_exit: -	extql	t2, a1, t0	# .. e0 : -	cmpbge	zero, t0, t8	# e0    : -	or	t8, t10, t6	# e1    : -	cmoveq	a2, t6, t8	# e0    : -	nop			# .. e1 : - -	/* Take care of a final (probably partial) result word. -	   On entry to this basic block: -	   t0 == assembled source word -	   t8 == cmpbge mask that found the null.  */ -$u_final: -	negq	t8, t6		# e0    : isolate low bit set -	and	t6, t8, t12	# e1    : - -	and	t12, 0x80, t6	# e0    : avoid dest word load if we can -	bne	t6, 1f		# .. e1 (zdb) - -	ldq_u	t1, 0(a0)	# e0    : -	subq	t12, 1, t6	# .. e1 : -	or	t6, t12, t8	# e0    : -	zapnot	t0, t8, t0	# .. e1 : kill source bytes > null -	zap	t1, t8, t1	# e0    : kill dest bytes <= null -	or	t0, t1, t0	# e1    : - -1:	stq_u	t0, 0(a0)	# e0    : -	br	$finish_up - -$u_eoc:				# end-of-count -	extqh	t2, a1, t0 -	or	t0, t1, t0 -	cmpbge	zero, t0, t8 - -$u_eocfin:			# end-of-count, final word -	or	t10, t8, t8 -	br	$u_final - -	/* Unaligned copy entry point.  
*/ -	.align 3 -$unaligned: - -	EX( ldq_u t1, 0(a1) )	# e0    : load first source word - -	and	a0, 7, t4	# .. e1 : find dest misalignment -	and	a1, 7, t5	# e0    : find src misalignment - -	/* Conditionally load the first destination word and a bytemask -	   with 0xff indicating that the destination byte is sacrosanct.  */ - -	mov	zero, t0	# .. e1 : -	mov	zero, t6	# e0    : -	beq	t4, 1f		# .. e1 : -	ldq_u	t0, 0(a0)	# e0    : -	lda	t6, -1		# .. e1 : -	mskql	t6, a0, t6	# e0    : -1: -	subq	a1, t4, a1	# .. e1 : sub dest misalignment from src addr - -	/* If source misalignment is larger than dest misalignment, we need -	   extra startup checks to avoid SEGV.  */ - -	cmplt	t4, t5, t12	# e1    : -	extql	t1, a1, t1	# .. e0 : shift src into place -	lda	t2, -1		# e0    : for creating masks later -	beq	t12, $u_head	# e1    : - -	mskqh	t2, t5, t2	# e0    : begin src byte validity mask -	cmpbge	zero, t1, t8	# .. e1 : is there a zero? -	extql	t2, a1, t2	# e0    : -	or	t8, t10, t5	# .. e1 : test for end-of-count too -	cmpbge	zero, t2, t3	# e0    : -	cmoveq	a2, t5, t8	# .. e1 : -	andnot	t8, t3, t8	# e0    : -	beq	t8, $u_head	# .. e1 (zdb) - -	/* At this point we've found a zero in the first partial word of -	   the source.  We need to isolate the valid source data and mask -	   it into the original destination data.  (Incidentally, we know -	   that we'll need at least one byte of that original dest word.) */ - -	ldq_u	t0, 0(a0)	# e0    : -	negq	t8, t6		# .. e1 : build bitmask of bytes <= zero -	mskqh	t1, t4, t1	# e0    : -	and	t6, t8, t12	# .. e1 : -	subq	t12, 1, t6	# e0    : -	or	t6, t12, t8	# e1    : - -	zapnot	t2, t8, t2	# e0    : prepare source word; mirror changes -	zapnot	t1, t8, t1	# .. e1 : to source validity mask - -	andnot	t0, t2, t0	# e0    : zero place for source to reside -	or	t0, t1, t0	# e1    : and put it there -	stq_u	t0, 0(a0)	# e0    : - -$finish_up: -	zapnot	t0, t12, t4	# was last byte written null? 
-	cmovne	t4, 1, t4 - -	and	t12, 0xf0, t3	# binary search for the address of the -	and	t12, 0xcc, t2	# last byte written -	and	t12, 0xaa, t1 -	bic	a0, 7, t0 -	cmovne	t3, 4, t3 -	cmovne	t2, 2, t2 -	cmovne	t1, 1, t1 -	addq	t0, t3, t0 -	addq	t1, t2, t1 -	addq	t0, t1, t0 -	addq	t0, t4, t0	# add one if we filled the buffer - -	subq	t0, v0, v0	# find string length -	ret - -$zerolength: -	clr	v0 -$exception: -	ret - -	.end __strncpy_from_user diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 5eecab1a84e..0c4132dd350 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -89,6 +89,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,  	const struct exception_table_entry *fixup;  	int fault, si_code = SEGV_MAPERR;  	siginfo_t info; +	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | +			      (cause > 0 ? FAULT_FLAG_WRITE : 0));  	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults  	   (or is suppressed by the PALcode).  Support that for older CPUs @@ -114,6 +116,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,  		goto vmalloc_fault;  #endif +retry:  	down_read(&mm->mmap_sem);  	vma = find_vma(mm, address);  	if (!vma) @@ -144,8 +147,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr,  	/* If for any reason at all we couldn't handle the fault,  	   make sure we exit gracefully rather than endlessly redo  	   the fault.  */ -	fault = handle_mm_fault(mm, vma, address, cause > 0 ? 
FAULT_FLAG_WRITE : 0); -	up_read(&mm->mmap_sem); +	fault = handle_mm_fault(mm, vma, address, flags); + +	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) +		return; +  	if (unlikely(fault & VM_FAULT_ERROR)) {  		if (fault & VM_FAULT_OOM)  			goto out_of_memory; @@ -153,10 +159,26 @@ do_page_fault(unsigned long address, unsigned long mmcsr,  			goto do_sigbus;  		BUG();  	} -	if (fault & VM_FAULT_MAJOR) -		current->maj_flt++; -	else -		current->min_flt++; + +	if (flags & FAULT_FLAG_ALLOW_RETRY) { +		if (fault & VM_FAULT_MAJOR) +			current->maj_flt++; +		else +			current->min_flt++; +		if (fault & VM_FAULT_RETRY) { +			flags &= ~FAULT_FLAG_ALLOW_RETRY; + +			 /* No need to up_read(&mm->mmap_sem) as we would +			 * have already released it in __lock_page_or_retry +			 * in mm/filemap.c. +			 */ + +			goto retry; +		} +	} + +	up_read(&mm->mmap_sem); +  	return;  	/* Something tried to access memory that isn't in our memory map. @@ -186,12 +208,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,  	/* We ran out of memory, or some other thing happened to us that  	   made us unable to handle the page fault gracefully.  */   out_of_memory: +	up_read(&mm->mmap_sem);  	if (!user_mode(regs))  		goto no_context;  	pagefault_out_of_memory();  	return;   do_sigbus: +	up_read(&mm->mmap_sem);  	/* Send a sigbus, regardless of whether we were in kernel  	   or user mode.  
*/  	info.si_signo = SIGBUS; diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c index a0a5d27aa21..b8ce18f485d 100644 --- a/arch/alpha/oprofile/common.c +++ b/arch/alpha/oprofile/common.c @@ -12,6 +12,7 @@  #include <linux/smp.h>  #include <linux/errno.h>  #include <asm/ptrace.h> +#include <asm/special_insns.h>  #include "op_impl.h" diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e91c7cdc6fe..c5f9ae5dbd1 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -38,7 +38,6 @@ config ARM  	select HARDIRQS_SW_RESEND  	select GENERIC_IRQ_PROBE  	select GENERIC_IRQ_SHOW -	select GENERIC_IRQ_PROBE  	select ARCH_WANT_IPC_PARSE_VERSION  	select HARDIRQS_SW_RESEND  	select CPU_PM if (SUSPEND || CPU_IDLE) @@ -126,11 +125,6 @@ config TRACE_IRQFLAGS_SUPPORT  	bool  	default y -config GENERIC_LOCKBREAK -	bool -	default y -	depends on SMP && PREEMPT -  config RWSEM_GENERIC_SPINLOCK  	bool  	default y @@ -2150,6 +2144,7 @@ source "drivers/cpufreq/Kconfig"  config CPU_FREQ_IMX  	tristate "CPUfreq driver for i.MX CPUs"  	depends on ARCH_MXC && CPU_FREQ +	select CPU_FREQ_TABLE  	help  	  This enables the CPUfreq driver for i.MX CPUs. 
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 59509c48d7e..bd0cff3f808 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -154,5 +154,10 @@  			#size-cells = <0>;  			ti,hwmods = "i2c3";  		}; + +		wdt2: wdt@44e35000 { +			compatible = "ti,omap3-wdt"; +			ti,hwmods = "wd_timer2"; +		};  	};  }; diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts index cd86177a3ea..59d9789e550 100644 --- a/arch/arm/boot/dts/imx51-babbage.dts +++ b/arch/arm/boot/dts/imx51-babbage.dts @@ -25,8 +25,8 @@  		aips@70000000 { /* aips-1 */  			spba@70000000 {  				esdhc@70004000 { /* ESDHC1 */ -					fsl,cd-internal; -					fsl,wp-internal; +					fsl,cd-controller; +					fsl,wp-controller;  					status = "okay";  				}; diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts index 52d94704510..f8ca6fa8819 100644 --- a/arch/arm/boot/dts/kirkwood-iconnect.dts +++ b/arch/arm/boot/dts/kirkwood-iconnect.dts @@ -41,9 +41,13 @@  		};  		power-blue {  			label = "power:blue"; -			gpios = <&gpio1 11 0>; +			gpios = <&gpio1 10 0>;  			linux,default-trigger = "timer";  		}; +		power-red { +			label = "power:red"; +			gpios = <&gpio1 11 0>; +		};  		usb1 {  			label = "usb1:blue";  			gpios = <&gpio1 12 0>; diff --git a/arch/arm/boot/dts/twl6030.dtsi b/arch/arm/boot/dts/twl6030.dtsi index 3b2f3510d7e..d351b27d721 100644 --- a/arch/arm/boot/dts/twl6030.dtsi +++ b/arch/arm/boot/dts/twl6030.dtsi @@ -66,6 +66,7 @@  	vcxio: regulator@8 {  		compatible = "ti,twl6030-vcxio"; +		regulator-always-on;  	};  	vusb: regulator@9 { @@ -74,10 +75,12 @@  	v1v8: regulator@10 {  		compatible = "ti,twl6030-v1v8"; +		regulator-always-on;  	};  	v2v1: regulator@11 {  		compatible = "ti,twl6030-v2v1"; +		regulator-always-on;  	};  	clk32kg: regulator@12 { diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index 2d4f661d1cf..da6845493ca 100644 --- 
a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -86,6 +86,7 @@ CONFIG_NEW_LEDS=y  CONFIG_LEDS_CLASS=y  CONFIG_LEDS_LM3530=y  CONFIG_LEDS_LP5521=y +CONFIG_LEDS_GPIO=y  CONFIG_RTC_CLASS=y  CONFIG_RTC_DRV_AB8500=y  CONFIG_RTC_DRV_PL031=y diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index f66626d71e7..41dc31f834c 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)  #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0) +#define pte_none(pte)		(!pte_val(pte)) +#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT) +#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY)) +#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG) +#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN)) +#define pte_special(pte)	(0) + +#define pte_present_user(pte) \ +	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ +	 (L_PTE_PRESENT | L_PTE_USER)) +  #if __LINUX_ARM_ARCH__ < 6  static inline void __sync_icache_dcache(pte_t pteval)  { @@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval);  static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,  			      pte_t *ptep, pte_t pteval)  { -	if (addr >= TASK_SIZE) -		set_pte_ext(ptep, pteval, 0); -	else { +	unsigned long ext = 0; + +	if (addr < TASK_SIZE && pte_present_user(pteval)) {  		__sync_icache_dcache(pteval); -		set_pte_ext(ptep, pteval, PTE_EXT_NG); +		ext |= PTE_EXT_NG;  	} -} -#define pte_none(pte)		(!pte_val(pte)) -#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY)) -#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG) -#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN)) -#define pte_special(pte)	(0) - -#define pte_present_user(pte) \ -	((pte_val(pte) & (L_PTE_PRESENT | 
L_PTE_USER)) == \ -	 (L_PTE_PRESENT | L_PTE_USER)) +	set_pte_ext(ptep, pteval, ext); +}  #define PTE_BIT_FUNC(fn,op) \  static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)   *   *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1   *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - *   <--------------- offset --------------------> <- type --> 0 0 0 + *   <--------------- offset ----------------------> < type -> 0 0 0   * - * This gives us up to 63 swap files and 32GB per swap file.  Note that + * This gives us up to 31 swap files and 64GB per swap file.  Note that   * the offset field is always non-zero.   */  #define __SWP_TYPE_SHIFT	3 -#define __SWP_TYPE_BITS		6 +#define __SWP_TYPE_BITS		5  #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)  #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index e3f75726343..05b8e82ec9f 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -10,5 +10,7 @@  extern void sched_clock_postinit(void);  extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); +extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, +		unsigned long rate);  #endif diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index 27d186abbc0..f4515393248 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -21,6 +21,8 @@ struct clock_data {  	u32 epoch_cyc_copy;  	u32 mult;  	u32 shift; +	bool suspended; +	bool needs_suspend;  };  static void sched_clock_poll(unsigned long wrap_ticks); @@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)  	u64 epoch_ns;  	u32 epoch_cyc; +	if (cd.suspended) +		return cd.epoch_ns; +  	/*  	 * Load the epoch_cyc and epoch_ns atomically.  
We do this by  	 * ensuring that we always write epoch_cyc, epoch_ns and @@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks)  	update_sched_clock();  } +void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, +		unsigned long rate) +{ +	setup_sched_clock(read, bits, rate); +	cd.needs_suspend = true; +} +  void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)  {  	unsigned long r, w; @@ -169,11 +181,23 @@ void __init sched_clock_postinit(void)  static int sched_clock_suspend(void)  {  	sched_clock_poll(sched_clock_timer.data); +	if (cd.needs_suspend) +		cd.suspended = true;  	return 0;  } +static void sched_clock_resume(void) +{ +	if (cd.needs_suspend) { +		cd.epoch_cyc = read_sched_clock(); +		cd.epoch_cyc_copy = cd.epoch_cyc; +		cd.suspended = false; +	} +} +  static struct syscore_ops sched_clock_ops = {  	.suspend = sched_clock_suspend, +	.resume = sched_clock_resume,  };  static int __init sched_clock_syscore_init(void) diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 198b08456e9..26c12c6440f 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -321,7 +321,7 @@ void store_cpu_topology(unsigned int cpuid)   * init_cpu_topology is called at boot when only one cpu is running   * which prevent simultaneous write access to cpu_topology array   */ -void init_cpu_topology(void) +void __init init_cpu_topology(void)  {  	unsigned int cpu; diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile index 2473fd1fd51..af72969820b 100644 --- a/arch/arm/lib/Makefile +++ b/arch/arm/lib/Makefile @@ -16,13 +16,30 @@ lib-y		:= backtrace.o changebit.o csumipv6.o csumpartial.o   \  		   call_with_stack.o  mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o -mmu-y	+= copy_from_user.o copy_to_user.o + +# the code in uaccess.S is not preemption safe and +# probably faster on ARMv3 only +ifeq ($(CONFIG_PREEMPT),y) +  mmu-y	+= copy_from_user.o copy_to_user.o +else +ifneq 
($(CONFIG_CPU_32v3),y) +  mmu-y	+= copy_from_user.o copy_to_user.o +else +  mmu-y	+= uaccess.o +endif +endif  # using lib_ here won't override already available weak symbols  obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o -lib-$(CONFIG_MMU)		+= $(mmu-y) -lib-y				+= io-readsw-armv4.o io-writesw-armv4.o +lib-$(CONFIG_MMU) += $(mmu-y) + +ifeq ($(CONFIG_CPU_32v3),y) +  lib-y	+= io-readsw-armv3.o io-writesw-armv3.o +else +  lib-y	+= io-readsw-armv4.o io-writesw-armv4.o +endif +  lib-$(CONFIG_ARCH_RPC)		+= ecard.o io-acorn.o floppydma.o  lib-$(CONFIG_ARCH_SHARK)	+= io-shark.o diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S new file mode 100644 index 00000000000..88487c8c4f2 --- /dev/null +++ b/arch/arm/lib/io-readsw-armv3.S @@ -0,0 +1,106 @@ +/* + *  linux/arch/arm/lib/io-readsw-armv3.S + * + *  Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/linkage.h> +#include <asm/assembler.h> + +.Linsw_bad_alignment: +		adr	r0, .Linsw_bad_align_msg +		mov	r2, lr +		b	panic +.Linsw_bad_align_msg: +		.asciz	"insw: bad buffer alignment (0x%p, lr=0x%08lX)\n" +		.align + +.Linsw_align:	tst	r1, #1 +		bne	.Linsw_bad_alignment + +		ldr	r3, [r0] +		strb	r3, [r1], #1 +		mov	r3, r3, lsr #8 +		strb	r3, [r1], #1 + +		subs	r2, r2, #1 +		moveq	pc, lr + +ENTRY(__raw_readsw) +		teq	r2, #0		@ do we have to check for the zero len? 
+		moveq	pc, lr +		tst	r1, #3 +		bne	.Linsw_align + +.Linsw_aligned:	mov	ip, #0xff +		orr	ip, ip, ip, lsl #8 +		stmfd	sp!, {r4, r5, r6, lr} + +		subs	r2, r2, #8 +		bmi	.Lno_insw_8 + +.Linsw_8_lp:	ldr	r3, [r0] +		and	r3, r3, ip +		ldr	r4, [r0] +		orr	r3, r3, r4, lsl #16 + +		ldr	r4, [r0] +		and	r4, r4, ip +		ldr	r5, [r0] +		orr	r4, r4, r5, lsl #16 + +		ldr	r5, [r0] +		and	r5, r5, ip +		ldr	r6, [r0] +		orr	r5, r5, r6, lsl #16 + +		ldr	r6, [r0] +		and	r6, r6, ip +		ldr	lr, [r0] +		orr	r6, r6, lr, lsl #16 + +		stmia	r1!, {r3 - r6} + +		subs	r2, r2, #8 +		bpl	.Linsw_8_lp + +		tst	r2, #7 +		ldmeqfd	sp!, {r4, r5, r6, pc} + +.Lno_insw_8:	tst	r2, #4 +		beq	.Lno_insw_4 + +		ldr	r3, [r0] +		and	r3, r3, ip +		ldr	r4, [r0] +		orr	r3, r3, r4, lsl #16 + +		ldr	r4, [r0] +		and	r4, r4, ip +		ldr	r5, [r0] +		orr	r4, r4, r5, lsl #16 + +		stmia	r1!, {r3, r4} + +.Lno_insw_4:	tst	r2, #2 +		beq	.Lno_insw_2 + +		ldr	r3, [r0] +		and	r3, r3, ip +		ldr	r4, [r0] +		orr	r3, r3, r4, lsl #16 + +		str	r3, [r1], #4 + +.Lno_insw_2:	tst	r2, #1 +		ldrne	r3, [r0] +		strneb	r3, [r1], #1 +		movne	r3, r3, lsr #8 +		strneb	r3, [r1] + +		ldmfd	sp!, {r4, r5, r6, pc} + + diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S new file mode 100644 index 00000000000..49b800419e3 --- /dev/null +++ b/arch/arm/lib/io-writesw-armv3.S @@ -0,0 +1,126 @@ +/* + *  linux/arch/arm/lib/io-writesw-armv3.S + * + *  Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/linkage.h> +#include <asm/assembler.h> + +.Loutsw_bad_alignment: +		adr	r0, .Loutsw_bad_align_msg +		mov	r2, lr +		b	panic +.Loutsw_bad_align_msg: +		.asciz	"outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n" +		.align + +.Loutsw_align:	tst	r1, #1 +		bne	.Loutsw_bad_alignment + +		add	r1, r1, #2 + +		ldr	r3, [r1, #-4] +		mov	r3, r3, lsr #16 +		orr	r3, r3, r3, lsl #16 +		str	r3, [r0] +		subs	r2, r2, #1 +		moveq	pc, lr + +ENTRY(__raw_writesw) +		teq	r2, #0		@ do we have to check for the zero len? +		moveq	pc, lr +		tst	r1, #3 +		bne	.Loutsw_align + +		stmfd	sp!, {r4, r5, r6, lr} + +		subs	r2, r2, #8 +		bmi	.Lno_outsw_8 + +.Loutsw_8_lp:	ldmia	r1!, {r3, r4, r5, r6} + +		mov	ip, r3, lsl #16 +		orr	ip, ip, ip, lsr #16 +		str	ip, [r0] + +		mov	ip, r3, lsr #16 +		orr	ip, ip, ip, lsl #16 +		str	ip, [r0] + +		mov	ip, r4, lsl #16 +		orr	ip, ip, ip, lsr #16 +		str	ip, [r0] + +		mov	ip, r4, lsr #16 +		orr	ip, ip, ip, lsl #16 +		str	ip, [r0] + +		mov	ip, r5, lsl #16 +		orr	ip, ip, ip, lsr #16 +		str	ip, [r0] + +		mov	ip, r5, lsr #16 +		orr	ip, ip, ip, lsl #16 +		str	ip, [r0] + +		mov	ip, r6, lsl #16 +		orr	ip, ip, ip, lsr #16 +		str	ip, [r0] + +		mov	ip, r6, lsr #16 +		orr	ip, ip, ip, lsl #16 +		str	ip, [r0] + +		subs	r2, r2, #8 +		bpl	.Loutsw_8_lp + +		tst	r2, #7 +		ldmeqfd	sp!, {r4, r5, r6, pc} + +.Lno_outsw_8:	tst	r2, #4 +		beq	.Lno_outsw_4 + +		ldmia	r1!, {r3, r4} + +		mov	ip, r3, lsl #16 +		orr	ip, ip, ip, lsr #16 +		str	ip, [r0] + +		mov	ip, r3, lsr #16 +		orr	ip, ip, ip, lsl #16 +		str	ip, [r0] + +		mov	ip, r4, lsl #16 +		orr	ip, ip, ip, lsr #16 +		str	ip, [r0] + +		mov	ip, r4, lsr #16 +		orr	ip, ip, ip, lsl #16 +		str	ip, [r0] + +.Lno_outsw_4:	tst	r2, #2 +		beq	.Lno_outsw_2 + +		ldr	r3, [r1], #4 + +		mov	ip, r3, lsl #16 +		orr	ip, ip, ip, lsr #16 +		str	ip, [r0] + +		mov	ip, r3, lsr #16 +		orr	ip, ip, ip, lsl #16 +		str	ip, [r0] + +.Lno_outsw_2:	tst	r2, #1 + +		ldrne	r3, [r1] + +		movne	ip, r3, lsl #16 +		orrne	ip, ip, ip, lsr #16 +		strne	ip, [r0] + 
+		ldmfd	sp!, {r4, r5, r6, pc} diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S new file mode 100644 index 00000000000..5c908b1cb8e --- /dev/null +++ b/arch/arm/lib/uaccess.S @@ -0,0 +1,564 @@ +/* + *  linux/arch/arm/lib/uaccess.S + * + *  Copyright (C) 1995, 1996,1997,1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + *  Routines to block copy data to/from user memory + *   These are highly optimised both for the 4k page size + *   and for various alignments. + */ +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/errno.h> +#include <asm/domain.h> + +		.text + +#define PAGE_SHIFT 12 + +/* Prototype: int __copy_to_user(void *to, const char *from, size_t n) + * Purpose  : copy a block to user memory from kernel memory + * Params   : to   - user memory + *          : from - kernel memory + *          : n    - number of bytes to copy + * Returns  : Number of bytes NOT copied. + */ + +.Lc2u_dest_not_aligned: +		rsb	ip, ip, #4 +		cmp	ip, #2 +		ldrb	r3, [r1], #1 +USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault +		ldrgeb	r3, [r1], #1 +USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault +		ldrgtb	r3, [r1], #1 +USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault +		sub	r2, r2, ip +		b	.Lc2u_dest_aligned + +ENTRY(__copy_to_user) +		stmfd	sp!, {r2, r4 - r7, lr} +		cmp	r2, #4 +		blt	.Lc2u_not_enough +		ands	ip, r0, #3 +		bne	.Lc2u_dest_not_aligned +.Lc2u_dest_aligned: + +		ands	ip, r1, #3 +		bne	.Lc2u_src_not_aligned +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... 
+ */ + +.Lc2u_0fupi:	subs	r2, r2, #4 +		addmi	ip, r2, #4 +		bmi	.Lc2u_0nowords +		ldr	r3, [r1], #4 +USER(	TUSER(	str)	r3, [r0], #4)			@ May fault +		mov	ip, r0, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction +		rsb	ip, ip, #0 +		movs	ip, ip, lsr #32 - PAGE_SHIFT +		beq	.Lc2u_0fupi +/* + * ip = max no. of bytes to copy before needing another "strt" insn + */ +		cmp	r2, ip +		movlt	ip, r2 +		sub	r2, r2, ip +		subs	ip, ip, #32 +		blt	.Lc2u_0rem8lp + +.Lc2u_0cpy8lp:	ldmia	r1!, {r3 - r6} +		stmia	r0!, {r3 - r6}			@ Shouldnt fault +		ldmia	r1!, {r3 - r6} +		subs	ip, ip, #32 +		stmia	r0!, {r3 - r6}			@ Shouldnt fault +		bpl	.Lc2u_0cpy8lp + +.Lc2u_0rem8lp:	cmn	ip, #16 +		ldmgeia	r1!, {r3 - r6} +		stmgeia	r0!, {r3 - r6}			@ Shouldnt fault +		tst	ip, #8 +		ldmneia	r1!, {r3 - r4} +		stmneia	r0!, {r3 - r4}			@ Shouldnt fault +		tst	ip, #4 +		ldrne	r3, [r1], #4 +	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault +		ands	ip, ip, #3 +		beq	.Lc2u_0fupi +.Lc2u_0nowords:	teq	ip, #0 +		beq	.Lc2u_finished +.Lc2u_nowords:	cmp	ip, #2 +		ldrb	r3, [r1], #1 +USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault +		ldrgeb	r3, [r1], #1 +USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault +		ldrgtb	r3, [r1], #1 +USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault +		b	.Lc2u_finished + +.Lc2u_not_enough: +		movs	ip, r2 +		bne	.Lc2u_nowords +.Lc2u_finished:	mov	r0, #0 +		ldmfd	sp!, {r2, r4 - r7, pc} + +.Lc2u_src_not_aligned: +		bic	r1, r1, #3 +		ldr	r7, [r1], #4 +		cmp	ip, #2 +		bgt	.Lc2u_3fupi +		beq	.Lc2u_2fupi +.Lc2u_1fupi:	subs	r2, r2, #4 +		addmi	ip, r2, #4 +		bmi	.Lc2u_1nowords +		mov	r3, r7, pull #8 +		ldr	r7, [r1], #4 +		orr	r3, r3, r7, push #24 +USER(	TUSER(	str)	r3, [r0], #4)			@ May fault +		mov	ip, r0, lsl #32 - PAGE_SHIFT +		rsb	ip, ip, #0 +		movs	ip, ip, lsr #32 - PAGE_SHIFT +		beq	.Lc2u_1fupi +		cmp	r2, ip +		movlt	ip, r2 +		sub	r2, r2, ip +		subs	ip, ip, #16 +		blt	.Lc2u_1rem8lp + +.Lc2u_1cpy8lp:	mov	r3, r7, pull #8 +		ldmia	r1!, {r4 - r7} +		subs	ip, ip, #16 +		orr	r3, r3, r4, 
push #24 +		mov	r4, r4, pull #8 +		orr	r4, r4, r5, push #24 +		mov	r5, r5, pull #8 +		orr	r5, r5, r6, push #24 +		mov	r6, r6, pull #8 +		orr	r6, r6, r7, push #24 +		stmia	r0!, {r3 - r6}			@ Shouldnt fault +		bpl	.Lc2u_1cpy8lp + +.Lc2u_1rem8lp:	tst	ip, #8 +		movne	r3, r7, pull #8 +		ldmneia	r1!, {r4, r7} +		orrne	r3, r3, r4, push #24 +		movne	r4, r4, pull #8 +		orrne	r4, r4, r7, push #24 +		stmneia	r0!, {r3 - r4}			@ Shouldnt fault +		tst	ip, #4 +		movne	r3, r7, pull #8 +		ldrne	r7, [r1], #4 +		orrne	r3, r3, r7, push #24 +	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault +		ands	ip, ip, #3 +		beq	.Lc2u_1fupi +.Lc2u_1nowords:	mov	r3, r7, get_byte_1 +		teq	ip, #0 +		beq	.Lc2u_finished +		cmp	ip, #2 +USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault +		movge	r3, r7, get_byte_2 +USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault +		movgt	r3, r7, get_byte_3 +USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault +		b	.Lc2u_finished + +.Lc2u_2fupi:	subs	r2, r2, #4 +		addmi	ip, r2, #4 +		bmi	.Lc2u_2nowords +		mov	r3, r7, pull #16 +		ldr	r7, [r1], #4 +		orr	r3, r3, r7, push #16 +USER(	TUSER(	str)	r3, [r0], #4)			@ May fault +		mov	ip, r0, lsl #32 - PAGE_SHIFT +		rsb	ip, ip, #0 +		movs	ip, ip, lsr #32 - PAGE_SHIFT +		beq	.Lc2u_2fupi +		cmp	r2, ip +		movlt	ip, r2 +		sub	r2, r2, ip +		subs	ip, ip, #16 +		blt	.Lc2u_2rem8lp + +.Lc2u_2cpy8lp:	mov	r3, r7, pull #16 +		ldmia	r1!, {r4 - r7} +		subs	ip, ip, #16 +		orr	r3, r3, r4, push #16 +		mov	r4, r4, pull #16 +		orr	r4, r4, r5, push #16 +		mov	r5, r5, pull #16 +		orr	r5, r5, r6, push #16 +		mov	r6, r6, pull #16 +		orr	r6, r6, r7, push #16 +		stmia	r0!, {r3 - r6}			@ Shouldnt fault +		bpl	.Lc2u_2cpy8lp + +.Lc2u_2rem8lp:	tst	ip, #8 +		movne	r3, r7, pull #16 +		ldmneia	r1!, {r4, r7} +		orrne	r3, r3, r4, push #16 +		movne	r4, r4, pull #16 +		orrne	r4, r4, r7, push #16 +		stmneia	r0!, {r3 - r4}			@ Shouldnt fault +		tst	ip, #4 +		movne	r3, r7, pull #16 +		ldrne	r7, [r1], #4 +		orrne	r3, r3, r7, push #16 +	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault 
+		ands	ip, ip, #3 +		beq	.Lc2u_2fupi +.Lc2u_2nowords:	mov	r3, r7, get_byte_2 +		teq	ip, #0 +		beq	.Lc2u_finished +		cmp	ip, #2 +USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault +		movge	r3, r7, get_byte_3 +USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault +		ldrgtb	r3, [r1], #0 +USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault +		b	.Lc2u_finished + +.Lc2u_3fupi:	subs	r2, r2, #4 +		addmi	ip, r2, #4 +		bmi	.Lc2u_3nowords +		mov	r3, r7, pull #24 +		ldr	r7, [r1], #4 +		orr	r3, r3, r7, push #8 +USER(	TUSER(	str)	r3, [r0], #4)			@ May fault +		mov	ip, r0, lsl #32 - PAGE_SHIFT +		rsb	ip, ip, #0 +		movs	ip, ip, lsr #32 - PAGE_SHIFT +		beq	.Lc2u_3fupi +		cmp	r2, ip +		movlt	ip, r2 +		sub	r2, r2, ip +		subs	ip, ip, #16 +		blt	.Lc2u_3rem8lp + +.Lc2u_3cpy8lp:	mov	r3, r7, pull #24 +		ldmia	r1!, {r4 - r7} +		subs	ip, ip, #16 +		orr	r3, r3, r4, push #8 +		mov	r4, r4, pull #24 +		orr	r4, r4, r5, push #8 +		mov	r5, r5, pull #24 +		orr	r5, r5, r6, push #8 +		mov	r6, r6, pull #24 +		orr	r6, r6, r7, push #8 +		stmia	r0!, {r3 - r6}			@ Shouldnt fault +		bpl	.Lc2u_3cpy8lp + +.Lc2u_3rem8lp:	tst	ip, #8 +		movne	r3, r7, pull #24 +		ldmneia	r1!, {r4, r7} +		orrne	r3, r3, r4, push #8 +		movne	r4, r4, pull #24 +		orrne	r4, r4, r7, push #8 +		stmneia	r0!, {r3 - r4}			@ Shouldnt fault +		tst	ip, #4 +		movne	r3, r7, pull #24 +		ldrne	r7, [r1], #4 +		orrne	r3, r3, r7, push #8 +	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault +		ands	ip, ip, #3 +		beq	.Lc2u_3fupi +.Lc2u_3nowords:	mov	r3, r7, get_byte_3 +		teq	ip, #0 +		beq	.Lc2u_finished +		cmp	ip, #2 +USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault +		ldrgeb	r3, [r1], #1 +USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault +		ldrgtb	r3, [r1], #0 +USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault +		b	.Lc2u_finished +ENDPROC(__copy_to_user) + +		.pushsection .fixup,"ax" +		.align	0 +9001:		ldmfd	sp!, {r0, r4 - r7, pc} +		.popsection + +/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n); + * Purpose  : copy a block from 
user memory to kernel memory + * Params   : to   - kernel memory + *          : from - user memory + *          : n    - number of bytes to copy + * Returns  : Number of bytes NOT copied. + */ +.Lcfu_dest_not_aligned: +		rsb	ip, ip, #4 +		cmp	ip, #2 +USER(	TUSER(	ldrb)	r3, [r1], #1)			@ May fault +		strb	r3, [r0], #1 +USER(	TUSER(	ldrgeb) r3, [r1], #1)			@ May fault +		strgeb	r3, [r0], #1 +USER(	TUSER(	ldrgtb) r3, [r1], #1)			@ May fault +		strgtb	r3, [r0], #1 +		sub	r2, r2, ip +		b	.Lcfu_dest_aligned + +ENTRY(__copy_from_user) +		stmfd	sp!, {r0, r2, r4 - r7, lr} +		cmp	r2, #4 +		blt	.Lcfu_not_enough +		ands	ip, r0, #3 +		bne	.Lcfu_dest_not_aligned +.Lcfu_dest_aligned: +		ands	ip, r1, #3 +		bne	.Lcfu_src_not_aligned + +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.Lcfu_0fupi:	subs	r2, r2, #4 +		addmi	ip, r2, #4 +		bmi	.Lcfu_0nowords +USER(	TUSER(	ldr)	r3, [r1], #4) +		str	r3, [r0], #4 +		mov	ip, r1, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction +		rsb	ip, ip, #0 +		movs	ip, ip, lsr #32 - PAGE_SHIFT +		beq	.Lcfu_0fupi +/* + * ip = max no. 
of bytes to copy before needing another "strt" insn + */ +		cmp	r2, ip +		movlt	ip, r2 +		sub	r2, r2, ip +		subs	ip, ip, #32 +		blt	.Lcfu_0rem8lp + +.Lcfu_0cpy8lp:	ldmia	r1!, {r3 - r6}			@ Shouldnt fault +		stmia	r0!, {r3 - r6} +		ldmia	r1!, {r3 - r6}			@ Shouldnt fault +		subs	ip, ip, #32 +		stmia	r0!, {r3 - r6} +		bpl	.Lcfu_0cpy8lp + +.Lcfu_0rem8lp:	cmn	ip, #16 +		ldmgeia	r1!, {r3 - r6}			@ Shouldnt fault +		stmgeia	r0!, {r3 - r6} +		tst	ip, #8 +		ldmneia	r1!, {r3 - r4}			@ Shouldnt fault +		stmneia	r0!, {r3 - r4} +		tst	ip, #4 +	TUSER(	ldrne) r3, [r1], #4			@ Shouldnt fault +		strne	r3, [r0], #4 +		ands	ip, ip, #3 +		beq	.Lcfu_0fupi +.Lcfu_0nowords:	teq	ip, #0 +		beq	.Lcfu_finished +.Lcfu_nowords:	cmp	ip, #2 +USER(	TUSER(	ldrb)	r3, [r1], #1)			@ May fault +		strb	r3, [r0], #1 +USER(	TUSER(	ldrgeb) r3, [r1], #1)			@ May fault +		strgeb	r3, [r0], #1 +USER(	TUSER(	ldrgtb) r3, [r1], #1)			@ May fault +		strgtb	r3, [r0], #1 +		b	.Lcfu_finished + +.Lcfu_not_enough: +		movs	ip, r2 +		bne	.Lcfu_nowords +.Lcfu_finished:	mov	r0, #0 +		add	sp, sp, #8 +		ldmfd	sp!, {r4 - r7, pc} + +.Lcfu_src_not_aligned: +		bic	r1, r1, #3 +USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault +		cmp	ip, #2 +		bgt	.Lcfu_3fupi +		beq	.Lcfu_2fupi +.Lcfu_1fupi:	subs	r2, r2, #4 +		addmi	ip, r2, #4 +		bmi	.Lcfu_1nowords +		mov	r3, r7, pull #8 +USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault +		orr	r3, r3, r7, push #24 +		str	r3, [r0], #4 +		mov	ip, r1, lsl #32 - PAGE_SHIFT +		rsb	ip, ip, #0 +		movs	ip, ip, lsr #32 - PAGE_SHIFT +		beq	.Lcfu_1fupi +		cmp	r2, ip +		movlt	ip, r2 +		sub	r2, r2, ip +		subs	ip, ip, #16 +		blt	.Lcfu_1rem8lp + +.Lcfu_1cpy8lp:	mov	r3, r7, pull #8 +		ldmia	r1!, {r4 - r7}			@ Shouldnt fault +		subs	ip, ip, #16 +		orr	r3, r3, r4, push #24 +		mov	r4, r4, pull #8 +		orr	r4, r4, r5, push #24 +		mov	r5, r5, pull #8 +		orr	r5, r5, r6, push #24 +		mov	r6, r6, pull #8 +		orr	r6, r6, r7, push #24 +		stmia	r0!, {r3 - r6} +		bpl	.Lcfu_1cpy8lp + +.Lcfu_1rem8lp:	tst	ip, #8 +		movne	r3, r7, pull #8 
+		ldmneia	r1!, {r4, r7}			@ Shouldnt fault +		orrne	r3, r3, r4, push #24 +		movne	r4, r4, pull #8 +		orrne	r4, r4, r7, push #24 +		stmneia	r0!, {r3 - r4} +		tst	ip, #4 +		movne	r3, r7, pull #8 +USER(	TUSER(	ldrne) r7, [r1], #4)			@ May fault +		orrne	r3, r3, r7, push #24 +		strne	r3, [r0], #4 +		ands	ip, ip, #3 +		beq	.Lcfu_1fupi +.Lcfu_1nowords:	mov	r3, r7, get_byte_1 +		teq	ip, #0 +		beq	.Lcfu_finished +		cmp	ip, #2 +		strb	r3, [r0], #1 +		movge	r3, r7, get_byte_2 +		strgeb	r3, [r0], #1 +		movgt	r3, r7, get_byte_3 +		strgtb	r3, [r0], #1 +		b	.Lcfu_finished + +.Lcfu_2fupi:	subs	r2, r2, #4 +		addmi	ip, r2, #4 +		bmi	.Lcfu_2nowords +		mov	r3, r7, pull #16 +USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault +		orr	r3, r3, r7, push #16 +		str	r3, [r0], #4 +		mov	ip, r1, lsl #32 - PAGE_SHIFT +		rsb	ip, ip, #0 +		movs	ip, ip, lsr #32 - PAGE_SHIFT +		beq	.Lcfu_2fupi +		cmp	r2, ip +		movlt	ip, r2 +		sub	r2, r2, ip +		subs	ip, ip, #16 +		blt	.Lcfu_2rem8lp + + +.Lcfu_2cpy8lp:	mov	r3, r7, pull #16 +		ldmia	r1!, {r4 - r7}			@ Shouldnt fault +		subs	ip, ip, #16 +		orr	r3, r3, r4, push #16 +		mov	r4, r4, pull #16 +		orr	r4, r4, r5, push #16 +		mov	r5, r5, pull #16 +		orr	r5, r5, r6, push #16 +		mov	r6, r6, pull #16 +		orr	r6, r6, r7, push #16 +		stmia	r0!, {r3 - r6} +		bpl	.Lcfu_2cpy8lp + +.Lcfu_2rem8lp:	tst	ip, #8 +		movne	r3, r7, pull #16 +		ldmneia	r1!, {r4, r7}			@ Shouldnt fault +		orrne	r3, r3, r4, push #16 +		movne	r4, r4, pull #16 +		orrne	r4, r4, r7, push #16 +		stmneia	r0!, {r3 - r4} +		tst	ip, #4 +		movne	r3, r7, pull #16 +USER(	TUSER(	ldrne) r7, [r1], #4)			@ May fault +		orrne	r3, r3, r7, push #16 +		strne	r3, [r0], #4 +		ands	ip, ip, #3 +		beq	.Lcfu_2fupi +.Lcfu_2nowords:	mov	r3, r7, get_byte_2 +		teq	ip, #0 +		beq	.Lcfu_finished +		cmp	ip, #2 +		strb	r3, [r0], #1 +		movge	r3, r7, get_byte_3 +		strgeb	r3, [r0], #1 +USER(	TUSER(	ldrgtb) r3, [r1], #0)			@ May fault +		strgtb	r3, [r0], #1 +		b	.Lcfu_finished + +.Lcfu_3fupi:	subs	r2, r2, #4 +		addmi	ip, r2, #4 +		bmi	
.Lcfu_3nowords +		mov	r3, r7, pull #24 +USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault +		orr	r3, r3, r7, push #8 +		str	r3, [r0], #4 +		mov	ip, r1, lsl #32 - PAGE_SHIFT +		rsb	ip, ip, #0 +		movs	ip, ip, lsr #32 - PAGE_SHIFT +		beq	.Lcfu_3fupi +		cmp	r2, ip +		movlt	ip, r2 +		sub	r2, r2, ip +		subs	ip, ip, #16 +		blt	.Lcfu_3rem8lp + +.Lcfu_3cpy8lp:	mov	r3, r7, pull #24 +		ldmia	r1!, {r4 - r7}			@ Shouldnt fault +		orr	r3, r3, r4, push #8 +		mov	r4, r4, pull #24 +		orr	r4, r4, r5, push #8 +		mov	r5, r5, pull #24 +		orr	r5, r5, r6, push #8 +		mov	r6, r6, pull #24 +		orr	r6, r6, r7, push #8 +		stmia	r0!, {r3 - r6} +		subs	ip, ip, #16 +		bpl	.Lcfu_3cpy8lp + +.Lcfu_3rem8lp:	tst	ip, #8 +		movne	r3, r7, pull #24 +		ldmneia	r1!, {r4, r7}			@ Shouldnt fault +		orrne	r3, r3, r4, push #8 +		movne	r4, r4, pull #24 +		orrne	r4, r4, r7, push #8 +		stmneia	r0!, {r3 - r4} +		tst	ip, #4 +		movne	r3, r7, pull #24 +USER(	TUSER(	ldrne) r7, [r1], #4)			@ May fault +		orrne	r3, r3, r7, push #8 +		strne	r3, [r0], #4 +		ands	ip, ip, #3 +		beq	.Lcfu_3fupi +.Lcfu_3nowords:	mov	r3, r7, get_byte_3 +		teq	ip, #0 +		beq	.Lcfu_finished +		cmp	ip, #2 +		strb	r3, [r0], #1 +USER(	TUSER(	ldrgeb) r3, [r1], #1)			@ May fault +		strgeb	r3, [r0], #1 +USER(	TUSER(	ldrgtb) r3, [r1], #1)			@ May fault +		strgtb	r3, [r0], #1 +		b	.Lcfu_finished +ENDPROC(__copy_from_user) + +		.pushsection .fixup,"ax" +		.align	0 +		/* +		 * We took an exception.  r0 contains a pointer to +		 * the byte not copied. 
+		 */ +9001:		ldr	r2, [sp], #4			@ void *to +		sub	r2, r0, r2			@ bytes copied +		ldr	r1, [sp], #4			@ unsigned long count +		subs	r4, r1, r2			@ bytes left to copy +		movne	r1, r4 +		blne	__memzero +		mov	r0, r4 +		ldmfd	sp!, {r4 - r7, pc} +		.popsection + diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c index 4db5de54b6a..6321567d8ea 100644 --- a/arch/arm/mach-dove/common.c +++ b/arch/arm/mach-dove/common.c @@ -102,7 +102,8 @@ void __init dove_ehci1_init(void)  void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)  {  	orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE, -			IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR); +			IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR, +			1600);  }  /***************************************************************************** diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c index 5ca80307d6d..4e574c24581 100644 --- a/arch/arm/mach-exynos/mach-origen.c +++ b/arch/arm/mach-exynos/mach-origen.c @@ -42,6 +42,7 @@  #include <plat/backlight.h>  #include <plat/fb.h>  #include <plat/mfc.h> +#include <plat/hdmi.h>  #include <mach/ohci.h>  #include <mach/map.h> @@ -734,6 +735,11 @@ static void __init origen_bt_setup(void)  	s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE);  } +/* I2C module and id for HDMIPHY */ +static struct i2c_board_info hdmiphy_info = { +	I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38), +}; +  static void s5p_tv_setup(void)  {  	/* Direct HPD to HDMI chip */ @@ -781,6 +787,7 @@ static void __init origen_machine_init(void)  	s5p_tv_setup();  	s5p_i2c_hdmiphy_set_platdata(NULL); +	s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);  #ifdef CONFIG_DRM_EXYNOS  	s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata; diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c index 3cfa688d274..73f2bce097e 100644 --- a/arch/arm/mach-exynos/mach-smdkv310.c +++ b/arch/arm/mach-exynos/mach-smdkv310.c @@ -40,6 +40,7 @@  #include <plat/mfc.h>  
#include <plat/ehci.h>  #include <plat/clock.h> +#include <plat/hdmi.h>  #include <mach/map.h>  #include <mach/ohci.h> @@ -354,6 +355,11 @@ static struct platform_pwm_backlight_data smdkv310_bl_data = {  	.pwm_period_ns  = 1000,  }; +/* I2C module and id for HDMIPHY */ +static struct i2c_board_info hdmiphy_info = { +	I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38), +}; +  static void s5p_tv_setup(void)  {  	/* direct HPD to HDMI chip */ @@ -388,6 +394,7 @@ static void __init smdkv310_machine_init(void)  	s5p_tv_setup();  	s5p_i2c_hdmiphy_set_platdata(NULL); +	s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);  	samsung_keypad_set_platdata(&smdkv310_keypad_data); diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 07f7c226e4c..d004d37ad9d 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -9,7 +9,8 @@ obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o  obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o  obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o -obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o +imx5-pm-$(CONFIG_PM) += pm-imx5.o +obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o $(imx5-pm-y) cpu_op-mx51.o  obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \  			    clk-pfd.o clk-busy.o @@ -70,14 +71,13 @@ obj-$(CONFIG_DEBUG_LL) += lluart.o  obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o  obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o  obj-$(CONFIG_HAVE_IMX_SRC) += src.o -obj-$(CONFIG_CPU_V7) += head-v7.o -AFLAGS_head-v7.o :=-Wa,-march=armv7-a -obj-$(CONFIG_SMP) += platsmp.o +AFLAGS_headsmp.o :=-Wa,-march=armv7-a +obj-$(CONFIG_SMP) += headsmp.o platsmp.o  obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o  obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o  ifeq ($(CONFIG_PM),y) -obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o +obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o 
headsmp.o  endif  # i.MX5 based machines diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index ea89520b6e2..4233d9e3531 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -152,7 +152,7 @@ enum mx6q_clks {  	ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,  	usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,  	pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg, -	ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, +	ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,  	clk_max  }; @@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void)  	clk[gpu3d_shader]     = imx_clk_divider("gpu3d_shader",     "gpu3d_shader_sel",  base + 0x18, 29, 3);  	clk[ipu1_podf]        = imx_clk_divider("ipu1_podf",        "ipu1_sel",          base + 0x3c, 11, 3);  	clk[ipu2_podf]        = imx_clk_divider("ipu2_podf",        "ipu2_sel",          base + 0x3c, 16, 3); -	clk[ldb_di0_podf]     = imx_clk_divider("ldb_di0_podf",     "ldb_di0_sel",       base + 0x20, 10, 1); -	clk[ldb_di1_podf]     = imx_clk_divider("ldb_di1_podf",     "ldb_di1_sel",       base + 0x20, 11, 1); +	clk[ldb_di0_div_3_5]  = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); +	clk[ldb_di0_podf]     = imx_clk_divider("ldb_di0_podf",     "ldb_di0_div_3_5",       base + 0x20, 10, 1); +	clk[ldb_di1_div_3_5]  = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); +	clk[ldb_di1_podf]     = imx_clk_divider("ldb_di1_podf",     "ldb_di1_div_3_5",   base + 0x20, 11, 1);  	clk[ipu1_di0_pre]     = imx_clk_divider("ipu1_di0_pre",     "ipu1_di0_pre_sel",  base + 0x34, 3,  3);  	clk[ipu1_di1_pre]     = imx_clk_divider("ipu1_di1_pre",     "ipu1_di1_pre_sel",  base + 0x34, 12, 3);  	clk[ipu2_di0_pre]     = imx_clk_divider("ipu2_di0_pre",     "ipu2_di0_pre_sel",  base + 0x38, 3,  3); diff --git a/arch/arm/mach-imx/head-v7.S b/arch/arm/mach-imx/headsmp.S index 7e49deb128a..7e49deb128a 100644 --- 
a/arch/arm/mach-imx/head-v7.S +++ b/arch/arm/mach-imx/headsmp.S diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 20ed2d56c1a..f8f7437c83b 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)  	  : "cc");  } -static inline void cpu_leave_lowpower(void) -{ -	unsigned int v; - -	asm volatile( -		"mrc	p15, 0, %0, c1, c0, 0\n" -	"	orr	%0, %0, %1\n" -	"	mcr	p15, 0, %0, c1, c0, 0\n" -	"	mrc	p15, 0, %0, c1, c0, 1\n" -	"	orr	%0, %0, %2\n" -	"	mcr	p15, 0, %0, c1, c0, 1\n" -	  : "=&r" (v) -	  : "Ir" (CR_C), "Ir" (0x40) -	  : "cc"); -} -  /*   * platform-specific code to shutdown a CPU   * @@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)  {  	cpu_enter_lowpower();  	imx_enable_cpu(cpu, false); -	cpu_do_idle(); -	cpu_leave_lowpower(); -	/* We should never return from idle */ -	panic("cpu %d unexpectedly exit from shutdown\n", cpu); +	/* spin here until hardware takes it down */ +	while (1) +		;  }  int platform_cpu_disable(unsigned int cpu) diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 5ec0608f2a7..045b3f6a387 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c @@ -71,7 +71,7 @@ soft:  /* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */  static int ksz9021rn_phy_fixup(struct phy_device *phydev)  { -	if (IS_ENABLED(CONFIG_PHYLIB)) { +	if (IS_BUILTIN(CONFIG_PHYLIB)) {  		/* min rx data delay */  		phy_write(phydev, 0x0b, 0x8105);  		phy_write(phydev, 0x0c, 0x0000); @@ -112,7 +112,7 @@ put_clk:  static void __init imx6q_sabrelite_init(void)  { -	if (IS_ENABLED(CONFIG_PHYLIB)) +	if (IS_BUILTIN(CONFIG_PHYLIB))  		phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,  				ksz9021rn_phy_fixup);  	imx6q_sabrelite_cko1_setup(); diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot index a5717558ee8..a13299d758e 100644 --- 
a/arch/arm/mach-kirkwood/Makefile.boot +++ b/arch/arm/mach-kirkwood/Makefile.boot @@ -7,7 +7,8 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb  dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb  dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb  dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb -dtb-$(CONFIG_MACH_TS219_DT)	+= kirkwood-qnap-ts219.dtb +dtb-$(CONFIG_MACH_TS219_DT)	+= kirkwood-ts219-6281.dtb +dtb-$(CONFIG_MACH_TS219_DT)	+= kirkwood-ts219-6282.dtb  dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb  dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb  dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index c4b64adcbfc..3226077735b 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -301,7 +301,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)  {  	orion_ge00_init(eth_data,  			GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM, -			IRQ_KIRKWOOD_GE00_ERR); +			IRQ_KIRKWOOD_GE00_ERR, 1600);  	/* The interface forgets the MAC address assigned by u-boot if  	the clock is turned off, so claim the clk now. 
*/  	clk_prepare_enable(ge0); @@ -315,7 +315,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)  {  	orion_ge01_init(eth_data,  			GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM, -			IRQ_KIRKWOOD_GE01_ERR); +			IRQ_KIRKWOOD_GE01_ERR, 1600);  	clk_prepare_enable(ge1);  } diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c index 4304f951937..7e8a5a2e1ec 100644 --- a/arch/arm/mach-mmp/sram.c +++ b/arch/arm/mach-mmp/sram.c @@ -68,7 +68,7 @@ static int __devinit sram_probe(struct platform_device *pdev)  	struct resource *res;  	int ret = 0; -	if (!pdata && !pdata->pool_name) +	if (!pdata || !pdata->pool_name)  		return -ENODEV;  	info = kzalloc(sizeof(*info), GFP_KERNEL); diff --git a/arch/arm/mach-mv78xx0/addr-map.c b/arch/arm/mach-mv78xx0/addr-map.c index 62b53d710ef..a9bc84180d2 100644 --- a/arch/arm/mach-mv78xx0/addr-map.c +++ b/arch/arm/mach-mv78xx0/addr-map.c @@ -37,7 +37,7 @@  #define WIN0_OFF(n)		(BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))  #define WIN8_OFF(n)		(BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4)) -static void __init __iomem *win_cfg_base(int win) +static void __init __iomem *win_cfg_base(const struct orion_addr_map_cfg *cfg, int win)  {  	/*  	 * Find the control register base address for this window. 
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c index b4c53b846c9..3057f7d4329 100644 --- a/arch/arm/mach-mv78xx0/common.c +++ b/arch/arm/mach-mv78xx0/common.c @@ -213,7 +213,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)  {  	orion_ge00_init(eth_data,  			GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM, -			IRQ_MV78XX0_GE_ERR); +			IRQ_MV78XX0_GE_ERR, +			MV643XX_TX_CSUM_DEFAULT_LIMIT);  } @@ -224,7 +225,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)  {  	orion_ge01_init(eth_data,  			GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM, -			NO_IRQ); +			NO_IRQ, +			MV643XX_TX_CSUM_DEFAULT_LIMIT);  } diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index dd2db025f77..fcd4e85c4dd 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -62,13 +62,14 @@ config ARCH_OMAP4  	select PM_OPP if PM  	select USB_ARCH_HAS_EHCI if USB_SUPPORT  	select ARM_CPU_SUSPEND if PM -	select ARCH_NEEDS_CPU_IDLE_COUPLED +	select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP  config SOC_OMAP5  	bool "TI OMAP5"  	select CPU_V7  	select ARM_GIC  	select HAVE_SMP +	select ARM_CPU_SUSPEND if PM  comment "OMAP Core Type"  	depends on ARCH_OMAP2 diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 74915295482..28214483aab 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -554,6 +554,8 @@ static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {  #ifdef CONFIG_OMAP_MUX  static struct omap_board_mux board_mux[] __initdata = { +	/* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */ +	OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),  	{ .reg_offset = OMAP_MUX_TERMINATOR },  };  #endif diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index ef230a0eb5e..0d362e9f9cb 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c 
@@ -58,6 +58,7 @@  #include "hsmmc.h"  #include "common-board-devices.h" +#define OMAP3_EVM_TS_GPIO	175  #define OMAP3_EVM_EHCI_VBUS	22  #define OMAP3_EVM_EHCI_SELECT	61 diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c index 14734746457..c1875862679 100644 --- a/arch/arm/mach-omap2/common-board-devices.c +++ b/arch/arm/mach-omap2/common-board-devices.c @@ -35,16 +35,6 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {  	.turbo_mode	= 0,  }; -/* - * ADS7846 driver maybe request a gpio according to the value - * of pdata->get_pendown_state, but we have done this. So set - * get_pendown_state to avoid twice gpio requesting. - */ -static int omap3_get_pendown_state(void) -{ -	return !gpio_get_value(OMAP3_EVM_TS_GPIO); -} -  static struct ads7846_platform_data ads7846_config = {  	.x_max			= 0x0fff,  	.y_max			= 0x0fff, @@ -55,7 +45,6 @@ static struct ads7846_platform_data ads7846_config = {  	.debounce_rep		= 1,  	.gpio_pendown		= -EINVAL,  	.keep_vref_on		= 1, -	.get_pendown_state	= &omap3_get_pendown_state,  };  static struct spi_board_info ads7846_spi_board_info __initdata = { diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h index 4c4ef6a6166..a0b4a42836a 100644 --- a/arch/arm/mach-omap2/common-board-devices.h +++ b/arch/arm/mach-omap2/common-board-devices.h @@ -4,7 +4,6 @@  #include "twl-common.h"  #define NAND_BLOCK_SIZE	SZ_128K -#define OMAP3_EVM_TS_GPIO	175  struct mtd_partition;  struct ads7846_platform_data; diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index ee05e193fc6..288bee6cbb7 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -238,8 +238,9 @@ int __init omap4_idle_init(void)  	for_each_cpu(cpu_id, cpu_online_mask) {  		dev = &per_cpu(omap4_idle_dev, cpu_id);  		dev->cpu = cpu_id; +#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED  		dev->coupled_cpus = 
*cpu_online_mask; - +#endif  		cpuidle_register_driver(&omap4_idle_driver);  		if (cpuidle_register_device(dev)) { diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h index 471e62a74a1..76f9b3c2f58 100644 --- a/arch/arm/mach-omap2/mux.h +++ b/arch/arm/mach-omap2/mux.h @@ -127,7 +127,6 @@ struct omap_mux_partition {   * @gpio:	GPIO number   * @muxnames:	available signal modes for a ball   * @balls:	available balls on the package - * @partition:	mux partition   */  struct omap_mux {  	u16	reg_offset; diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c index 2293ba27101..c95415da23c 100644 --- a/arch/arm/mach-omap2/opp4xxx_data.c +++ b/arch/arm/mach-omap2/opp4xxx_data.c @@ -94,7 +94,7 @@ int __init omap4_opp_init(void)  {  	int r = -ENODEV; -	if (!cpu_is_omap44xx()) +	if (!cpu_is_omap443x())  		return r;  	r = omap_init_opp_table(omap44xx_opp_def_list, diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index e4fc88c65db..05bd8f02723 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -272,21 +272,16 @@ void omap_sram_idle(void)  	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);  	core_next_state = pwrdm_read_next_pwrst(core_pwrdm); -	if (mpu_next_state < PWRDM_POWER_ON) { -		pwrdm_pre_transition(mpu_pwrdm); -		pwrdm_pre_transition(neon_pwrdm); -	} +	pwrdm_pre_transition(NULL);  	/* PER */  	if (per_next_state < PWRDM_POWER_ON) { -		pwrdm_pre_transition(per_pwrdm);  		per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 
1 : 0;  		omap2_gpio_prepare_for_idle(per_going_off);  	}  	/* CORE */  	if (core_next_state < PWRDM_POWER_ON) { -		pwrdm_pre_transition(core_pwrdm);  		if (core_next_state == PWRDM_POWER_OFF) {  			omap3_core_save_context();  			omap3_cm_save_context(); @@ -339,20 +334,14 @@ void omap_sram_idle(void)  			omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,  					       OMAP3430_GR_MOD,  					       OMAP3_PRM_VOLTCTRL_OFFSET); -		pwrdm_post_transition(core_pwrdm);  	}  	omap3_intc_resume_idle(); +	pwrdm_post_transition(NULL); +  	/* PER */ -	if (per_next_state < PWRDM_POWER_ON) { +	if (per_next_state < PWRDM_POWER_ON)  		omap2_gpio_resume_after_idle(); -		pwrdm_post_transition(per_pwrdm); -	} - -	if (mpu_next_state < PWRDM_POWER_ON) { -		pwrdm_post_transition(mpu_pwrdm); -		pwrdm_post_transition(neon_pwrdm); -	}  }  static void omap3_pm_idle(void) diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index 9f6b83d1b19..91e71d8f46f 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -56,9 +56,13 @@ ppa_por_params:   * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.   * It returns to the caller for CPU INACTIVE and ON power states or in case   * CPU failed to transition to targeted OFF/DORMANT state. + * + * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save + * stack frame and it expects the caller to take care of it. Hence the entire + * stack frame is saved to avoid possible stack corruption.   
*/  ENTRY(omap4_finish_suspend) -	stmfd	sp!, {lr} +	stmfd	sp!, {r4-r12, lr}  	cmp	r0, #0x0  	beq	do_WFI				@ No lowpower state, jump to WFI @@ -226,7 +230,7 @@ scu_gp_clear:  skip_scu_gp_clear:  	isb  	dsb -	ldmfd	sp!, {pc} +	ldmfd	sp!, {r4-r12, pc}  ENDPROC(omap4_finish_suspend)  /* diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index de47f170ba5..db5ff664237 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c @@ -67,6 +67,7 @@ void __init omap_pmic_init(int bus, u32 clkrate,  			   const char *pmic_type, int pmic_irq,  			   struct twl4030_platform_data *pmic_data)  { +	omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);  	strncpy(pmic_i2c_board_info.type, pmic_type,  		sizeof(pmic_i2c_board_info.type));  	pmic_i2c_board_info.irq = pmic_irq; diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 9148b229d0d..410291c6766 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -109,7 +109,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)  {  	orion_ge00_init(eth_data,  			ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM, -			IRQ_ORION5X_ETH_ERR); +			IRQ_ORION5X_ETH_ERR, +			MV643XX_TX_CSUM_DEFAULT_LIMIT);  } diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h index 454831b6603..ee99fd56c04 100644 --- a/arch/arm/mach-s3c24xx/include/mach/dma.h +++ b/arch/arm/mach-s3c24xx/include/mach/dma.h @@ -24,7 +24,8 @@  */  enum dma_ch { -	DMACH_XD0, +	DMACH_DT_PROP = -1,	/* not yet supported, do not use */ +	DMACH_XD0 = 0,  	DMACH_XD1,  	DMACH_SDI,  	DMACH_SPI0, diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index c013bbf79ca..53d3d46dec1 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -41,7 +41,6 @@ config MACH_HREFV60  config MACH_SNOWBALL  	bool "U8500 Snowball platform"  	select MACH_MOP500 -	select 
LEDS_GPIO  	help  	  Include support for the snowball development platform. diff --git a/arch/arm/mach-ux500/board-mop500-msp.c b/arch/arm/mach-ux500/board-mop500-msp.c index 99604803874..df15646036a 100644 --- a/arch/arm/mach-ux500/board-mop500-msp.c +++ b/arch/arm/mach-ux500/board-mop500-msp.c @@ -191,9 +191,9 @@ static struct platform_device *db8500_add_msp_i2s(struct device *parent,  	return pdev;  } -/* Platform device for ASoC U8500 machine */ -static struct platform_device snd_soc_u8500 = { -		.name = "snd-soc-u8500", +/* Platform device for ASoC MOP500 machine */ +static struct platform_device snd_soc_mop500 = { +		.name = "snd-soc-mop500",  		.id = 0,  		.dev = {  			.platform_data = NULL, @@ -227,8 +227,8 @@ int mop500_msp_init(struct device *parent)  {  	struct platform_device *msp1; -	pr_info("%s: Register platform-device 'snd-soc-u8500'.\n", __func__); -	platform_device_register(&snd_soc_u8500); +	pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__); +	platform_device_register(&snd_soc_mop500);  	pr_info("Initialize MSP I2S-devices.\n");  	db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0, diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index 8674a890fd1..a534d8880de 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -797,6 +797,7 @@ static void __init u8500_init_machine(void)  				ARRAY_SIZE(mop500_platform_devs));  		mop500_sdi_init(parent); +		mop500_msp_init(parent);  		i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);  		i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);  		i2c_register_board_info(2, mop500_i2c2_devices, @@ -804,6 +805,8 @@ static void __init u8500_init_machine(void)  		mop500_uib_init(); +	} else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { +		mop500_msp_init(parent);  	} else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {  		/*  		 * The HREFv60 board removed a GPIO expander and routed @@ 
-815,6 +818,7 @@ static void __init u8500_init_machine(void)  				ARRAY_SIZE(mop500_platform_devs));  		hrefv60_sdi_init(parent); +		mop500_msp_init(parent);  		i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);  		i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 77458548e03..40ca11ed6e5 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval)  	struct page *page;  	struct address_space *mapping; -	if (!pte_present_user(pteval)) -		return;  	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))  		/* only flush non-aliasing VIPT caches for exec mappings */  		return; diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index c2021139cb5..ea94765acf9 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range)  	dsb  	mov	r0, r0, lsr #PAGE_SHIFT		@ align address  	mov	r1, r1, lsr #PAGE_SHIFT -#ifdef CONFIG_ARM_ERRATA_720789 -	mov	r3, #0 -#else  	asid	r3, r3				@ mask ASID +#ifdef CONFIG_ARM_ERRATA_720789 +	ALT_SMP(W(mov)	r3, #0	) +	ALT_UP(W(nop)		)  #endif  	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA  	mov	r1, r1, lsl #PAGE_SHIFT diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index 626ad8cad7a..938b50a3343 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c @@ -189,6 +189,7 @@ struct omap_dm_timer *omap_dm_timer_request(void)  		timer->reserved = 1;  		break;  	} +	spin_unlock_irqrestore(&dm_timer_lock, flags);  	if (timer) {  		ret = omap_dm_timer_prepare(timer); @@ -197,7 +198,6 @@ struct omap_dm_timer *omap_dm_timer_request(void)  			timer = NULL;  		}  	} -	spin_unlock_irqrestore(&dm_timer_lock, flags);  	if (!timer)  		pr_debug("%s: timer request failed!\n", __func__); @@ -220,6 +220,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)  			break;  		}  	} +	spin_unlock_irqrestore(&dm_timer_lock, flags);  	if (timer) {  		ret = 
omap_dm_timer_prepare(timer); @@ -228,7 +229,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)  			timer = NULL;  		}  	} -	spin_unlock_irqrestore(&dm_timer_lock, flags);  	if (!timer)  		pr_debug("%s: timer%d request failed!\n", __func__, id); @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);  void omap_dm_timer_disable(struct omap_dm_timer *timer)  { -	pm_runtime_put(&timer->pdev->dev); +	pm_runtime_put_sync(&timer->pdev->dev);  }  EXPORT_SYMBOL_GPL(omap_dm_timer_disable); diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h index 68b180edcff..bb5d08a70db 100644 --- a/arch/arm/plat-omap/include/plat/cpu.h +++ b/arch/arm/plat-omap/include/plat/cpu.h @@ -372,7 +372,8 @@ IS_OMAP_TYPE(3430, 0x3430)  #define cpu_class_is_omap1()	(cpu_is_omap7xx() || cpu_is_omap15xx() || \  				cpu_is_omap16xx())  #define cpu_class_is_omap2()	(cpu_is_omap24xx() || cpu_is_omap34xx() || \ -				cpu_is_omap44xx() || soc_is_omap54xx()) +				cpu_is_omap44xx() || soc_is_omap54xx() || \ +				soc_is_am33xx())  /* Various silicon revisions for omap2 */  #define OMAP242X_CLASS		0x24200024 diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h index 045e320f106..324d31b1485 100644 --- a/arch/arm/plat-omap/include/plat/multi.h +++ b/arch/arm/plat-omap/include/plat/multi.h @@ -108,4 +108,13 @@  # endif  #endif +#ifdef CONFIG_SOC_AM33XX +# ifdef OMAP_NAME +#  undef  MULTI_OMAP2 +#  define MULTI_OMAP2 +# else +#  define OMAP_NAME am33xx +# endif +#endif +  #endif	/* __PLAT_OMAP_MULTI_H */ diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h index b8d19a13678..7f7b112accc 100644 --- a/arch/arm/plat-omap/include/plat/uncompress.h +++ b/arch/arm/plat-omap/include/plat/uncompress.h @@ -110,7 +110,7 @@ static inline void flush(void)  	_DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT,	\  		AM33XXUART##p) -static inline void 
__arch_decomp_setup(unsigned long arch_id) +static inline void arch_decomp_setup(void)  {  	int port = 0; @@ -198,8 +198,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id)  	} while (0);  } -#define arch_decomp_setup()	__arch_decomp_setup(arch_id) -  /*   * nothing to do   */ diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index d245a87dc01..b8b747a9d36 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -291,10 +291,12 @@ static struct platform_device orion_ge00 = {  void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,  			    unsigned long mapbase,  			    unsigned long irq, -			    unsigned long irq_err) +			    unsigned long irq_err, +			    unsigned int tx_csum_limit)  {  	fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,  		       mapbase + 0x2000, SZ_16K - 1, irq_err); +	orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;  	ge_complete(&orion_ge00_shared_data,  		    orion_ge00_resources, irq, &orion_ge00_shared,  		    eth_data, &orion_ge00); @@ -343,10 +345,12 @@ static struct platform_device orion_ge01 = {  void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,  			    unsigned long mapbase,  			    unsigned long irq, -			    unsigned long irq_err) +			    unsigned long irq_err, +			    unsigned int tx_csum_limit)  {  	fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,  		       mapbase + 0x2000, SZ_16K - 1, irq_err); +	orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;  	ge_complete(&orion_ge01_shared_data,  		    orion_ge01_resources, irq, &orion_ge01_shared,  		    eth_data, &orion_ge01); diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h index e00fdb21360..ae2377ef63e 100644 --- a/arch/arm/plat-orion/include/plat/common.h +++ b/arch/arm/plat-orion/include/plat/common.h @@ -39,12 +39,14 @@ void __init orion_rtc_init(unsigned long mapbase,  void __init 
orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,  			    unsigned long mapbase,  			    unsigned long irq, -			    unsigned long irq_err); +			    unsigned long irq_err, +			    unsigned int tx_csum_limit);  void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,  			    unsigned long mapbase,  			    unsigned long irq, -			    unsigned long irq_err); +			    unsigned long irq_err, +			    unsigned int tx_csum_limit);  void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,  			    unsigned long mapbase, diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 28f898f7538..db98e7021f0 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)   * when necessary.  */ -int s3c2410_dma_enqueue(unsigned int channel, void *id, +int s3c2410_dma_enqueue(enum dma_ch channel, void *id,  			dma_addr_t data, int size)  {  	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c index 74e31ce3553..fc49f3dabd7 100644 --- a/arch/arm/plat-samsung/devs.c +++ b/arch/arm/plat-samsung/devs.c @@ -32,6 +32,8 @@  #include <linux/platform_data/s3c-hsudc.h>  #include <linux/platform_data/s3c-hsotg.h> +#include <media/s5p_hdmi.h> +  #include <asm/irq.h>  #include <asm/pmu.h>  #include <asm/mach/arch.h> @@ -748,7 +750,8 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)  	if (!pd) {  		pd = &default_i2c_data; -		if (soc_is_exynos4210()) +		if (soc_is_exynos4210() || +		    soc_is_exynos4212() || soc_is_exynos4412())  			pd->bus_num = 8;  		else if (soc_is_s5pv210())  			pd->bus_num = 3; @@ -759,6 +762,30 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)  	npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),  			       &s5p_device_i2c_hdmiphy);  } + +struct s5p_hdmi_platform_data 
s5p_hdmi_def_platdata; + +void __init s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info, +				  struct i2c_board_info *mhl_info, int mhl_bus) +{ +	struct s5p_hdmi_platform_data *pd = &s5p_hdmi_def_platdata; + +	if (soc_is_exynos4210() || +	    soc_is_exynos4212() || soc_is_exynos4412()) +		pd->hdmiphy_bus = 8; +	else if (soc_is_s5pv210()) +		pd->hdmiphy_bus = 3; +	else +		pd->hdmiphy_bus = 0; + +	pd->hdmiphy_info = hdmiphy_info; +	pd->mhl_info = mhl_info; +	pd->mhl_bus = mhl_bus; + +	s3c_set_platdata(pd, sizeof(struct s5p_hdmi_platform_data), +			 &s5p_device_hdmi); +} +  #endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */  /* I2S */ diff --git a/arch/arm/plat-samsung/include/plat/hdmi.h b/arch/arm/plat-samsung/include/plat/hdmi.h new file mode 100644 index 00000000000..331d046ac2c --- /dev/null +++ b/arch/arm/plat-samsung/include/plat/hdmi.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#ifndef __PLAT_SAMSUNG_HDMI_H +#define __PLAT_SAMSUNG_HDMI_H __FILE__ + +extern void s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info, +				  struct i2c_board_info *mhl_info, int mhl_bus); + +#endif /* __PLAT_SAMSUNG_HDMI_H */ diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 64ab65f0fdb..15070284343 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c @@ -74,7 +74,7 @@ unsigned char pm_uart_udivslot;  #ifdef CONFIG_SAMSUNG_PM_DEBUG -struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; +static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];  static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)  { diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index fb849d044bd..c834b32af27 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -719,8 +719,10 @@ static int __init vfp_init(void)  			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)  				elf_hwcap |= HWCAP_NEON;  #endif +#ifdef CONFIG_VFPv3  			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)  				elf_hwcap |= HWCAP_VFPv4; +#endif  		}  	}  	return 0; diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 052f81a7623..983c859e40b 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -6,6 +6,7 @@  config C6X  	def_bool y  	select CLKDEV_LOOKUP +	select GENERIC_ATOMIC64  	select GENERIC_IRQ_SHOW  	select HAVE_ARCH_TRACEHOOK  	select HAVE_DMA_API_DEBUG diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h index 6d521d96d94..09c5a0f5f4d 100644 --- a/arch/c6x/include/asm/cache.h +++ b/arch/c6x/include/asm/cache.h @@ -1,7 +1,7 @@  /*   *  Port on Texas Instruments TMS320C6x architecture   * - *  Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated + *  Copyright (C) 2005, 2006, 2009, 2010, 2012 Texas Instruments Incorporated   *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)   *   *  This program is free software; you can redistribute it and/or 
modify @@ -16,9 +16,14 @@  /*   * Cache line size   */ -#define L1D_CACHE_BYTES   64 -#define L1P_CACHE_BYTES   32 -#define L2_CACHE_BYTES	  128 +#define L1D_CACHE_SHIFT   6 +#define L1D_CACHE_BYTES   (1 << L1D_CACHE_SHIFT) + +#define L1P_CACHE_SHIFT   5 +#define L1P_CACHE_BYTES   (1 << L1P_CACHE_SHIFT) + +#define L2_CACHE_SHIFT    7 +#define L2_CACHE_BYTES    (1 << L2_CACHE_SHIFT)  /*   * L2 used as cache @@ -29,7 +34,8 @@   * For practical reasons the L1_CACHE_BYTES defines should not be smaller than   * the L2 line size   */ -#define L1_CACHE_BYTES        L2_CACHE_BYTES +#define L1_CACHE_SHIFT        L2_CACHE_SHIFT +#define L1_CACHE_BYTES        (1 << L1_CACHE_SHIFT)  #define L2_CACHE_ALIGN_LOW(x) \  	(((x) & ~(L2_CACHE_BYTES - 1))) diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 954d81e2e83..7913695b2fc 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -234,5 +234,4 @@ CONFIG_CRYPTO_PCBC=m  CONFIG_CRYPTO_MD5=y  # CONFIG_CRYPTO_ANSI_CPRNG is not set  CONFIG_CRC_T10DIF=y -CONFIG_MISC_DEVICES=y  CONFIG_INTEL_IOMMU=y diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index 91c41ecfa6d..f8e91336542 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig @@ -209,4 +209,3 @@ CONFIG_MAGIC_SYSRQ=y  CONFIG_DEBUG_KERNEL=y  CONFIG_DEBUG_MUTEXES=y  CONFIG_CRYPTO_MD5=y -CONFIG_MISC_DEVICES=y diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 4a469907f04..b22df9410dc 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -5,6 +5,7 @@ config M68K  	select HAVE_AOUT if MMU  	select HAVE_GENERIC_HARDIRQS  	select GENERIC_IRQ_SHOW +	select GENERIC_ATOMIC64  	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS  	select GENERIC_CPU_DEVICES  	select GENERIC_STRNCPY_FROM_USER if MMU diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 82068349a2b..c4eb79edece 100644 --- a/arch/m68k/Kconfig.cpu +++ 
b/arch/m68k/Kconfig.cpu @@ -28,6 +28,7 @@ config COLDFIRE  	select CPU_HAS_NO_BITFIELDS  	select CPU_HAS_NO_MULDIV64  	select GENERIC_CSUM +	select HAVE_CLK  endchoice @@ -58,7 +59,6 @@ config MCPU32  config M68020  	bool "68020 support"  	depends on MMU -	select GENERIC_ATOMIC64  	select CPU_HAS_ADDRESS_SPACES  	help  	  If you anticipate running this kernel on a computer with a MC68020 @@ -69,7 +69,6 @@ config M68020  config M68030  	bool "68030 support"  	depends on MMU && !MMU_SUN3 -	select GENERIC_ATOMIC64  	select CPU_HAS_ADDRESS_SPACES  	help  	  If you anticipate running this kernel on a computer with a MC68030 @@ -79,7 +78,6 @@ config M68030  config M68040  	bool "68040 support"  	depends on MMU && !MMU_SUN3 -	select GENERIC_ATOMIC64  	select CPU_HAS_ADDRESS_SPACES  	help  	  If you anticipate running this kernel on a computer with a MC68LC040 @@ -90,7 +88,6 @@ config M68040  config M68060  	bool "68060 support"  	depends on MMU && !MMU_SUN3 -	select GENERIC_ATOMIC64  	select CPU_HAS_ADDRESS_SPACES  	help  	  If you anticipate running this kernel on a computer with a MC68060 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 331d574df99..faf65286574 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -89,6 +89,7 @@ config ATH79  	select CEVT_R4K  	select CSRC_R4K  	select DMA_NONCOHERENT +	select HAVE_CLK  	select IRQ_CPU  	select MIPS_MACHINE  	select SYS_HAS_CPU_MIPS32_R2 diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c index 99969484c47..a124c251c0c 100644 --- a/arch/mips/alchemy/board-mtx1.c +++ b/arch/mips/alchemy/board-mtx1.c @@ -228,6 +228,8 @@ static int mtx1_pci_idsel(unsigned int devsel, int assert)  	 * adapter on the mtx-1 "singleboard" variant. It triggers a custom  	 * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals.  	 
*/ +	udelay(1); +  	if (assert && devsel != 0)  		/* Suppress signal to Cardbus */  		alchemy_gpio_set_value(1, 0);	/* set EXT_IO3 OFF */ diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c index 36e9570e7bc..b2a2311ec85 100644 --- a/arch/mips/ath79/dev-usb.c +++ b/arch/mips/ath79/dev-usb.c @@ -145,6 +145,8 @@ static void __init ar7240_usb_setup(void)  	ath79_ohci_resources[0].start = AR7240_OHCI_BASE;  	ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1; +	ath79_ohci_resources[1].start = ATH79_CPU_IRQ_USB; +	ath79_ohci_resources[1].end = ATH79_CPU_IRQ_USB;  	platform_device_register(&ath79_ohci_device);  } diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c index 29054f21183..48fe762d252 100644 --- a/arch/mips/ath79/gpio.c +++ b/arch/mips/ath79/gpio.c @@ -188,8 +188,10 @@ void __init ath79_gpio_init(void)  	if (soc_is_ar71xx())  		ath79_gpio_count = AR71XX_GPIO_COUNT; -	else if (soc_is_ar724x()) -		ath79_gpio_count = AR724X_GPIO_COUNT; +	else if (soc_is_ar7240()) +		ath79_gpio_count = AR7240_GPIO_COUNT; +	else if (soc_is_ar7241() || soc_is_ar7242()) +		ath79_gpio_count = AR7241_GPIO_COUNT;  	else if (soc_is_ar913x())  		ath79_gpio_count = AR913X_GPIO_COUNT;  	else if (soc_is_ar933x()) diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c index e39f73048d4..f1c9c3e2f67 100644 --- a/arch/mips/bcm63xx/dev-spi.c +++ b/arch/mips/bcm63xx/dev-spi.c @@ -106,11 +106,15 @@ int __init bcm63xx_spi_register(void)  	if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {  		spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;  		spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE; +		spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT; +		spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;  	}  	if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {  		spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;  		spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; +		spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT; +		spi_pdata.msg_ctl_width = 
SPI_6358_MSG_CTL_WIDTH;  	}  	bcm63xx_spi_regs_init(); diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 7fb1f222b8a..274cd4fad30 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -61,6 +61,12 @@ static void octeon_irq_set_ciu_mapping(int irq, int line, int bit,  	octeon_irq_ciu_to_irq[line][bit] = irq;  } +static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, +					 int irq, int line, int bit) +{ +	irq_domain_associate(domain, irq, line << 6 | bit); +} +  static int octeon_coreid_for_cpu(int cpu)  {  #ifdef CONFIG_SMP @@ -183,19 +189,9 @@ static void __init octeon_irq_init_core(void)  		mutex_init(&cd->core_irq_mutex);  		irq = OCTEON_IRQ_SW0 + i; -		switch (irq) { -		case OCTEON_IRQ_TIMER: -		case OCTEON_IRQ_SW0: -		case OCTEON_IRQ_SW1: -		case OCTEON_IRQ_5: -		case OCTEON_IRQ_PERF: -			irq_set_chip_data(irq, cd); -			irq_set_chip_and_handler(irq, &octeon_irq_chip_core, -						 handle_percpu_irq); -			break; -		default: -			break; -		} +		irq_set_chip_data(irq, cd); +		irq_set_chip_and_handler(irq, &octeon_irq_chip_core, +					 handle_percpu_irq);  	}  } @@ -890,7 +886,6 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,  	unsigned int type;  	unsigned int pin;  	unsigned int trigger; -	struct octeon_irq_gpio_domain_data *gpiod;  	if (d->of_node != node)  		return -EINVAL; @@ -925,8 +920,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,  		break;  	}  	*out_type = type; -	gpiod = d->host_data; -	*out_hwirq = gpiod->base_hwirq + pin; +	*out_hwirq = pin;  	return 0;  } @@ -996,19 +990,21 @@ static int octeon_irq_ciu_map(struct irq_domain *d,  static int octeon_irq_gpio_map(struct irq_domain *d,  			       unsigned int virq, irq_hw_number_t hw)  { -	unsigned int line = hw >> 6; -	unsigned int bit = hw & 63; +	struct octeon_irq_gpio_domain_data *gpiod = d->host_data; +	unsigned int line, bit;  	if (!octeon_irq_virq_in_range(virq))  		return -EINVAL; 
+	hw += gpiod->base_hwirq; +	line = hw >> 6; +	bit = hw & 63;  	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)  		return -EINVAL;  	octeon_irq_set_ciu_mapping(virq, line, bit,  				   octeon_irq_gpio_chip,  				   octeon_irq_handle_gpio); -  	return 0;  } @@ -1149,6 +1145,7 @@ static void __init octeon_irq_init_ciu(void)  	struct irq_chip *chip_wd;  	struct device_node *gpio_node;  	struct device_node *ciu_node; +	struct irq_domain *ciu_domain = NULL;  	octeon_irq_init_ciu_percpu();  	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; @@ -1177,31 +1174,6 @@ static void __init octeon_irq_init_ciu(void)  	/* Mips internal */  	octeon_irq_init_core(); -	/* CIU_0 */ -	for (i = 0; i < 16; i++) -		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq); - -	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); -	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); - -	for (i = 0; i < 4; i++) -		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq); -	for (i = 0; i < 4; i++) -		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq); - -	octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq); -	for (i = 0; i < 4; i++) -		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq); - -	octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq); -	octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq); - -	/* CIU_1 */ -	for (i = 0; i < 16; i++) -		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); - -	octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq); -  	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");  	if (gpio_node) {  		struct octeon_irq_gpio_domain_data *gpiod; @@ -1219,10 +1191,35 @@ static void __init octeon_irq_init_ciu(void)  	
ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");  	if (ciu_node) { -		irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); +		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);  		of_node_put(ciu_node);  	} else -		pr_warn("Cannot find device node for cavium,octeon-3860-ciu.\n"); +		panic("Cannot find device node for cavium,octeon-3860-ciu."); + +	/* CIU_0 */ +	for (i = 0; i < 16; i++) +		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); + +	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); +	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); + +	for (i = 0; i < 4; i++) +		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); +	for (i = 0; i < 4; i++) +		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); + +	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); +	for (i = 0; i < 4; i++) +		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); + +	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); +	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63); + +	/* CIU_1 */ +	for (i = 0; i < 16; i++) +		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); + +	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);  	/* Enable the CIU lines */  	set_c0_status(STATUSF_IP3 | STATUSF_IP2); diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h index 1caa78ad06d..dde504477fa 100644 --- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h +++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h @@ -393,7 +393,8 @@  #define AR71XX_GPIO_REG_FUNC		0x28  #define AR71XX_GPIO_COUNT		16 -#define AR724X_GPIO_COUNT		18 +#define AR7240_GPIO_COUNT		18 +#define AR7241_GPIO_COUNT		20  #define AR913X_GPIO_COUNT		22  #define 
AR933X_GPIO_COUNT		30  #define AR934X_GPIO_COUNT		23 diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h index 4476fa03bf3..6ddae926bf7 100644 --- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h @@ -42,7 +42,6 @@  #define cpu_has_mips64r1	0  #define cpu_has_mips64r2	0 -#define cpu_has_dsp		0  #define cpu_has_mipsmt		0  #define cpu_has_64bits		0 diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h index 7d98dbe5d4b..c9bae136260 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h @@ -9,6 +9,8 @@ int __init bcm63xx_spi_register(void);  struct bcm63xx_spi_pdata {  	unsigned int	fifo_size; +	unsigned int	msg_type_shift; +	unsigned int	msg_ctl_width;  	int		bus_num;  	int		num_chipselect;  	u32		speed_hz; diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 4ccc2a748af..61f2a2a5099 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -1054,7 +1054,8 @@  #define SPI_6338_FILL_BYTE		0x07  #define SPI_6338_MSG_TAIL		0x09  #define SPI_6338_RX_TAIL		0x0b -#define SPI_6338_MSG_CTL		0x40 +#define SPI_6338_MSG_CTL		0x40	/* 8-bits register */ +#define SPI_6338_MSG_CTL_WIDTH		8  #define SPI_6338_MSG_DATA		0x41  #define SPI_6338_MSG_DATA_SIZE		0x3f  #define SPI_6338_RX_DATA		0x80 @@ -1070,7 +1071,8 @@  #define SPI_6348_FILL_BYTE		0x07  #define SPI_6348_MSG_TAIL		0x09  #define SPI_6348_RX_TAIL		0x0b -#define SPI_6348_MSG_CTL		0x40 +#define SPI_6348_MSG_CTL		0x40	/* 8-bits register */ +#define SPI_6348_MSG_CTL_WIDTH		8  #define SPI_6348_MSG_DATA		0x41  #define SPI_6348_MSG_DATA_SIZE		0x3f  #define SPI_6348_RX_DATA		0x80 @@ -1078,6 +1080,7 @@  /* BCM 6358 SPI core 
*/  #define SPI_6358_MSG_CTL		0x00	/* 16-bits register */ +#define SPI_6358_MSG_CTL_WIDTH		16  #define SPI_6358_MSG_DATA		0x02  #define SPI_6358_MSG_DATA_SIZE		0x21e  #define SPI_6358_RX_DATA		0x400 @@ -1094,6 +1097,7 @@  /* BCM 6358 SPI core */  #define SPI_6368_MSG_CTL		0x00	/* 16-bits register */ +#define SPI_6368_MSG_CTL_WIDTH		16  #define SPI_6368_MSG_DATA		0x02  #define SPI_6368_MSG_DATA_SIZE		0x21e  #define SPI_6368_RX_DATA		0x400 @@ -1115,7 +1119,10 @@  #define SPI_HD_W			0x01  #define SPI_HD_R			0x02  #define SPI_BYTE_CNT_SHIFT		0 -#define SPI_MSG_TYPE_SHIFT		14 +#define SPI_6338_MSG_TYPE_SHIFT		6 +#define SPI_6348_MSG_TYPE_SHIFT		6 +#define SPI_6358_MSG_TYPE_SHIFT		14 +#define SPI_6368_MSG_TYPE_SHIFT		14  /* Command */  #define SPI_CMD_NOOP			0x00 diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h index 418992042f6..c22a3078bf1 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/irq.h +++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h @@ -21,14 +21,10 @@ enum octeon_irq {  	OCTEON_IRQ_TIMER,  /* sources in CIU_INTX_EN0 */  	OCTEON_IRQ_WORKQ0, -	OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16, -	OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16, +	OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 16,  	OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15,  	OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16,  	OCTEON_IRQ_MBOX1, -	OCTEON_IRQ_UART0, -	OCTEON_IRQ_UART1, -	OCTEON_IRQ_UART2,  	OCTEON_IRQ_PCI_INT0,  	OCTEON_IRQ_PCI_INT1,  	OCTEON_IRQ_PCI_INT2, @@ -38,8 +34,6 @@ enum octeon_irq {  	OCTEON_IRQ_PCI_MSI2,  	OCTEON_IRQ_PCI_MSI3, -	OCTEON_IRQ_TWSI, -	OCTEON_IRQ_TWSI2,  	OCTEON_IRQ_RML,  	OCTEON_IRQ_TIMER0,  	OCTEON_IRQ_TIMER1, @@ -47,8 +41,6 @@ enum octeon_irq {  	OCTEON_IRQ_TIMER3,  	OCTEON_IRQ_USB0,  	OCTEON_IRQ_USB1, -	OCTEON_IRQ_MII0, -	OCTEON_IRQ_MII1,  	OCTEON_IRQ_BOOTDMA,  #ifndef CONFIG_PCI_MSI  	OCTEON_IRQ_LAST = 127 diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index 7531ecd654d..dca8bce8c7a 
100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h @@ -10,6 +10,7 @@ struct mod_arch_specific {  	struct list_head dbe_list;  	const struct exception_table_entry *dbe_start;  	const struct exception_table_entry *dbe_end; +	struct mips_hi16 *r_mips_hi16_list;  };  typedef uint8_t Elf64_Byte;		/* Type for a 8-bit quantity.  */ diff --git a/arch/mips/include/asm/r4k-timer.h b/arch/mips/include/asm/r4k-timer.h index a37d12b3b61..afe9e0e03fe 100644 --- a/arch/mips/include/asm/r4k-timer.h +++ b/arch/mips/include/asm/r4k-timer.h @@ -12,16 +12,16 @@  #ifdef CONFIG_SYNC_R4K -extern void synchronise_count_master(void); -extern void synchronise_count_slave(void); +extern void synchronise_count_master(int cpu); +extern void synchronise_count_slave(int cpu);  #else -static inline void synchronise_count_master(void) +static inline void synchronise_count_master(int cpu)  {  } -static inline void synchronise_count_slave(void) +static inline void synchronise_count_slave(int cpu)  {  } diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index a5066b1c3de..4f8c3cba8c0 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -39,8 +39,6 @@ struct mips_hi16 {  	Elf_Addr value;  }; -static struct mips_hi16 *mips_hi16_list; -  static LIST_HEAD(dbe_list);  static DEFINE_SPINLOCK(dbe_lock); @@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)  	n->addr = (Elf_Addr *)location;  	n->value = v; -	n->next = mips_hi16_list; -	mips_hi16_list = n; +	n->next = me->arch.r_mips_hi16_list; +	me->arch.r_mips_hi16_list = n;  	return 0;  } @@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)  	return 0;  } +static void free_relocation_chain(struct mips_hi16 *l) +{ +	struct mips_hi16 *next; + +	while (l) { +		next = l->next; +		kfree(l); +		l = next; +	} +} +  static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)  {  	
unsigned long insnlo = *location; +	struct mips_hi16 *l;  	Elf_Addr val, vallo;  	/* Sign extend the addend we extract from the lo insn.  */  	vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; -	if (mips_hi16_list != NULL) { -		struct mips_hi16 *l; - -		l = mips_hi16_list; +	if (me->arch.r_mips_hi16_list != NULL) { +		l = me->arch.r_mips_hi16_list;  		while (l != NULL) {  			struct mips_hi16 *next;  			unsigned long insn; @@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)  			l = next;  		} -		mips_hi16_list = NULL; +		me->arch.r_mips_hi16_list = NULL;  	}  	/* @@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)  	return 0;  out_danger: +	free_relocation_chain(l); +	me->arch.r_mips_hi16_list = NULL; +  	pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);  	return -ENOEXEC; @@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,  	pr_debug("Applying relocate section %u to %u\n", relsec,  	       sechdrs[relsec].sh_info); +	me->arch.r_mips_hi16_list = NULL;  	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {  		/* This is where to make the change */  		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr @@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,  			return res;  	} +	/* +	 * Normally the hi16 list should be deallocated at this point.  A +	 * malformed binary however could contain a series of R_MIPS_HI16 +	 * relocations not followed by a R_MIPS_LO16 relocation.  In that +	 * case, free up the list and return an error. 
+	 */ +	if (me->arch.r_mips_hi16_list) { +		free_relocation_chain(me->arch.r_mips_hi16_list); +		me->arch.r_mips_hi16_list = NULL; + +		return -ENOEXEC; +	} +  	return 0;  } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 31637d8c873..9005bf9fb85 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -130,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void)  	cpu_set(cpu, cpu_callin_map); -	synchronise_count_slave(); +	synchronise_count_slave(cpu);  	/*  	 * irq will be enabled in ->smp_finish(), enabling it too early @@ -173,7 +173,6 @@ void smp_send_stop(void)  void __init smp_cpus_done(unsigned int max_cpus)  {  	mp_ops->cpus_done(); -	synchronise_count_master();  }  /* called from main before smp_init() */ @@ -206,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)  	while (!cpu_isset(cpu, cpu_callin_map))  		udelay(100); +	synchronise_count_master(cpu);  	return 0;  } diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index 842d55e411f..7f1eca3858d 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c @@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);  #define COUNTON	100  #define NR_LOOPS 5 -void __cpuinit synchronise_count_master(void) +void __cpuinit synchronise_count_master(int cpu)  {  	int i;  	unsigned long flags;  	unsigned int initcount; -	int nslaves;  #ifdef CONFIG_MIPS_MT_SMTC  	/* @@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void)  	return;  #endif -	printk(KERN_INFO "Synchronize counters across %u CPUs: ", -	       num_online_cpus()); +	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);  	local_irq_save(flags); @@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void)  	 * Notify the slaves that it's time to start  	 */  	atomic_set(&count_reference, read_c0_count()); -	atomic_set(&count_start_flag, 1); +	atomic_set(&count_start_flag, cpu);  	smp_wmb();  	/* Count will be 
initialised to current timer for all CPU's */ @@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void)  	 * two CPUs.  	 */ -	nslaves = num_online_cpus()-1;  	for (i = 0; i < NR_LOOPS; i++) { -		/* slaves loop on '!= ncpus' */ -		while (atomic_read(&count_count_start) != nslaves) +		/* slaves loop on '!= 2' */ +		while (atomic_read(&count_count_start) != 1)  			mb();  		atomic_set(&count_count_stop, 0);  		smp_wmb(); @@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void)  		/*  		 * Wait for all slaves to leave the synchronization point:  		 */ -		while (atomic_read(&count_count_stop) != nslaves) +		while (atomic_read(&count_count_stop) != 1)  			mb();  		atomic_set(&count_count_start, 0);  		smp_wmb(); @@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void)  	}  	/* Arrange for an interrupt in a short while */  	write_c0_compare(read_c0_count() + COUNTON); +	atomic_set(&count_start_flag, 0);  	local_irq_restore(flags); @@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void)  	printk("done.\n");  } -void __cpuinit synchronise_count_slave(void) +void __cpuinit synchronise_count_slave(int cpu)  {  	int i;  	unsigned int initcount; -	int ncpus;  #ifdef CONFIG_MIPS_MT_SMTC  	/* @@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void)  	 * so we first wait for the master to say everyone is ready  	 */ -	while (!atomic_read(&count_start_flag)) +	while (atomic_read(&count_start_flag) != cpu)  		mb();  	/* Count will be initialised to next expire for all CPU's */  	initcount = atomic_read(&count_reference); -	ncpus = num_online_cpus();  	for (i = 0; i < NR_LOOPS; i++) {  		atomic_inc(&count_count_start); -		while (atomic_read(&count_count_start) != ncpus) +		while (atomic_read(&count_count_start) != 2)  			mb();  		/* @@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void)  			write_c0_count(initcount);  		atomic_inc(&count_count_stop); -		while (atomic_read(&count_count_stop) != ncpus) +		while 
(atomic_read(&count_count_stop) != 2)  			mb();  	}  	/* Arrange for an interrupt in a short while */ diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c index 284dea54faf..2147cb34e70 100644 --- a/arch/mips/mti-malta/malta-pci.c +++ b/arch/mips/mti-malta/malta-pci.c @@ -252,16 +252,3 @@ void __init mips_pcibios_init(void)  	register_pci_controller(controller);  } - -/* Enable PCI 2.1 compatibility in PIIX4 */ -static void __devinit quirk_dlcsetup(struct pci_dev *dev) -{ -	u8 odlc, ndlc; -	(void) pci_read_config_byte(dev, 0x82, &odlc); -	/* Enable passive releases and delayed transaction */ -	ndlc = odlc | 7; -	(void) pci_write_config_byte(dev, 0x82, ndlc); -} - -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, -	quirk_dlcsetup); diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c index 414a7459858..86d77a66645 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c @@ -23,9 +23,12 @@  #define AR724X_PCI_MEM_BASE	0x10000000  #define AR724X_PCI_MEM_SIZE	0x08000000 +#define AR724X_PCI_REG_RESET		0x18  #define AR724X_PCI_REG_INT_STATUS	0x4c  #define AR724X_PCI_REG_INT_MASK		0x50 +#define AR724X_PCI_RESET_LINK_UP	BIT(0) +  #define AR724X_PCI_INT_DEV0		BIT(14)  #define AR724X_PCI_IRQ_COUNT		1 @@ -38,6 +41,15 @@ static void __iomem *ar724x_pci_ctrl_base;  static u32 ar724x_pci_bar0_value;  static bool ar724x_pci_bar0_is_cached; +static bool ar724x_pci_link_up; + +static inline bool ar724x_pci_check_link(void) +{ +	u32 reset; + +	reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET); +	return reset & AR724X_PCI_RESET_LINK_UP; +}  static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,  			    int size, uint32_t *value) @@ -46,6 +58,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,  	void __iomem *base;  	u32 data; +	if (!ar724x_pci_link_up) +		return PCIBIOS_DEVICE_NOT_FOUND; +  	if (devfn)  		return 
PCIBIOS_DEVICE_NOT_FOUND; @@ -96,6 +111,9 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,  	u32 data;  	int s; +	if (!ar724x_pci_link_up) +		return PCIBIOS_DEVICE_NOT_FOUND; +  	if (devfn)  		return PCIBIOS_DEVICE_NOT_FOUND; @@ -280,6 +298,10 @@ int __init ar724x_pcibios_init(int irq)  	if (ar724x_pci_ctrl_base == NULL)  		goto err_unmap_devcfg; +	ar724x_pci_link_up = ar724x_pci_check_link(); +	if (!ar724x_pci_link_up) +		pr_warn("ar724x: PCIe link is down\n"); +  	ar724x_pci_irq_init(irq);  	register_pci_controller(&ar724x_pci_controller); diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi index 8d35d2c1f69..4f9c9f682ec 100644 --- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi @@ -345,6 +345,13 @@  /include/ "qoriq-duart-1.dtsi"  /include/ "qoriq-gpio-0.dtsi"  /include/ "qoriq-usb2-mph-0.dtsi" +	usb@210000 { +		compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; +		port0; +	};  /include/ "qoriq-usb2-dr-0.dtsi" +	usb@211000 { +		compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; +	};  /include/ "qoriq-sec4.0-0.dtsi"  }; diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig index f4337bacd0e..26e541c4662 100644 --- a/arch/powerpc/configs/85xx/p1023rds_defconfig +++ b/arch/powerpc/configs/85xx/p1023rds_defconfig @@ -6,28 +6,27 @@ CONFIG_SYSVIPC=y  CONFIG_POSIX_MQUEUE=y  CONFIG_BSD_PROCESS_ACCT=y  CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y  CONFIG_IKCONFIG=y  CONFIG_IKCONFIG_PROC=y  CONFIG_LOG_BUF_SHIFT=14  CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set  CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_EXTRA_PASS=y  CONFIG_EMBEDDED=y  CONFIG_MODULES=y  CONFIG_MODULE_UNLOAD=y  CONFIG_MODULE_FORCE_UNLOAD=y  CONFIG_MODVERSIONS=y  # CONFIG_BLK_DEV_BSG is not set 
+CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y  CONFIG_P1023_RDS=y  CONFIG_QUICC_ENGINE=y  CONFIG_QE_GPIO=y  CONFIG_CPM2=y -CONFIG_GPIO_MPC8XXX=y  CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y  # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set  CONFIG_BINFMT_MISC=m  CONFIG_MATH_EMULATION=y @@ -63,11 +62,11 @@ CONFIG_INET_ESP=y  CONFIG_IPV6=y  CONFIG_IP_SCTP=m  CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y  CONFIG_PROC_DEVICETREE=y  CONFIG_BLK_DEV_LOOP=y  CONFIG_BLK_DEV_RAM=y  CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y  CONFIG_EEPROM_LEGACY=y  CONFIG_BLK_DEV_SD=y  CONFIG_CHR_DEV_ST=y @@ -80,15 +79,14 @@ CONFIG_SATA_FSL=y  CONFIG_SATA_SIL24=y  CONFIG_NETDEVICES=y  CONFIG_DUMMY=y +CONFIG_FS_ENET=y +CONFIG_FSL_PQ_MDIO=y +CONFIG_E1000E=y  CONFIG_MARVELL_PHY=y  CONFIG_DAVICOM_PHY=y  CONFIG_CICADA_PHY=y  CONFIG_VITESSE_PHY=y  CONFIG_FIXED_PHY=y -CONFIG_NET_ETHERNET=y -CONFIG_FS_ENET=y -CONFIG_E1000E=y -CONFIG_FSL_PQ_MDIO=y  CONFIG_INPUT_FF_MEMLESS=m  # CONFIG_INPUT_MOUSEDEV is not set  # CONFIG_INPUT_KEYBOARD is not set @@ -98,16 +96,15 @@ CONFIG_SERIAL_8250=y  CONFIG_SERIAL_8250_CONSOLE=y  CONFIG_SERIAL_8250_NR_UARTS=2  CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -CONFIG_SERIAL_8250_EXTENDED=y  CONFIG_SERIAL_8250_MANY_PORTS=y  CONFIG_SERIAL_8250_DETECT_IRQ=y  CONFIG_SERIAL_8250_RSA=y  CONFIG_SERIAL_QE=m -CONFIG_HW_RANDOM=y  CONFIG_NVRAM=y  CONFIG_I2C=y  CONFIG_I2C_CPM=m  CONFIG_I2C_MPC=y +CONFIG_GPIO_MPC8XXX=y  # CONFIG_HWMON is not set  CONFIG_VIDEO_OUTPUT_CONTROL=y  CONFIG_SOUND=y @@ -123,7 +120,6 @@ CONFIG_DMADEVICES=y  CONFIG_FSL_DMA=y  # CONFIG_NET_DMA is not set  CONFIG_STAGING=y -# CONFIG_STAGING_EXCLUDE_BUILD is not set  CONFIG_EXT2_FS=y  CONFIG_EXT3_FS=y  # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set @@ -150,22 +146,15 @@ CONFIG_QNX4FS_FS=m  CONFIG_SYSV_FS=m  CONFIG_UFS_FS=m  CONFIG_NFS_FS=y -CONFIG_NFS_V3=y  CONFIG_NFS_V4=y  CONFIG_ROOT_NFS=y  CONFIG_NFSD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y  
CONFIG_CRC_T10DIF=y  CONFIG_FRAME_WARN=8092  CONFIG_DEBUG_FS=y -CONFIG_DEBUG_KERNEL=y  CONFIG_DETECT_HUNG_TASK=y  # CONFIG_DEBUG_BUGVERBOSE is not set  CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_IRQ_DOMAIN_DEBUG=y  CONFIG_CRYPTO_PCBC=m  CONFIG_CRYPTO_SHA256=y  CONFIG_CRYPTO_SHA512=y diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig index cbb98c1234f..8b3d57c1ebe 100644 --- a/arch/powerpc/configs/corenet32_smp_defconfig +++ b/arch/powerpc/configs/corenet32_smp_defconfig @@ -6,8 +6,8 @@ CONFIG_SYSVIPC=y  CONFIG_POSIX_MQUEUE=y  CONFIG_BSD_PROCESS_ACCT=y  CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y -CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y  CONFIG_IKCONFIG=y  CONFIG_IKCONFIG_PROC=y  CONFIG_LOG_BUF_SHIFT=14 @@ -21,23 +21,22 @@ CONFIG_MODULE_UNLOAD=y  CONFIG_MODULE_FORCE_UNLOAD=y  CONFIG_MODVERSIONS=y  # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y  CONFIG_P2041_RDB=y  CONFIG_P3041_DS=y  CONFIG_P4080_DS=y  CONFIG_P5020_DS=y  CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y  # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set  CONFIG_BINFMT_MISC=m  CONFIG_KEXEC=y  CONFIG_IRQ_ALL_CPUS=y  CONFIG_FORCE_MAX_ZONEORDER=13 -CONFIG_FSL_LBC=y  CONFIG_PCI=y  CONFIG_PCIEPORTBUS=y -CONFIG_PCI_MSI=y  # CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y  CONFIG_RAPIDIO=y  CONFIG_FSL_RIO=y  CONFIG_NET=y @@ -70,6 +69,7 @@ CONFIG_INET_IPCOMP=y  CONFIG_IPV6=y  CONFIG_IP_SCTP=m  CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y  CONFIG_MTD=y  CONFIG_MTD_CMDLINE_PARTS=y  CONFIG_MTD_CHAR=y @@ -77,17 +77,14 @@ CONFIG_MTD_BLOCK=y  CONFIG_MTD_CFI=y  CONFIG_MTD_CFI_AMDSTD=y  CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_M25P80=y  CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_ECC=y -CONFIG_MTD_NAND_IDS=y -CONFIG_MTD_NAND_FSL_IFC=y  CONFIG_MTD_NAND_FSL_ELBC=y -CONFIG_MTD_M25P80=y +CONFIG_MTD_NAND_FSL_IFC=y  CONFIG_PROC_DEVICETREE=y  
CONFIG_BLK_DEV_LOOP=y  CONFIG_BLK_DEV_RAM=y  CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y  CONFIG_BLK_DEV_SD=y  CONFIG_CHR_DEV_ST=y  CONFIG_BLK_DEV_SR=y @@ -115,11 +112,9 @@ CONFIG_SERIO_LIBPS2=y  CONFIG_PPC_EPAPR_HV_BYTECHAN=y  CONFIG_SERIAL_8250=y  CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y  CONFIG_SERIAL_8250_MANY_PORTS=y  CONFIG_SERIAL_8250_DETECT_IRQ=y  CONFIG_SERIAL_8250_RSA=y -CONFIG_HW_RANDOM=y  CONFIG_NVRAM=y  CONFIG_I2C=y  CONFIG_I2C_CHARDEV=y @@ -132,7 +127,6 @@ CONFIG_SPI_FSL_ESPI=y  CONFIG_VIDEO_OUTPUT_CONTROL=y  CONFIG_USB_HID=m  CONFIG_USB=y -CONFIG_USB_DEVICEFS=y  CONFIG_USB_MON=y  CONFIG_USB_EHCI_HCD=y  CONFIG_USB_EHCI_FSL=y @@ -142,8 +136,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y  CONFIG_USB_STORAGE=y  CONFIG_MMC=y  CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_OF=y -CONFIG_MMC_SDHCI_OF_ESDHC=y  CONFIG_EDAC=y  CONFIG_EDAC_MM_EDAC=y  CONFIG_EDAC_MPC85XX=y @@ -170,19 +162,16 @@ CONFIG_HUGETLBFS=y  CONFIG_JFFS2_FS=y  CONFIG_CRAMFS=y  CONFIG_NFS_FS=y -CONFIG_NFS_V3=y  CONFIG_NFS_V4=y  CONFIG_ROOT_NFS=y  CONFIG_NFSD=m -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y  CONFIG_NLS_ISO8859_1=y  CONFIG_NLS_UTF8=m  CONFIG_MAGIC_SYSRQ=y  CONFIG_DEBUG_SHIRQ=y  CONFIG_DETECT_HUNG_TASK=y  CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_RCU_TRACE=y  CONFIG_CRYPTO_NULL=y  CONFIG_CRYPTO_PCBC=m  CONFIG_CRYPTO_MD4=y diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index dd89de8b0b7..0516e22ca3d 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig @@ -56,6 +56,7 @@ CONFIG_INET_ESP=y  CONFIG_IPV6=y  CONFIG_IP_SCTP=m  CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y  CONFIG_MTD=y  CONFIG_MTD_CMDLINE_PARTS=y  CONFIG_MTD_CHAR=y diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 15130066e5e..07b7f2af2dc 100644 --- a/arch/powerpc/configs/g5_defconfig +++ 
b/arch/powerpc/configs/g5_defconfig @@ -1,8 +1,10 @@ +CONFIG_PPC64=y +CONFIG_ALTIVEC=y +CONFIG_SMP=y +CONFIG_NR_CPUS=4  CONFIG_EXPERIMENTAL=y  CONFIG_SYSVIPC=y  CONFIG_POSIX_MQUEUE=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y  CONFIG_IKCONFIG=y  CONFIG_IKCONFIG_PROC=y  CONFIG_BLK_DEV_INITRD=y @@ -13,15 +15,16 @@ CONFIG_MODULES=y  CONFIG_MODULE_UNLOAD=y  CONFIG_MODVERSIONS=y  CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y -CONFIG_SMP=y -CONFIG_NR_CPUS=4 -CONFIG_KEXEC=y -# CONFIG_RELOCATABLE is not set +# CONFIG_PPC_PSERIES is not set  CONFIG_CPU_FREQ=y  CONFIG_CPU_FREQ_GOV_POWERSAVE=y  CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_PMAC64=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_KEXEC=y +CONFIG_IRQ_ALL_CPUS=y +# CONFIG_MIGRATION is not set  CONFIG_PCI_MSI=y  CONFIG_NET=y  CONFIG_PACKET=y @@ -49,6 +52,7 @@ CONFIG_NF_CT_NETLINK=m  CONFIG_NF_CONNTRACK_IPV4=m  CONFIG_IP_NF_QUEUE=m  CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_PROC_DEVICETREE=y  CONFIG_BLK_DEV_LOOP=y  CONFIG_BLK_DEV_NBD=m  CONFIG_BLK_DEV_RAM=y @@ -56,6 +60,8 @@ CONFIG_BLK_DEV_RAM_SIZE=65536  CONFIG_CDROM_PKTCDVD=m  CONFIG_IDE=y  CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_IDE_PMAC=y +CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y  CONFIG_BLK_DEV_SD=y  CONFIG_CHR_DEV_ST=y  CONFIG_BLK_DEV_SR=y @@ -79,24 +85,33 @@ CONFIG_DM_CRYPT=m  CONFIG_DM_SNAPSHOT=m  CONFIG_DM_MIRROR=m  CONFIG_DM_ZERO=m -CONFIG_MACINTOSH_DRIVERS=y +CONFIG_IEEE1394=y +CONFIG_IEEE1394_OHCI1394=y +CONFIG_IEEE1394_SBP2=m +CONFIG_IEEE1394_ETH1394=m +CONFIG_IEEE1394_RAWIO=y +CONFIG_IEEE1394_VIDEO1394=m +CONFIG_IEEE1394_DV1394=m +CONFIG_ADB_PMU=y +CONFIG_PMAC_SMU=y  CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_THERM_PM72=y +CONFIG_WINDFARM=y +CONFIG_WINDFARM_PM81=y +CONFIG_WINDFARM_PM91=y +CONFIG_WINDFARM_PM112=y +CONFIG_WINDFARM_PM121=y  CONFIG_NETDEVICES=y -CONFIG_BONDING=m  CONFIG_DUMMY=m -CONFIG_MII=y +CONFIG_BONDING=m  CONFIG_TUN=m +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_SUNGEM=y  CONFIG_ACENIC=m  
CONFIG_ACENIC_OMIT_TIGON_I=y -CONFIG_TIGON3=y  CONFIG_E1000=y -CONFIG_SUNGEM=y -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPPOE=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m +CONFIG_TIGON3=y  CONFIG_USB_CATC=m  CONFIG_USB_KAWETH=m  CONFIG_USB_PEGASUS=m @@ -106,24 +121,36 @@ CONFIG_USB_USBNET=m  # CONFIG_USB_NET_NET1080 is not set  # CONFIG_USB_NET_CDC_SUBSET is not set  # CONFIG_USB_NET_ZAURUS is not set +CONFIG_PPP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPPOE=m  # CONFIG_INPUT_MOUSEDEV_PSAUX is not set  CONFIG_INPUT_JOYDEV=m  CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set  # CONFIG_MOUSE_PS2 is not set +# CONFIG_SERIO_I8042 is not set  # CONFIG_SERIO_SERPORT is not set -CONFIG_VT_HW_CONSOLE_BINDING=y  # CONFIG_HW_RANDOM is not set  CONFIG_GEN_RTC=y  CONFIG_RAW_DRIVER=y  CONFIG_I2C_CHARDEV=y  # CONFIG_HWMON is not set -CONFIG_AGP=y -CONFIG_DRM=y -CONFIG_DRM_NOUVEAU=y +CONFIG_AGP=m +CONFIG_AGP_UNINORTH=m  CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y  CONFIG_FIRMWARE_EDID=y  CONFIG_FB_TILEBLITTING=y +CONFIG_FB_OF=y +CONFIG_FB_NVIDIA=y +CONFIG_FB_NVIDIA_I2C=y  CONFIG_FB_RADEON=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y  CONFIG_LOGO=y  CONFIG_SOUND=m  CONFIG_SND=m @@ -131,7 +158,15 @@ CONFIG_SND_SEQUENCER=m  CONFIG_SND_MIXER_OSS=m  CONFIG_SND_PCM_OSS=m  CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_POWERMAC=m +CONFIG_SND_AOA=m +CONFIG_SND_AOA_FABRIC_LAYOUT=m +CONFIG_SND_AOA_ONYX=m +CONFIG_SND_AOA_TAS=m +CONFIG_SND_AOA_TOONIE=m  CONFIG_SND_USB_AUDIO=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y  CONFIG_HID_GYRATION=y  CONFIG_LOGITECH_FF=y  CONFIG_HID_PANTHERLORD=y @@ -139,12 +174,13 @@ CONFIG_HID_PETALYNX=y  CONFIG_HID_SAMSUNG=y  CONFIG_HID_SONY=y  CONFIG_HID_SUNPLUS=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y  CONFIG_USB=y +CONFIG_USB_DEVICEFS=y  CONFIG_USB_MON=y  CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_HCD_PPC_OF is not set  CONFIG_USB_OHCI_HCD=y 
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y  CONFIG_USB_ACM=m  CONFIG_USB_PRINTER=y  CONFIG_USB_STORAGE=y @@ -208,6 +244,8 @@ CONFIG_REISERFS_FS_POSIX_ACL=y  CONFIG_REISERFS_FS_SECURITY=y  CONFIG_XFS_FS=m  CONFIG_XFS_POSIX_ACL=y +CONFIG_INOTIFY=y +CONFIG_AUTOFS_FS=m  CONFIG_ISO9660_FS=y  CONFIG_JOLIET=y  CONFIG_ZISOFS=y @@ -221,12 +259,14 @@ CONFIG_HFS_FS=m  CONFIG_HFSPLUS_FS=m  CONFIG_CRAMFS=y  CONFIG_NFS_FS=y +CONFIG_NFS_V3=y  CONFIG_NFS_V3_ACL=y  CONFIG_NFS_V4=y  CONFIG_NFSD=y  CONFIG_NFSD_V3_ACL=y  CONFIG_NFSD_V4=y  CONFIG_CIFS=m +CONFIG_PARTITION_ADVANCED=y  CONFIG_NLS_CODEPAGE_437=y  CONFIG_NLS_CODEPAGE_1250=y  CONFIG_NLS_CODEPAGE_1251=y @@ -234,23 +274,29 @@ CONFIG_NLS_ASCII=y  CONFIG_NLS_ISO8859_1=y  CONFIG_NLS_ISO8859_15=y  CONFIG_NLS_UTF8=y +CONFIG_CRC_T10DIF=y +CONFIG_LIBCRC32C=m  CONFIG_MAGIC_SYSRQ=y -# CONFIG_UNUSED_SYMBOLS is not set  CONFIG_DEBUG_FS=y  CONFIG_DEBUG_KERNEL=y  CONFIG_DEBUG_MUTEXES=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set  CONFIG_LATENCYTOP=y -CONFIG_STRICT_DEVMEM=y +CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_BOOTX_TEXT=y  CONFIG_CRYPTO_NULL=m  CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ECB=m  CONFIG_CRYPTO_PCBC=m  CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m  CONFIG_CRYPTO_MICHAEL_MIC=m  CONFIG_CRYPTO_SHA256=m  CONFIG_CRYPTO_SHA512=m  CONFIG_CRYPTO_WP512=m  CONFIG_CRYPTO_AES=m  CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m  CONFIG_CRYPTO_BLOWFISH=m  CONFIG_CRYPTO_CAST5=m  CONFIG_CRYPTO_CAST6=m @@ -260,6 +306,3 @@ CONFIG_CRYPTO_TEA=m  CONFIG_CRYPTO_TWOFISH=m  # CONFIG_CRYPTO_ANSI_CPRNG is not set  # CONFIG_CRYPTO_HW is not set -# CONFIG_VIRTUALIZATION is not set -CONFIG_CRC_T10DIF=y -CONFIG_LIBCRC32C=m diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig index 5aac9a8bc53..9352e4430c3 100644 --- a/arch/powerpc/configs/mpc83xx_defconfig +++ b/arch/powerpc/configs/mpc83xx_defconfig @@ -2,12 +2,12 @@ CONFIG_EXPERIMENTAL=y  CONFIG_SYSVIPC=y  CONFIG_LOG_BUF_SHIFT=14  CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is 
not set  CONFIG_EXPERT=y  CONFIG_SLAB=y  CONFIG_MODULES=y  CONFIG_MODULE_UNLOAD=y  # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y  # CONFIG_PPC_CHRP is not set  # CONFIG_PPC_PMAC is not set  CONFIG_PPC_83xx=y @@ -25,7 +25,6 @@ CONFIG_ASP834x=y  CONFIG_QUICC_ENGINE=y  CONFIG_QE_GPIO=y  CONFIG_MATH_EMULATION=y -CONFIG_SPARSE_IRQ=y  CONFIG_PCI=y  CONFIG_NET=y  CONFIG_PACKET=y @@ -42,10 +41,9 @@ CONFIG_INET_ESP=y  # CONFIG_INET_LRO is not set  # CONFIG_IPV6 is not set  CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y  # CONFIG_FW_LOADER is not set  CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_OF_PARTS=y  CONFIG_MTD_CHAR=y  CONFIG_MTD_BLOCK=y  CONFIG_MTD_CFI=y @@ -64,15 +62,14 @@ CONFIG_ATA=y  CONFIG_SATA_FSL=y  CONFIG_SATA_SIL=y  CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_UCC_GETH=y +CONFIG_GIANFAR=y  CONFIG_MARVELL_PHY=y  CONFIG_DAVICOM_PHY=y  CONFIG_VITESSE_PHY=y  CONFIG_ICPLUS_PHY=y  CONFIG_FIXED_PHY=y -CONFIG_NET_ETHERNET=y -CONFIG_MII=y -CONFIG_GIANFAR=y -CONFIG_UCC_GETH=y  CONFIG_INPUT_FF_MEMLESS=m  # CONFIG_INPUT_MOUSEDEV is not set  # CONFIG_INPUT_KEYBOARD is not set @@ -112,17 +109,12 @@ CONFIG_RTC_DRV_DS1374=y  CONFIG_EXT2_FS=y  CONFIG_EXT3_FS=y  # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_INOTIFY=y  CONFIG_PROC_KCORE=y  CONFIG_TMPFS=y  CONFIG_NFS_FS=y -CONFIG_NFS_V3=y  CONFIG_NFS_V4=y  CONFIG_ROOT_NFS=y -CONFIG_PARTITION_ADVANCED=y  CONFIG_CRC_T10DIF=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y  CONFIG_CRYPTO_ECB=m  CONFIG_CRYPTO_PCBC=m  CONFIG_CRYPTO_SHA256=y diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig index 03ee911c457..8b5bda27d24 100644 --- a/arch/powerpc/configs/mpc85xx_defconfig +++ b/arch/powerpc/configs/mpc85xx_defconfig @@ -5,7 +5,9 @@ CONFIG_SYSVIPC=y  CONFIG_POSIX_MQUEUE=y  CONFIG_BSD_PROCESS_ACCT=y  CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y  CONFIG_IKCONFIG=y  
CONFIG_IKCONFIG_PROC=y  CONFIG_LOG_BUF_SHIFT=14 @@ -17,6 +19,8 @@ CONFIG_MODULE_UNLOAD=y  CONFIG_MODULE_FORCE_UNLOAD=y  CONFIG_MODVERSIONS=y  # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y  CONFIG_MPC8540_ADS=y  CONFIG_MPC8560_ADS=y  CONFIG_MPC85xx_CDS=y @@ -40,8 +44,6 @@ CONFIG_SBC8548=y  CONFIG_QUICC_ENGINE=y  CONFIG_QE_GPIO=y  CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y  CONFIG_BINFMT_MISC=m  CONFIG_MATH_EMULATION=y  CONFIG_FORCE_MAX_ZONEORDER=12 @@ -74,36 +76,25 @@ CONFIG_INET_ESP=y  CONFIG_IPV6=y  CONFIG_IP_SCTP=m  CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y  CONFIG_MTD=y  CONFIG_MTD_CMDLINE_PARTS=y  CONFIG_MTD_CHAR=y  CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y  CONFIG_FTL=y -CONFIG_MTD_GEN_PROBE=y -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI=y  CONFIG_MTD_CFI_INTELEXT=y  CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_UTIL=y  CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_OF_PARTS=y +CONFIG_MTD_M25P80=y  CONFIG_MTD_NAND=y  CONFIG_MTD_NAND_FSL_ELBC=y  CONFIG_MTD_NAND_FSL_IFC=y -CONFIG_MTD_NAND_IDS=y -CONFIG_MTD_NAND_ECC=y -CONFIG_MTD_M25P80=y  CONFIG_PROC_DEVICETREE=y  CONFIG_BLK_DEV_LOOP=y  CONFIG_BLK_DEV_NBD=y  CONFIG_BLK_DEV_RAM=y  CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y  CONFIG_EEPROM_LEGACY=y  CONFIG_BLK_DEV_SD=y  CONFIG_CHR_DEV_ST=y @@ -115,6 +106,7 @@ CONFIG_ATA=y  CONFIG_SATA_AHCI=y  CONFIG_SATA_FSL=y  CONFIG_PATA_ALI=y +CONFIG_PATA_VIA=y  CONFIG_NETDEVICES=y  CONFIG_DUMMY=y  CONFIG_FS_ENET=y @@ -134,7 +126,6 @@ CONFIG_SERIAL_8250=y  CONFIG_SERIAL_8250_CONSOLE=y  CONFIG_SERIAL_8250_NR_UARTS=2  CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -CONFIG_SERIAL_8250_EXTENDED=y  CONFIG_SERIAL_8250_MANY_PORTS=y  CONFIG_SERIAL_8250_DETECT_IRQ=y  CONFIG_SERIAL_8250_RSA=y @@ -183,7 +174,6 @@ CONFIG_HID_SAMSUNG=y  CONFIG_HID_SONY=y  CONFIG_HID_SUNPLUS=y  CONFIG_USB=y -CONFIG_USB_DEVICEFS=y  
CONFIG_USB_MON=y  CONFIG_USB_EHCI_HCD=y  CONFIG_USB_EHCI_FSL=y @@ -229,18 +219,13 @@ CONFIG_QNX4FS_FS=m  CONFIG_SYSV_FS=m  CONFIG_UFS_FS=m  CONFIG_NFS_FS=y -CONFIG_NFS_V3=y  CONFIG_NFS_V4=y  CONFIG_ROOT_NFS=y  CONFIG_NFSD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y  CONFIG_CRC_T10DIF=y  CONFIG_DEBUG_FS=y  CONFIG_DETECT_HUNG_TASK=y  CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_IRQ_DOMAIN_DEBUG=y  CONFIG_CRYPTO_PCBC=m  CONFIG_CRYPTO_SHA256=y  CONFIG_CRYPTO_SHA512=y diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index fdfa84dc908..b0974e7e98a 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -7,7 +7,9 @@ CONFIG_SYSVIPC=y  CONFIG_POSIX_MQUEUE=y  CONFIG_BSD_PROCESS_ACCT=y  CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y  CONFIG_IKCONFIG=y  CONFIG_IKCONFIG_PROC=y  CONFIG_LOG_BUF_SHIFT=14 @@ -19,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y  CONFIG_MODULE_FORCE_UNLOAD=y  CONFIG_MODVERSIONS=y  # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y  CONFIG_MPC8540_ADS=y  CONFIG_MPC8560_ADS=y  CONFIG_MPC85xx_CDS=y @@ -42,8 +46,6 @@ CONFIG_SBC8548=y  CONFIG_QUICC_ENGINE=y  CONFIG_QE_GPIO=y  CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y  CONFIG_BINFMT_MISC=m  CONFIG_MATH_EMULATION=y  CONFIG_IRQ_ALL_CPUS=y @@ -77,36 +79,25 @@ CONFIG_INET_ESP=y  CONFIG_IPV6=y  CONFIG_IP_SCTP=m  CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y  CONFIG_MTD=y  CONFIG_MTD_CMDLINE_PARTS=y  CONFIG_MTD_CHAR=y  CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y  CONFIG_FTL=y -CONFIG_MTD_GEN_PROBE=y -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI=y  CONFIG_MTD_CFI_INTELEXT=y  CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_UTIL=y  CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_PARTITIONS=y 
-CONFIG_MTD_OF_PARTS=y +CONFIG_MTD_M25P80=y  CONFIG_MTD_NAND=y  CONFIG_MTD_NAND_FSL_ELBC=y  CONFIG_MTD_NAND_FSL_IFC=y -CONFIG_MTD_NAND_IDS=y -CONFIG_MTD_NAND_ECC=y -CONFIG_MTD_M25P80=y  CONFIG_PROC_DEVICETREE=y  CONFIG_BLK_DEV_LOOP=y  CONFIG_BLK_DEV_NBD=y  CONFIG_BLK_DEV_RAM=y  CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y  CONFIG_EEPROM_LEGACY=y  CONFIG_BLK_DEV_SD=y  CONFIG_CHR_DEV_ST=y @@ -137,7 +128,6 @@ CONFIG_SERIAL_8250=y  CONFIG_SERIAL_8250_CONSOLE=y  CONFIG_SERIAL_8250_NR_UARTS=2  CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -CONFIG_SERIAL_8250_EXTENDED=y  CONFIG_SERIAL_8250_MANY_PORTS=y  CONFIG_SERIAL_8250_DETECT_IRQ=y  CONFIG_SERIAL_8250_RSA=y @@ -186,7 +176,6 @@ CONFIG_HID_SAMSUNG=y  CONFIG_HID_SONY=y  CONFIG_HID_SUNPLUS=y  CONFIG_USB=y -CONFIG_USB_DEVICEFS=y  CONFIG_USB_MON=y  CONFIG_USB_EHCI_HCD=y  CONFIG_USB_EHCI_FSL=y @@ -232,18 +221,13 @@ CONFIG_QNX4FS_FS=m  CONFIG_SYSV_FS=m  CONFIG_UFS_FS=m  CONFIG_NFS_FS=y -CONFIG_NFS_V3=y  CONFIG_NFS_V4=y  CONFIG_ROOT_NFS=y  CONFIG_NFSD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y  CONFIG_CRC_T10DIF=y  CONFIG_DEBUG_FS=y  CONFIG_DETECT_HUNG_TASK=y  CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_IRQ_DOMAIN_DEBUG=y  CONFIG_CRYPTO_PCBC=m  CONFIG_CRYPTO_SHA256=y  CONFIG_CRYPTO_SHA512=y diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 50d82c8a037..b3c083de17a 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -553,9 +553,7 @@ static inline int cpu_has_feature(unsigned long feature)  		& feature);  } -#ifdef CONFIG_HAVE_HW_BREAKPOINT  #define HBP_NUM 1 -#endif /* CONFIG_HAVE_HW_BREAKPOINT */  #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 50ea12fd7bf..a8bf5c673a3 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -33,6 +33,7 @@  #include <asm/kvm_asm.h>  #include <asm/processor.h>  #include 
<asm/page.h> +#include <asm/cacheflush.h>  #define KVM_MAX_VCPUS		NR_CPUS  #define KVM_MAX_VCORES		NR_CPUS diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 0124937a23b..e006f0bdea9 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid);  void kvmppc_free_lpid(long lpid);  void kvmppc_init_lpid(unsigned long nr_lpids); +static inline void kvmppc_mmu_flush_icache(pfn_t pfn) +{ +	/* Clear i-cache for new pages */ +	struct page *page; +	page = pfn_to_page(pfn); +	if (!test_bit(PG_arch_1, &page->flags)) { +		flush_dcache_icache_page(page); +		set_bit(PG_arch_1, &page->flags); +	} +} + +  #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h index 326d33ca55c..d4f471fb103 100644 --- a/arch/powerpc/include/asm/mpic_msgr.h +++ b/arch/powerpc/include/asm/mpic_msgr.h @@ -14,6 +14,7 @@  #include <linux/types.h>  #include <linux/spinlock.h>  #include <asm/smp.h> +#include <asm/io.h>  struct mpic_msgr {  	u32 __iomem *base; diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 2d7bb8ced13..e4897523de4 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)  		return 0;  	} -	if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) { -		dev_info(dev, "Warning: IOMMU window too big for device mask\n"); -		dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n", -				mask, (tbl->it_offset + tbl->it_size) << -				IOMMU_PAGE_SHIFT); +	if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) { +		dev_info(dev, "Warning: IOMMU offset too big for device mask\n"); +		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n", +				mask, tbl->it_offset << IOMMU_PAGE_SHIFT);  		return 0;  	} else  		return 1; diff --git 
a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index f3a82dde61d..956a4c496de 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -253,7 +253,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)  	/* Do not emulate user-space instructions, instead single-step them */  	if (user_mode(regs)) { -		bp->ctx->task->thread.last_hit_ubp = bp; +		current->thread.last_hit_ubp = bp;  		regs->msr |= MSR_SE;  		goto out;  	} diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 782bd0a3c2f..c470a40b29f 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -25,6 +25,7 @@  #include <asm/processor.h>  #include <asm/machdep.h>  #include <asm/debug.h> +#include <linux/slab.h>  /*   * This table contains the mapping between PowerPC hardware trap types, and @@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt)  	return SIGHUP;		/* default for things we don't know about */  } +/** + * + *	kgdb_skipexception - Bail out of KGDB when we've been triggered. + *	@exception: Exception vector number + *	@regs: Current &struct pt_regs. + * + *	On some architectures we need to skip a breakpoint exception when + *	it occurs after a breakpoint has been removed. 
+ * + */ +int kgdb_skipexception(int exception, struct pt_regs *regs) +{ +	return kgdb_isremovedbreak(regs->nip); +} +  static int kgdb_call_nmi_hook(struct pt_regs *regs)  {  	kgdb_nmicallback(raw_smp_processor_id(), regs); @@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)  static int kgdb_singlestep(struct pt_regs *regs)  {  	struct thread_info *thread_info, *exception_thread_info; +	struct thread_info *backup_current_thread_info = \ +		(struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);  	if (user_mode(regs))  		return 0; @@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs)  	thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));  	exception_thread_info = current_thread_info(); -	if (thread_info != exception_thread_info) +	if (thread_info != exception_thread_info) { +		/* Save the original current_thread_info. */ +		memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);  		memcpy(exception_thread_info, thread_info, sizeof *thread_info); +	}  	kgdb_handle_exception(0, SIGTRAP, 0, regs);  	if (thread_info != exception_thread_info) -		memcpy(thread_info, exception_thread_info, sizeof *thread_info); +		/* Restore current_thread_info lastly. 
*/ +		memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);  	return 1;  } @@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,  #else  			linux_regs->msr |= MSR_SE;  #endif -			kgdb_single_step = 1;  			atomic_set(&kgdb_cpu_doing_single_step,  				   raw_smp_processor_id());  		} diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index f2496f2faec..4e3cc47f26b 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)  	long ret;  	if (personality(current->personality) == PER_LINUX32 -	    && personality == PER_LINUX) -		personality = PER_LINUX32; +	    && personality(personality) == PER_LINUX) +		personality = (personality & ~PER_MASK) | PER_LINUX32;  	ret = sys_personality(personality); -	if (ret == PER_LINUX32) -		ret = PER_LINUX; +	if (personality(ret) == PER_LINUX32) +		ret = (ret & ~PER_MASK) | PER_LINUX;  	return ret;  }  #endif diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index f922c29bb23..837f13e7b6b 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -211,6 +211,9 @@ next_pteg:  		pteg1 |= PP_RWRX;  	} +	if (orig_pte->may_execute) +		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); +  	local_irq_disable();  	if (pteg[rr]) { diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 10fc8ec9d2a..0688b6b3958 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)  	if (!orig_pte->may_execute)  		rflags |= HPTE_R_N; +	else +		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);  	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 
5a84c8d3d04..44b72feaff7 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede)  	sync			/* order setting ceded vs. testing prodded */  	lbz	r5,VCPU_PRODDED(r3)  	cmpwi	r5,0 -	bne	1f +	bne	kvm_cede_prodded  	li	r0,0		/* set trap to 0 to say hcall is handled */  	stw	r0,VCPU_TRAP(r3)  	li	r0,H_SUCCESS  	std	r0,VCPU_GPR(R3)(r3)  BEGIN_FTR_SECTION -	b	2f		/* just send it up to host on 970 */ +	b	kvm_cede_exit	/* just send it up to host on 970 */  END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)  	/* @@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)  	or	r4,r4,r0  	PPC_POPCNTW(R7,R4)  	cmpw	r7,r8 -	bge	2f +	bge	kvm_cede_exit  	stwcx.	r4,0,r6  	bne	31b  	li	r0,1 @@ -1555,7 +1555,8 @@ kvm_end_cede:  	b	hcall_real_fallback  	/* cede when already previously prodded case */ -1:	li	r0,0 +kvm_cede_prodded: +	li	r0,0  	stb	r0,VCPU_PRODDED(r3)  	sync			/* order testing prodded vs. clearing ceded */  	stb	r0,VCPU_CEDED(r3) @@ -1563,7 +1564,8 @@ kvm_end_cede:  	blr  	/* we've ceded but we want to give control to the host */ -2:	li	r3,H_TOO_HARD +kvm_cede_exit: +	li	r3,H_TOO_HARD  	blr  secondary_too_late: diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index c510fc96130..a2b66717813 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)  static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)  {  	if (vcpu_e500->g2h_tlb1_map) -		memset(vcpu_e500->g2h_tlb1_map, -		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0); +		memset(vcpu_e500->g2h_tlb1_map, 0, +		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);  	if (vcpu_e500->h2g_tlb1_rmap) -		memset(vcpu_e500->h2g_tlb1_rmap, -		       sizeof(unsigned int) * host_tlb_params[1].entries, 0); +		memset(vcpu_e500->h2g_tlb1_rmap, 0, +		       sizeof(unsigned int) * host_tlb_params[1].entries);  } 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) @@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,  	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,  				ref, gvaddr, stlbe); + +	/* Clear i-cache for new pages */ +	kvmppc_mmu_flush_icache(pfn);  }  /* XXX only map the one-one case, for now use TLB0 */ diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index f9ede7c6606..0d24ff15f5f 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -288,7 +288,7 @@ err1;	stb	r0,0(r3)  	std	r0,16(r1)  	stdu	r1,-STACKFRAMESIZE(r1)  	bl	.enter_vmx_usercopy -	cmpwi	r3,0 +	cmpwi	cr1,r3,0  	ld	r0,STACKFRAMESIZE+16(r1)  	ld	r3,STACKFRAMESIZE+48(r1)  	ld	r4,STACKFRAMESIZE+56(r1) @@ -326,38 +326,7 @@ err1;	stb	r0,0(r3)  	dcbt	r0,r8,0b01010	/* GO */  .machine pop -	/* -	 * We prefetch both the source and destination using enhanced touch -	 * instructions. We use a stream ID of 0 for the load side and -	 * 1 for the store side. 
-	 */ -	clrrdi	r6,r4,7 -	clrrdi	r9,r3,7 -	ori	r9,r9,1		/* stream=1 */ - -	srdi	r7,r5,7		/* length in cachelines, capped at 0x3FF */ -	cmpldi	cr1,r7,0x3FF -	ble	cr1,1f -	li	r7,0x3FF -1:	lis	r0,0x0E00	/* depth=7 */ -	sldi	r7,r7,7 -	or	r7,r7,r0 -	ori	r10,r7,1	/* stream=1 */ - -	lis	r8,0x8000	/* GO=1 */ -	clrldi	r8,r8,32 - -.machine push -.machine "power4" -	dcbt	r0,r6,0b01000 -	dcbt	r0,r7,0b01010 -	dcbtst	r0,r9,0b01000 -	dcbtst	r0,r10,0b01010 -	eieio -	dcbt	r0,r8,0b01010	/* GO */ -.machine pop - -	beq	.Lunwind_stack_nonvmx_copy +	beq	cr1,.Lunwind_stack_nonvmx_copy  	/*  	 * If source and destination are not relatively aligned we use a diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index 0efdc51bc71..7ba6c96de77 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7)  	std	r0,16(r1)  	stdu	r1,-STACKFRAMESIZE(r1)  	bl	.enter_vmx_copy -	cmpwi	r3,0 +	cmpwi	cr1,r3,0  	ld	r0,STACKFRAMESIZE+16(r1)  	ld	r3,STACKFRAMESIZE+48(r1)  	ld	r4,STACKFRAMESIZE+56(r1) @@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7)  	dcbt	r0,r8,0b01010	/* GO */  .machine pop -	beq	.Lunwind_stack_nonvmx_copy +	beq	cr1,.Lunwind_stack_nonvmx_copy  	/*  	 * If source and destination are not relatively aligned we use a diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index baaafde7d13..fbdad0e3929 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page)  	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);  #endif  } +EXPORT_SYMBOL(flush_dcache_icache_page);  void clear_user_page(void *page, unsigned long vaddr, struct page *pg)  { diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 77b49ddda9d..7cd2dbd6e4c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1431,7 +1431,7 @@ static void perf_event_interrupt(struct pt_regs *regs)  		if 
(!event->hw.idx || is_limited_pmc(event->hw.idx))  			continue;  		val = read_pmc(event->hw.idx); -		if ((int)val < 0) { +		if (pmc_overflow(val)) {  			/* event has overflowed */  			found = 1;  			record_and_restart(event, val, regs); diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index a7b2a600d0a..c37f4613632 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -465,7 +465,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)  			iounmap(hose->cfg_data);  		iounmap(hose->cfg_addr);  		pcibios_free_controller(hose); -		return 0; +		return -ENODEV;  	}  	setup_pci_cmd(hose); @@ -827,6 +827,7 @@ struct device_node *fsl_pci_primary;  void __devinit fsl_pci_init(void)  { +	int ret;  	struct device_node *node;  	struct pci_controller *hose;  	dma_addr_t max = 0xffffffff; @@ -855,10 +856,12 @@ void __devinit fsl_pci_init(void)  			if (!fsl_pci_primary)  				fsl_pci_primary = node; -			fsl_add_bridge(node, fsl_pci_primary == node); -			hose = pci_find_hose_for_OF_device(node); -			max = min(max, hose->dma_window_base_cur + -					hose->dma_window_size); +			ret = fsl_add_bridge(node, fsl_pci_primary == node); +			if (ret == 0) { +				hose = pci_find_hose_for_OF_device(node); +				max = min(max, hose->dma_window_base_cur + +						hose->dma_window_size); +			}  		}  	} diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index 483d8fa72e8..e961f8c4a8f 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -14,6 +14,9 @@  #include <linux/list.h>  #include <linux/of_platform.h>  #include <linux/errno.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/slab.h>  #include <asm/prom.h>  #include <asm/hw_irq.h>  #include <asm/ppc-pci.h> diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index eab3492a45c..9b49c65ee7a 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -17,6 +17,7 @@  #include 
<linux/reboot.h>  #include <linux/delay.h>  #include <linux/kallsyms.h> +#include <linux/kmsg_dump.h>  #include <linux/cpumask.h>  #include <linux/export.h>  #include <linux/sysrq.h> @@ -894,13 +895,13 @@ cmds(struct pt_regs *excp)  #endif  		default:  			printf("Unrecognized command: "); -		        do { +			do {  				if (' ' < cmd && cmd <= '~')  					putchar(cmd);  				else  					printf("\\x%x", cmd);  				cmd = inchar(); -		        } while (cmd != '\n');  +			} while (cmd != '\n');  			printf(" (type ? for help)\n");  			break;  		} @@ -1097,7 +1098,7 @@ static long check_bp_loc(unsigned long addr)  	return 1;  } -static char *breakpoint_help_string =  +static char *breakpoint_help_string =      "Breakpoint command usage:\n"      "b                show breakpoints\n"      "b <addr> [cnt]   set breakpoint at given instr addr\n" @@ -1193,7 +1194,7 @@ bpt_cmds(void)  	default:  		termch = cmd; -	        cmd = skipbl(); +		cmd = skipbl();  		if (cmd == '?') {  			printf(breakpoint_help_string);  			break; @@ -1359,7 +1360,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,  				       sp + REGS_OFFSET);  				break;  			} -                        printf("--- Exception: %lx %s at ", regs.trap, +			printf("--- Exception: %lx %s at ", regs.trap,  			       getvecname(TRAP(®s)));  			pc = regs.nip;  			lr = regs.link; @@ -1623,14 +1624,14 @@ static void super_regs(void)  	cmd = skipbl();  	if (cmd == '\n') { -	        unsigned long sp, toc; +		unsigned long sp, toc;  		asm("mr %0,1" : "=r" (sp) :);  		asm("mr %0,2" : "=r" (toc) :);  		printf("msr  = "REG"  sprg0= "REG"\n",  		       mfmsr(), mfspr(SPRN_SPRG0));  		printf("pvr  = "REG"  sprg1= "REG"\n", -		       mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));  +		       mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));  		printf("dec  = "REG"  sprg2= "REG"\n",  		       mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));  		printf("sp   = "REG"  sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3)); @@ -1783,7 +1784,7 @@ byterev(unsigned char *val, int 
size)  static int brev;  static int mnoread; -static char *memex_help_string =  +static char *memex_help_string =      "Memory examine command usage:\n"      "m [addr] [flags] examine/change memory\n"      "  addr is optional.  will start where left off.\n" @@ -1798,7 +1799,7 @@ static char *memex_help_string =      "NOTE: flags are saved as defaults\n"      ""; -static char *memex_subcmd_help_string =  +static char *memex_subcmd_help_string =      "Memory examine subcommands:\n"      "  hexval   write this val to current location\n"      "  'string' write chars from string to this location\n" @@ -2064,7 +2065,7 @@ prdump(unsigned long adrs, long ndump)  		nr = mread(adrs, temp, r);  		adrs += nr;  		for (m = 0; m < r; ++m) { -		        if ((m & (sizeof(long) - 1)) == 0 && m > 0) +			if ((m & (sizeof(long) - 1)) == 0 && m > 0)  				putchar(' ');  			if (m < nr)  				printf("%.2x", temp[m]); @@ -2072,7 +2073,7 @@ prdump(unsigned long adrs, long ndump)  				printf("%s", fault_chars[fault_type]);  		}  		for (; m < 16; ++m) { -		        if ((m & (sizeof(long) - 1)) == 0) +			if ((m & (sizeof(long) - 1)) == 0)  				putchar(' ');  			printf("  ");  		} @@ -2148,45 +2149,28 @@ print_address(unsigned long addr)  void  dump_log_buf(void)  { -        const unsigned long size = 128; -        unsigned long end, addr; -        unsigned char buf[size + 1]; - -        addr = 0; -        buf[size] = '\0'; - -        if (setjmp(bus_error_jmp) != 0) { -                printf("Unable to lookup symbol __log_buf!\n"); -                return; -        } - -        catch_memory_errors = 1; -        sync(); -        addr = kallsyms_lookup_name("__log_buf"); - -        if (! addr) -                printf("Symbol __log_buf not found!\n"); -        else { -                end = addr + (1 << CONFIG_LOG_BUF_SHIFT); -                while (addr < end) { -                        if (! 
mread(addr, buf, size)) { -                                printf("Can't read memory at address 0x%lx\n", addr); -                                break; -                        } +	struct kmsg_dumper dumper = { .active = 1 }; +	unsigned char buf[128]; +	size_t len; -                        printf("%s", buf); +	if (setjmp(bus_error_jmp) != 0) { +		printf("Error dumping printk buffer!\n"); +		return; +	} -                        if (strlen(buf) < size) -                                break; +	catch_memory_errors = 1; +	sync(); -                        addr += size; -                } -        } +	kmsg_dump_rewind_nolock(&dumper); +	while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) { +		buf[len] = '\0'; +		printf("%s", buf); +	} -        sync(); -        /* wait a little while to see if we get a machine check */ -        __delay(200); -        catch_memory_errors = 0; +	sync(); +	/* wait a little while to see if we get a machine check */ +	__delay(200); +	catch_memory_errors = 0;  }  /* diff --git a/arch/x86/Makefile b/arch/x86/Makefile index b0c5276861e..682e9c210ba 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -27,6 +27,10 @@ ifeq ($(CONFIG_X86_32),y)          KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return +        # Never want PIC in a 32-bit kernel, prevent breakage with GCC built +        # with nonstandard options +        KBUILD_CFLAGS += -fno-pic +          # prevent gcc from keeping the stack 16 byte aligned          KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 5a747dd884d..f7535bedc33 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -57,7 +57,7 @@ KBUILD_CFLAGS	:= $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \  		   -Wall -Wstrict-prototypes \  		   -march=i386 -mregparm=3 \  		   -include $(srctree)/$(src)/code16gcc.h \ -		   -fno-strict-aliasing -fomit-frame-pointer \ +		   -fno-strict-aliasing 
-fomit-frame-pointer -fno-pic \  		   $(call cc-option, -ffreestanding) \  		   $(call cc-option, -fno-toplevel-reorder,\  			$(call cc-option, -fno-unit-at-a-time)) \ diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index b315a33867f..33692eaabab 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -12,8 +12,7 @@   * Simple spin lock operations.  There are two variants, one clears IRQ's   * on the local processor, one does not.   * - * These are fair FIFO ticket locks, which are currently limited to 256 - * CPUs. + * These are fair FIFO ticket locks, which support up to 2^16 CPUs.   *   * (the type definitions are in asm/spinlock_types.h)   */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index afb7ff79a29..ced4534baed 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -165,7 +165,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =  #endif  #ifdef P6_NOP1 -static const unsigned char  __initconst_or_module p6nops[] = +static const unsigned char p6nops[] =  {  	P6_NOP1,  	P6_NOP2, diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index a6c64aaddf9..c265593ec2c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1356,6 +1356,16 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,  	if (!IO_APIC_IRQ(irq))  		return; +	/* +	 * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC +	 * can handle this irq and the apic driver is finialized at this point, +	 * update the cfg->domain. 
+	 */ +	if (irq < legacy_pic->nr_legacy_irqs && +	    cpumask_equal(cfg->domain, cpumask_of(0))) +		apic->vector_allocation_domain(0, cfg->domain, +					       apic->target_cpus()); +  	if (assign_irq_vector(irq, cfg, apic->target_cpus()))  		return; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 46d8786d655..a5fbc3c5fcc 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -144,6 +144,8 @@ static int __init x86_xsave_setup(char *s)  {  	setup_clear_cpu_cap(X86_FEATURE_XSAVE);  	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); +	setup_clear_cpu_cap(X86_FEATURE_AVX); +	setup_clear_cpu_cap(X86_FEATURE_AVX2);  	return 1;  }  __setup("noxsave", x86_xsave_setup); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 382366977d4..7f2739e03e7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1522,8 +1522,16 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)  	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;  	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;  	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; +	/* +	 * If PMU counter has PEBS enabled it is not enough to disable counter +	 * on a guest entry since PEBS memory write can overshoot guest entry +	 * and corrupt guest memory. Disabling PEBS solves the problem. 
+	 */ +	arr[1].msr = MSR_IA32_PEBS_ENABLE; +	arr[1].host = cpuc->pebs_enabled; +	arr[1].guest = 0; -	*nr = 1; +	*nr = 2;  	return arr;  } diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 7563fda9f03..0a5571080e7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -796,7 +796,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {  DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");  DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); -DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63");  DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");  DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); @@ -902,16 +901,21 @@ static struct attribute_group nhmex_uncore_cbox_format_group = {  	.attrs = nhmex_uncore_cbox_formats_attr,  }; +/* msr offset for each instance of cbox */ +static unsigned nhmex_cbox_msr_offsets[] = { +	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, +}; +  static struct intel_uncore_type nhmex_uncore_cbox = {  	.name			= "cbox",  	.num_counters		= 6, -	.num_boxes		= 8, +	.num_boxes		= 10,  	.perf_ctr_bits		= 48,  	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,  	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,  	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,  	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL, -	.msr_offset		= NHMEX_C_MSR_OFFSET, +	.msr_offsets		= nhmex_cbox_msr_offsets,  	.pair_ctr_ctl		= 1,  	.ops			= &nhmex_uncore_ops,  	.format_group		= &nhmex_uncore_cbox_format_group @@ -1032,24 +1036,22 @@ static struct intel_uncore_type nhmex_uncore_bbox = {  static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)  { -	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; -	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; +	struct hw_perf_event *hwc = &event->hw; +	struct hw_perf_event_extra *reg1 = &hwc->extra_reg; +	struct hw_perf_event_extra *reg2 = &hwc->branch_reg; -	if 
(event->attr.config & NHMEX_S_PMON_MM_CFG_EN) { -		reg1->config = event->attr.config1; -		reg2->config = event->attr.config2; -	} else { -		reg1->config = ~0ULL; -		reg2->config = ~0ULL; -	} +	/* only TO_R_PROG_EV event uses the match/mask register */ +	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) != +	    NHMEX_S_EVENT_TO_R_PROG_EV) +		return 0;  	if (box->pmu->pmu_idx == 0)  		reg1->reg = NHMEX_S0_MSR_MM_CFG;  	else  		reg1->reg = NHMEX_S1_MSR_MM_CFG; -  	reg1->idx = 0; - +	reg1->config = event->attr.config1; +	reg2->config = event->attr.config2;  	return 0;  } @@ -1059,8 +1061,8 @@ static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct per  	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;  	struct hw_perf_event_extra *reg2 = &hwc->branch_reg; -	wrmsrl(reg1->reg, 0); -	if (reg1->config != ~0ULL || reg2->config != ~0ULL) { +	if (reg1->idx != EXTRA_REG_NONE) { +		wrmsrl(reg1->reg, 0);  		wrmsrl(reg1->reg + 1, reg1->config);  		wrmsrl(reg1->reg + 2, reg2->config);  		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); @@ -1074,7 +1076,6 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = {  	&format_attr_edge.attr,  	&format_attr_inv.attr,  	&format_attr_thresh8.attr, -	&format_attr_mm_cfg.attr,  	&format_attr_match.attr,  	&format_attr_mask.attr,  	NULL, @@ -1142,6 +1143,9 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {  	EVENT_EXTRA_END  }; +/* Nehalem-EX or Westmere-EX ? 
*/ +bool uncore_nhmex; +  static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)  {  	struct intel_uncore_extra_reg *er; @@ -1171,18 +1175,29 @@ static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64  		return false;  	/* mask of the shared fields */ -	mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; +	if (uncore_nhmex) +		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; +	else +		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;  	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];  	raw_spin_lock_irqsave(&er->lock, flags);  	/* add mask of the non-shared field if it's in use */ -	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) -		mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); +	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { +		if (uncore_nhmex) +			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); +		else +			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); +	}  	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {  		atomic_add(1 << (idx * 8), &er->ref); -		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | -			NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); +		if (uncore_nhmex) +			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | +				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); +		else +			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK | +				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);  		er->config &= ~mask;  		er->config |= (config & mask);  		ret = true; @@ -1216,7 +1231,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)  	/* get the non-shared control bits and shift them */  	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; -	config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); +	if (uncore_nhmex) +		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); +	else +		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);  	if (new_idx > orig_idx) {  		idx = new_idx - orig_idx;  		config <<= 3 * idx; @@ -1226,6 +1244,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)  	}  	/* add the shared control 
bits back */ +	if (uncore_nhmex) +		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; +	else +		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;  	config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;  	if (modify) {  		/* adjust the main event selector */ @@ -1264,7 +1286,8 @@ again:  	}  	/* for the match/mask registers */ -	if ((uncore_box_is_fake(box) || !reg2->alloc) && +	if (reg2->idx != EXTRA_REG_NONE && +	    (uncore_box_is_fake(box) || !reg2->alloc) &&  	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))  		goto fail; @@ -1278,7 +1301,8 @@ again:  		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))  			nhmex_mbox_alter_er(event, idx[0], true);  		reg1->alloc |= alloc; -		reg2->alloc = 1; +		if (reg2->idx != EXTRA_REG_NONE) +			reg2->alloc = 1;  	}  	return NULL;  fail: @@ -1342,9 +1366,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event  	struct extra_reg *er;  	unsigned msr;  	int reg_idx = 0; - -	if (WARN_ON_ONCE(reg1->idx != -1)) -		return -EINVAL;  	/*  	 * The mbox events may require 2 extra MSRs at the most. 
But only  	 * the lower 32 bits in these MSRs are significant, so we can use @@ -1355,11 +1376,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event  			continue;  		if (event->attr.config1 & ~er->valid_mask)  			return -EINVAL; -		if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) || -		    er->idx == __BITS_VALUE(reg1->idx, 1, 8)) -			continue; -		if (WARN_ON_ONCE(reg_idx >= 2)) -			return -EINVAL;  		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;  		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) @@ -1368,6 +1384,8 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event  		/* always use the 32~63 bits to pass the PLD config */  		if (er->idx == EXTRA_REG_NHMEX_M_PLD)  			reg_idx = 1; +		else if (WARN_ON_ONCE(reg_idx > 0)) +			return -EINVAL;  		reg1->idx &= ~(0xff << (reg_idx * 8));  		reg1->reg &= ~(0xffff << (reg_idx * 16)); @@ -1376,17 +1394,21 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event  		reg1->config = event->attr.config1;  		reg_idx++;  	} -	/* use config2 to pass the filter config */ -	reg2->idx = EXTRA_REG_NHMEX_M_FILTER; -	if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) -		reg2->config = event->attr.config2; -	else -		reg2->config = ~0ULL; -	if (box->pmu->pmu_idx == 0) -		reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; -	else -		reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; - +	/* +	 * The mbox only provides ability to perform address matching +	 * for the PLD events. 
+	 */ +	if (reg_idx == 2) { +		reg2->idx = EXTRA_REG_NHMEX_M_FILTER; +		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) +			reg2->config = event->attr.config2; +		else +			reg2->config = ~0ULL; +		if (box->pmu->pmu_idx == 0) +			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; +		else +			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; +	}  	return 0;  } @@ -1422,34 +1444,36 @@ static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct per  		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),  			nhmex_mbox_shared_reg_config(box, idx)); -	wrmsrl(reg2->reg, 0); -	if (reg2->config != ~0ULL) { -		wrmsrl(reg2->reg + 1, -			reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); -		wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & -			(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); -		wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); +	if (reg2->idx != EXTRA_REG_NONE) { +		wrmsrl(reg2->reg, 0); +		if (reg2->config != ~0ULL) { +			wrmsrl(reg2->reg + 1, +				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); +			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & +				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); +			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); +		}  	}  	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);  } -DEFINE_UNCORE_FORMAT_ATTR(count_mode,	count_mode,	"config:2-3"); -DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode,	"config:4-5"); -DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,	wrap_mode,	"config:6"); -DEFINE_UNCORE_FORMAT_ATTR(flag_mode,	flag_mode,	"config:7"); -DEFINE_UNCORE_FORMAT_ATTR(inc_sel,	inc_sel,	"config:9-13"); -DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,	set_flag_sel,	"config:19-21"); -DEFINE_UNCORE_FORMAT_ATTR(filter_cfg,	filter_cfg,	"config2:63"); -DEFINE_UNCORE_FORMAT_ATTR(filter_match,	filter_match,	"config2:0-33"); -DEFINE_UNCORE_FORMAT_ATTR(filter_mask,	filter_mask,	"config2:34-61"); -DEFINE_UNCORE_FORMAT_ATTR(dsp,		dsp,		"config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(thr,		thr,		"config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(fvc,		fvc,		"config1:0-31"); 
-DEFINE_UNCORE_FORMAT_ATTR(pgt,		pgt,		"config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(map,		map,		"config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(iss,		iss,		"config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(pld,		pld,		"config1:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(count_mode,		count_mode,	"config:2-3"); +DEFINE_UNCORE_FORMAT_ATTR(storage_mode,		storage_mode,	"config:4-5"); +DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,		wrap_mode,	"config:6"); +DEFINE_UNCORE_FORMAT_ATTR(flag_mode,		flag_mode,	"config:7"); +DEFINE_UNCORE_FORMAT_ATTR(inc_sel,		inc_sel,	"config:9-13"); +DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,		set_flag_sel,	"config:19-21"); +DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,	filter_cfg_en,	"config2:63"); +DEFINE_UNCORE_FORMAT_ATTR(filter_match,		filter_match,	"config2:0-33"); +DEFINE_UNCORE_FORMAT_ATTR(filter_mask,		filter_mask,	"config2:34-61"); +DEFINE_UNCORE_FORMAT_ATTR(dsp,			dsp,		"config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(thr,			thr,		"config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(fvc,			fvc,		"config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pgt,			pgt,		"config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(map,			map,		"config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(iss,			iss,		"config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pld,			pld,		"config1:32-63");  static struct attribute *nhmex_uncore_mbox_formats_attr[] = {  	&format_attr_count_mode.attr, @@ -1458,7 +1482,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = {  	&format_attr_flag_mode.attr,  	&format_attr_inc_sel.attr,  	&format_attr_set_flag_sel.attr, -	&format_attr_filter_cfg.attr, +	&format_attr_filter_cfg_en.attr,  	&format_attr_filter_match.attr,  	&format_attr_filter_mask.attr,  	&format_attr_dsp.attr, @@ -1482,6 +1506,12 @@ static struct uncore_event_desc nhmex_uncore_mbox_events[] = {  	{ /* end: all zeroes */ },  }; +static struct uncore_event_desc wsmex_uncore_mbox_events[] = { +	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"), +	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"), +	{ 
/* end: all zeroes */ }, +}; +  static struct intel_uncore_ops nhmex_uncore_mbox_ops = {  	NHMEX_UNCORE_OPS_COMMON_INIT(),  	.enable_event	= nhmex_mbox_msr_enable_event, @@ -1513,7 +1543,7 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)  	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;  	int port; -	/* adjust the main event selector */ +	/* adjust the main event selector and extra register index */  	if (reg1->idx % 2) {  		reg1->idx--;  		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; @@ -1522,29 +1552,17 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)  		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;  	} -	/* adjust address or config of extra register */ +	/* adjust extra register config */  	port = reg1->idx / 6 + box->pmu->pmu_idx * 4;  	switch (reg1->idx % 6) { -	case 0: -		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); -		break; -	case 1: -		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); -		break;  	case 2: -		/* the 8~15 bits to the 0~7 bits */ +		/* shift the 8~15 bits to the 0~7 bits */  		reg1->config >>= 8;  		break;  	case 3: -		/* the 0~7 bits to the 8~15 bits */ +		/* shift the 0~7 bits to the 8~15 bits */  		reg1->config <<= 8;  		break; -	case 4: -		reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); -		break; -	case 5: -		reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); -		break;  	};  } @@ -1671,7 +1689,7 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event  	struct hw_perf_event *hwc = &event->hw;  	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;  	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; -	int port, idx; +	int idx;  	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>  		NHMEX_R_PMON_CTL_EV_SEL_SHIFT; @@ -1681,27 +1699,11 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event  	reg1->idx = idx;  	reg1->config = event->attr.config1; -	port = idx / 6 + 
box->pmu->pmu_idx * 4; -	idx %= 6; -	switch (idx) { -	case 0: -		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); -		break; -	case 1: -		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); -		break; -	case 2: -	case 3: -		reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port); -		break; +	switch (idx % 6) {  	case 4:  	case 5: -		if (idx == 4) -			reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); -		else -			reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); -		reg2->config = event->attr.config2;  		hwc->config |= event->attr.config & (~0ULL << 32); +		reg2->config = event->attr.config2;  		break;  	};  	return 0; @@ -1727,28 +1729,34 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per  	struct hw_perf_event *hwc = &event->hw;  	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;  	struct hw_perf_event_extra *reg2 = &hwc->branch_reg; -	int idx, er_idx; +	int idx, port; -	idx = reg1->idx % 6; -	er_idx = idx; -	if (er_idx > 2) -		er_idx--; -	er_idx += (reg1->idx / 6) * 5; +	idx = reg1->idx; +	port = idx / 6 + box->pmu->pmu_idx * 4; -	switch (idx) { +	switch (idx % 6) {  	case 0: +		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); +		break;  	case 1: -		wrmsrl(reg1->reg, reg1->config); +		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);  		break;  	case 2:  	case 3: -		wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx)); +		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), +			nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));  		break;  	case 4: +		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), +			hwc->config >> 32); +		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); +		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); +		break;  	case 5: -		wrmsrl(reg1->reg, reg1->config); -		wrmsrl(reg1->reg + 1, hwc->config >> 32); -		wrmsrl(reg1->reg + 2, reg2->config); +		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), +			hwc->config >> 32); +		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); +		
wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);  		break;  	}; @@ -1756,8 +1764,8 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per  		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));  } -DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63"); -DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");  DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");  DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");  DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); @@ -2303,6 +2311,7 @@ int uncore_pmu_event_init(struct perf_event *event)  	event->hw.idx = -1;  	event->hw.last_tag = ~0ULL;  	event->hw.extra_reg.idx = EXTRA_REG_NONE; +	event->hw.branch_reg.idx = EXTRA_REG_NONE;  	if (event->attr.config == UNCORE_FIXED_EVENT) {  		/* no fixed counter */ @@ -2373,7 +2382,7 @@ static void __init uncore_type_exit(struct intel_uncore_type *type)  	type->attr_groups[1] = NULL;  } -static void uncore_types_exit(struct intel_uncore_type **types) +static void __init uncore_types_exit(struct intel_uncore_type **types)  {  	int i;  	for (i = 0; types[i]; i++) @@ -2814,7 +2823,13 @@ static int __init uncore_cpu_init(void)  			snbep_uncore_cbox.num_boxes = max_cores;  		msr_uncores = snbep_msr_uncores;  		break; -	case 46: +	case 46: /* Nehalem-EX */ +		uncore_nhmex = true; +	case 47: /* Westmere-EX aka. 
Xeon E7 */ +		if (!uncore_nhmex) +			nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; +		if (nhmex_uncore_cbox.num_boxes > max_cores) +			nhmex_uncore_cbox.num_boxes = max_cores;  		msr_uncores = nhmex_msr_uncores;  		break;  	default: diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index c9e5dc56630..5b81c1856aa 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -230,6 +230,7 @@  #define NHMEX_S1_MSR_MASK			0xe5a  #define NHMEX_S_PMON_MM_CFG_EN			(0x1ULL << 63) +#define NHMEX_S_EVENT_TO_R_PROG_EV		0  /* NHM-EX Mbox */  #define NHMEX_M0_MSR_GLOBAL_CTL			0xca0 @@ -275,18 +276,12 @@  		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\  		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) - -#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK	0x1f -#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK	(0x7 << 5) -#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK	(0x7 << 8) -#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR	(1 << 23) -#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK			\ -		(NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK |	\ -		 NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK |	\ -		 NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK  |	\ -		 NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR) +#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))  #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7 << (11 + 3 * (n))) +#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24)) +#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7 << (12 + 3 * (n))) +  /*   * use the 9~13 bits to select event If the 7th bit is not set,   * otherwise use the 19~21 bits to select event. 
@@ -368,6 +363,7 @@ struct intel_uncore_type {  	unsigned num_shared_regs:8;  	unsigned single_fixed:1;  	unsigned pair_ctr_ctl:1; +	unsigned *msr_offsets;  	struct event_constraint unconstrainted;  	struct event_constraint *constraints;  	struct intel_uncore_pmu *pmus; @@ -485,29 +481,31 @@ unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)  	return idx * 8 + box->pmu->type->perf_ctr;  } -static inline -unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) +static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box) +{ +	struct intel_uncore_pmu *pmu = box->pmu; +	return pmu->type->msr_offsets ? +		pmu->type->msr_offsets[pmu->pmu_idx] : +		pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)  {  	if (!box->pmu->type->box_ctl)  		return 0; -	return box->pmu->type->box_ctl + -		box->pmu->type->msr_offset * box->pmu->pmu_idx; +	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);  } -static inline -unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) +static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)  {  	if (!box->pmu->type->fixed_ctl)  		return 0; -	return box->pmu->type->fixed_ctl + -		box->pmu->type->msr_offset * box->pmu->pmu_idx; +	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);  } -static inline -unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) +static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)  { -	return box->pmu->type->fixed_ctr + -		box->pmu->type->msr_offset * box->pmu->pmu_idx; +	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);  }  static inline @@ -515,7 +513,7 @@ unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)  {  	return box->pmu->type->event_ctl +  		(box->pmu->type->pair_ctr_ctl ? 
2 * idx : idx) + -		box->pmu->type->msr_offset * box->pmu->pmu_idx; +		uncore_msr_box_offset(box);  }  static inline @@ -523,7 +521,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)  {  	return box->pmu->type->perf_ctr +  		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + -		box->pmu->type->msr_offset * box->pmu->pmu_idx; +		uncore_msr_box_offset(box);  }  static inline diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 7ad683d7864..d44f7829968 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -270,7 +270,7 @@ void fixup_irqs(void)  		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {  			break_affinity = 1; -			affinity = cpu_all_mask; +			affinity = cpu_online_mask;  		}  		chip = irq_data_get_irq_chip(data); diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 8a2ce8fd41c..82746f942cd 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -143,11 +143,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,  				  unsigned int *current_size)  {  	struct microcode_header_amd *mc_hdr; -	unsigned int actual_size; +	unsigned int actual_size, patch_size;  	u16 equiv_cpu_id;  	/* size of the current patch we're staring at */ -	*current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE; +	patch_size = *(u32 *)(ucode_ptr + 4); +	*current_size = patch_size + SECTION_HDR_SIZE;  	equiv_cpu_id = find_equiv_id();  	if (!equiv_cpu_id) @@ -174,7 +175,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,  	/*  	 * now that the header looks sane, verify its size  	 */ -	actual_size = verify_ucode_size(cpu, *current_size, leftover_size); +	actual_size = verify_ucode_size(cpu, patch_size, leftover_size);  	if (!actual_size)  		return 0; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 97d9a9914ba..a3b57a27be8 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -475,13 +475,26 @@ 
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)  	return address_mask(ctxt, reg);  } +static void masked_increment(ulong *reg, ulong mask, int inc) +{ +	assign_masked(reg, *reg + inc, mask); +} +  static inline void  register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)  { +	ulong mask; +  	if (ctxt->ad_bytes == sizeof(unsigned long)) -		*reg += inc; +		mask = ~0UL;  	else -		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt)); +		mask = ad_mask(ctxt); +	masked_increment(reg, mask, inc); +} + +static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) +{ +	masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);  }  static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) @@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)  {  	struct segmented_address addr; -	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes); -	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); +	rsp_increment(ctxt, -bytes); +	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);  	addr.seg = VCPU_SREG_SS;  	return segmented_write(ctxt, addr, data, bytes); @@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,  	int rc;  	struct segmented_address addr; -	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); +	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);  	addr.seg = VCPU_SREG_SS;  	rc = segmented_read(ctxt, addr, dest, len);  	if (rc != X86EMUL_CONTINUE)  		return rc; -	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len); +	rsp_increment(ctxt, len);  	return rc;  } @@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)  	while (reg >= VCPU_REGS_RAX) {  		if (reg == VCPU_REGS_RSP) { -			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -							ctxt->op_bytes); +			rsp_increment(ctxt, ctxt->op_bytes);  			--reg;  		} @@ -2825,7 +2837,7 @@ static int 
em_ret_near_imm(struct x86_emulate_ctxt *ctxt)  	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);  	if (rc != X86EMUL_CONTINUE)  		return rc; -	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val); +	rsp_increment(ctxt, ctxt->src.val);  	return X86EMUL_CONTINUE;  } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 01ca0042393..7fbd0d273ea 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4113,16 +4113,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)  		LIST_HEAD(invalid_list);  		/* +		 * Never scan more than sc->nr_to_scan VM instances. +		 * Will not hit this condition practically since we do not try +		 * to shrink more than one VM and it is very unlikely to see +		 * !n_used_mmu_pages so many times. +		 */ +		if (!nr_to_scan--) +			break; +		/*  		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock  		 * here. We may skip a VM instance errorneosly, but we do not  		 * want to shrink a VM that only started to populate its MMU  		 * anyway.  		 */ -		if (kvm->arch.n_used_mmu_pages > 0) { -			if (!nr_to_scan--) -				break; +		if (!kvm->arch.n_used_mmu_pages)  			continue; -		}  		idx = srcu_read_lock(&kvm->srcu);  		spin_lock(&kvm->mmu_lock); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 42bce48f692..dce75b76031 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);   * kvm-specific. Those are put in the beginning of the list.   
*/ -#define KVM_SAVE_MSRS_BEGIN	9 +#define KVM_SAVE_MSRS_BEGIN	10  static u32 msrs_to_save[] = {  	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,  	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index f6679a7fb8c..b91e4851242 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)  }  /* - * search for a shareable pmd page for hugetlb. + * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() + * and returns the corresponding pte. While this is not necessary for the + * !shared pmd case because we can allocate the pmd later as well, it makes the + * code much cleaner. pmd allocation is essential for the shared case because + * pud has to be populated inside the same i_mmap_mutex section - otherwise + * racing tasks could either miss the sharing (see huge_pte_offset) or select a + * bad pmd for sharing.   
*/ -static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +static pte_t * +huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)  {  	struct vm_area_struct *vma = find_vma(mm, addr);  	struct address_space *mapping = vma->vm_file->f_mapping; @@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)  	struct vm_area_struct *svma;  	unsigned long saddr;  	pte_t *spte = NULL; +	pte_t *pte;  	if (!vma_shareable(vma, addr)) -		return; +		return (pte_t *)pmd_alloc(mm, pud, addr);  	mutex_lock(&mapping->i_mmap_mutex);  	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { @@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)  		put_page(virt_to_page(spte));  	spin_unlock(&mm->page_table_lock);  out: +	pte = (pte_t *)pmd_alloc(mm, pud, addr);  	mutex_unlock(&mapping->i_mmap_mutex); +	return pte;  }  /* @@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,  		} else {  			BUG_ON(sz != PMD_SIZE);  			if (pud_none(*pud)) -				huge_pmd_share(mm, addr, pud); -			pte = (pte_t *) pmd_alloc(mm, pud, addr); +				pte = huge_pmd_share(mm, addr, pud); +			else +				pte = (pte_t *)pmd_alloc(mm, pud, addr);  		}  	}  	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 931930a9616..a718e0d2350 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,  	/*  	 * On success we use clflush, when the CPU supports it to -	 * avoid the wbindv. If the CPU does not support it, in the -	 * error case, and during early boot (for EFI) we fall back -	 * to cpa_flush_all (which uses wbinvd): +	 * avoid the wbindv. 
If the CPU does not support it and in the +	 * error case we fall back to cpa_flush_all (which uses +	 * wbindv):  	 */ -	if (early_boot_irqs_disabled) -		__cpa_flush_all((void *)(long)cache); -	else if (!ret && cpu_has_clflush) { +	if (!ret && cpu_has_clflush) {  		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {  			cpa_flush_array(addr, numpages, cache,  					cpa.flags, pages); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 2dc29f51e75..92660edaa1e 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -234,7 +234,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map(  	return status;  } -static int efi_set_rtc_mmss(unsigned long nowtime) +static efi_status_t __init phys_efi_get_time(efi_time_t *tm, +					     efi_time_cap_t *tc) +{ +	unsigned long flags; +	efi_status_t status; + +	spin_lock_irqsave(&rtc_lock, flags); +	efi_call_phys_prelog(); +	status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm), +				virt_to_phys(tc)); +	efi_call_phys_epilog(); +	spin_unlock_irqrestore(&rtc_lock, flags); +	return status; +} + +int efi_set_rtc_mmss(unsigned long nowtime)  {  	int real_seconds, real_minutes;  	efi_status_t 	status; @@ -263,7 +278,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime)  	return 0;  } -static unsigned long efi_get_time(void) +unsigned long efi_get_time(void)  {  	efi_status_t status;  	efi_time_t eft; @@ -606,13 +621,18 @@ static int __init efi_runtime_init(void)  	}  	/*  	 * We will only need *early* access to the following -	 * EFI runtime service before set_virtual_address_map +	 * two EFI runtime services before set_virtual_address_map  	 * is invoked.  	 */ +	efi_phys.get_time = (efi_get_time_t *)runtime->get_time;  	efi_phys.set_virtual_address_map =  		(efi_set_virtual_address_map_t *)  		runtime->set_virtual_address_map; - +	/* +	 * Make efi_get_time can be called before entering +	 * virtual mode. 
+	 */ +	efi.get_time = phys_efi_get_time;  	early_iounmap(runtime, sizeof(efi_runtime_services_t));  	return 0; @@ -700,10 +720,12 @@ void __init efi_init(void)  		efi_enabled = 0;  		return;  	} +#ifdef CONFIG_X86_32  	if (efi_native) {  		x86_platform.get_wallclock = efi_get_time;  		x86_platform.set_wallclock = efi_set_rtc_mmss;  	} +#endif  #if EFI_DEBUG  	print_efi_memmap(); diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index b2d534cab25..88692871823 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -72,7 +72,7 @@ KBUILD_CFLAGS	:= $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \  		   -Wall -Wstrict-prototypes \  		   -march=i386 -mregparm=3 \  		   -include $(srctree)/$(src)/../../boot/code16gcc.h \ -		   -fno-strict-aliasing -fomit-frame-pointer \ +		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \  		   $(call cc-option, -ffreestanding) \  		   $(call cc-option, -fno-toplevel-reorder,\  			$(call cc-option, -fno-unit-at-a-time)) \ diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index 29aed7ac2c0..a582bfed95b 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -60,8 +60,8 @@  51	common	getsockname		sys_getsockname  52	common	getpeername		sys_getpeername  53	common	socketpair		sys_socketpair -54	common	setsockopt		sys_setsockopt -55	common	getsockopt		sys_getsockopt +54	64	setsockopt		sys_setsockopt +55	64	getsockopt		sys_getsockopt  56	common	clone			stub_clone  57	common	fork			stub_fork  58	common	vfork			stub_vfork @@ -353,3 +353,5 @@  538	x32	sendmmsg		compat_sys_sendmmsg  539	x32	process_vm_readv	compat_sys_process_vm_readv  540	x32	process_vm_writev	compat_sys_process_vm_writev +541	x32	setsockopt		compat_sys_setsockopt +542	x32	getsockopt		compat_sys_getsockopt diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bf4bda6d3e9..9642d4a3860 100644 --- a/arch/x86/xen/enlighten.c +++ 
b/arch/x86/xen/enlighten.c @@ -31,7 +31,6 @@  #include <linux/pci.h>  #include <linux/gfp.h>  #include <linux/memblock.h> -#include <linux/syscore_ops.h>  #include <xen/xen.h>  #include <xen/interface/xen.h> @@ -1470,130 +1469,38 @@ asmlinkage void __init xen_start_kernel(void)  #endif  } -#ifdef CONFIG_XEN_PVHVM -/* - * The pfn containing the shared_info is located somewhere in RAM. This - * will cause trouble if the current kernel is doing a kexec boot into a - * new kernel. The new kernel (and its startup code) can not know where - * the pfn is, so it can not reserve the page. The hypervisor will - * continue to update the pfn, and as a result memory corruption occours - * in the new kernel. - * - * One way to work around this issue is to allocate a page in the - * xen-platform pci device's BAR memory range. But pci init is done very - * late and the shared_info page is already in use very early to read - * the pvclock. So moving the pfn from RAM to MMIO is racy because some - * code paths on other vcpus could access the pfn during the small - * window when the old pfn is moved to the new pfn. There is even a - * small window were the old pfn is not backed by a mfn, and during that - * time all reads return -1. - * - * Because it is not known upfront where the MMIO region is located it - * can not be used right from the start in xen_hvm_init_shared_info. - * - * To minimise trouble the move of the pfn is done shortly before kexec. - * This does not eliminate the race because all vcpus are still online - * when the syscore_ops will be called. But hopefully there is no work - * pending at this point in time. Also the syscore_op is run last which - * reduces the risk further. 
- */ - -static struct shared_info *xen_hvm_shared_info; - -static void xen_hvm_connect_shared_info(unsigned long pfn) +void __ref xen_hvm_init_shared_info(void)  { +	int cpu;  	struct xen_add_to_physmap xatp; +	static struct shared_info *shared_info_page = 0; +	if (!shared_info_page) +		shared_info_page = (struct shared_info *) +			extend_brk(PAGE_SIZE, PAGE_SIZE);  	xatp.domid = DOMID_SELF;  	xatp.idx = 0;  	xatp.space = XENMAPSPACE_shared_info; -	xatp.gpfn = pfn; +	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;  	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))  		BUG(); -} -static void xen_hvm_set_shared_info(struct shared_info *sip) -{ -	int cpu; - -	HYPERVISOR_shared_info = sip; +	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;  	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info  	 * page, we use it in the event channel upcall and in some pvclock  	 * related functions. We don't need the vcpu_info placement  	 * optimizations because we don't use any pv_mmu or pv_irq op on  	 * HVM. -	 * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is -	 * online but xen_hvm_set_shared_info is run at resume time too and +	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is +	 * online but xen_hvm_init_shared_info is run at resume time too and  	 * in that case multiple vcpus might be online. 
*/  	for_each_online_cpu(cpu) {  		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];  	}  } -/* Reconnect the shared_info pfn to a mfn */ -void xen_hvm_resume_shared_info(void) -{ -	xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT); -} - -#ifdef CONFIG_KEXEC -static struct shared_info *xen_hvm_shared_info_kexec; -static unsigned long xen_hvm_shared_info_pfn_kexec; - -/* Remember a pfn in MMIO space for kexec reboot */ -void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn) -{ -	xen_hvm_shared_info_kexec = sip; -	xen_hvm_shared_info_pfn_kexec = pfn; -} - -static void xen_hvm_syscore_shutdown(void) -{ -	struct xen_memory_reservation reservation = { -		.domid = DOMID_SELF, -		.nr_extents = 1, -	}; -	unsigned long prev_pfn; -	int rc; - -	if (!xen_hvm_shared_info_kexec) -		return; - -	prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT; -	set_xen_guest_handle(reservation.extent_start, &prev_pfn); - -	/* Move pfn to MMIO, disconnects previous pfn from mfn */ -	xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec); - -	/* Update pointers, following hypercall is also a memory barrier */ -	xen_hvm_set_shared_info(xen_hvm_shared_info_kexec); - -	/* Allocate new mfn for previous pfn */ -	do { -		rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); -		if (rc == 0) -			msleep(123); -	} while (rc == 0); - -	/* Make sure the previous pfn is really connected to a (new) mfn */ -	BUG_ON(rc != 1); -} - -static struct syscore_ops xen_hvm_syscore_ops = { -	.shutdown = xen_hvm_syscore_shutdown, -}; -#endif - -/* Use a pfn in RAM, may move to MMIO before kexec. 
*/ -static void __init xen_hvm_init_shared_info(void) -{ -	/* Remember pointer for resume */ -	xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE); -	xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT); -	xen_hvm_set_shared_info(xen_hvm_shared_info); -} - +#ifdef CONFIG_XEN_PVHVM  static void __init init_hvm_pv_info(void)  {  	int major, minor; @@ -1644,9 +1551,6 @@ static void __init xen_hvm_guest_init(void)  	init_hvm_pv_info();  	xen_hvm_init_shared_info(); -#ifdef CONFIG_KEXEC -	register_syscore_ops(&xen_hvm_syscore_ops); -#endif  	if (xen_feature(XENFEAT_hvm_callback_vector))  		xen_have_vector_callback = 1; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index b2e91d40a4c..d4b25546325 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -196,9 +196,11 @@ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);  /* When we populate back during bootup, the amount of pages can vary. The   * max we have is seen is 395979, but that does not mean it can't be more. - * But some machines can have 3GB I/O holes even. So lets reserve enough - * for 4GB of I/O and E820 holes. */ -RESERVE_BRK(p2m_populated, PMD_SIZE * 4); + * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle + * it can re-use Xen provided mfn_list array, so we only need to allocate at + * most three P2M top nodes. */ +RESERVE_BRK(p2m_populated, PAGE_SIZE * 3); +  static inline unsigned p2m_top_index(unsigned long pfn)  {  	BUG_ON(pfn >= MAX_P2M_PFN); @@ -575,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn)  	}  	return true;  } + +/* + * Skim over the P2M tree looking at pages that are either filled with + * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and + * replace the P2M leaf with a p2m_missing or p2m_identity. + * Stick the old page in the new P2M tree location. 
+ */ +bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn) +{ +	unsigned topidx; +	unsigned mididx; +	unsigned ident_pfns; +	unsigned inv_pfns; +	unsigned long *p2m; +	unsigned long *mid_mfn_p; +	unsigned idx; +	unsigned long pfn; + +	/* We only look when this entails a P2M middle layer */ +	if (p2m_index(set_pfn)) +		return false; + +	for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { +		topidx = p2m_top_index(pfn); + +		if (!p2m_top[topidx]) +			continue; + +		if (p2m_top[topidx] == p2m_mid_missing) +			continue; + +		mididx = p2m_mid_index(pfn); +		p2m = p2m_top[topidx][mididx]; +		if (!p2m) +			continue; + +		if ((p2m == p2m_missing) || (p2m == p2m_identity)) +			continue; + +		if ((unsigned long)p2m == INVALID_P2M_ENTRY) +			continue; + +		ident_pfns = 0; +		inv_pfns = 0; +		for (idx = 0; idx < P2M_PER_PAGE; idx++) { +			/* IDENTITY_PFNs are 1:1 */ +			if (p2m[idx] == IDENTITY_FRAME(pfn + idx)) +				ident_pfns++; +			else if (p2m[idx] == INVALID_P2M_ENTRY) +				inv_pfns++; +			else +				break; +		} +		if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE)) +			goto found; +	} +	return false; +found: +	/* Found one, replace old with p2m_identity or p2m_missing */ +	p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing); +	/* And the other for save/restore.. */ +	mid_mfn_p = p2m_top_mfn_p[topidx]; +	/* NOTE: Even if it is a p2m_identity it should still be point to +	 * a page filled with INVALID_P2M_ENTRY entries. */ +	mid_mfn_p[mididx] = virt_to_mfn(p2m_missing); + +	/* Reset where we want to stick the old page in. 
*/ +	topidx = p2m_top_index(set_pfn); +	mididx = p2m_mid_index(set_pfn); + +	/* This shouldn't happen */ +	if (WARN_ON(p2m_top[topidx] == p2m_mid_missing)) +		early_alloc_p2m(set_pfn); + +	if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing)) +		return false; + +	p2m_init(p2m); +	p2m_top[topidx][mididx] = p2m; +	mid_mfn_p = p2m_top_mfn_p[topidx]; +	mid_mfn_p[mididx] = virt_to_mfn(p2m); + +	return true; +}  bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)  {  	if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {  		if (!early_alloc_p2m(pfn))  			return false; +		if (early_can_reuse_p2m_middle(pfn, mfn)) +			return __set_phys_to_machine(pfn, mfn); +  		if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))  			return false; diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index ead85576d54..d11ca11d14f 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size)  	memblock_reserve(start, size);  	xen_max_p2m_pfn = PFN_DOWN(start + size); +	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) { +		unsigned long mfn = pfn_to_mfn(pfn); + +		if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn)) +			continue; +		WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n", +			pfn, mfn); -	for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)  		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY); +	}  }  static unsigned long __init xen_do_chunk(unsigned long start, diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index ae8a00c39de..45329c8c226 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)  {  #ifdef CONFIG_XEN_PVHVM  	int cpu; -	xen_hvm_resume_shared_info(); +	xen_hvm_init_shared_info();  	xen_callback_vector();  	xen_unplug_emulated_devices();  	if (xen_feature(XENFEAT_hvm_safe_pvclock)) { diff --git 
a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 1e4329e04e0..202d4c15015 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -41,7 +41,7 @@ void xen_enable_syscall(void);  void xen_vcpu_restore(void);  void xen_callback_vector(void); -void xen_hvm_resume_shared_info(void); +void xen_hvm_init_shared_info(void);  void xen_unplug_emulated_devices(void);  void __init xen_build_dynamic_phys_to_machine(void); diff --git a/block/blk-lib.c b/block/blk-lib.c index 2b461b496a7..19cc761cacb 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,  	struct request_queue *q = bdev_get_queue(bdev);  	int type = REQ_WRITE | REQ_DISCARD;  	unsigned int max_discard_sectors; +	unsigned int granularity, alignment, mask;  	struct bio_batch bb;  	struct bio *bio;  	int ret = 0; @@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,  	if (!blk_queue_discard(q))  		return -EOPNOTSUPP; +	/* Zero-sector (unknown) and one-sector granularities are the same.  */ +	granularity = max(q->limits.discard_granularity >> 9, 1U); +	mask = granularity - 1; +	alignment = (bdev_discard_alignment(bdev) >> 9) & mask; +  	/*  	 * Ensure that max_discard_sectors is of the proper -	 * granularity +	 * granularity, so that requests stay aligned after a split.  	 */  	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); +	max_discard_sectors = round_down(max_discard_sectors, granularity);  	if (unlikely(!max_discard_sectors)) {  		/* Avoid infinite loop below. Being cautious never hurts. 
*/  		return -EOPNOTSUPP; -	} else if (q->limits.discard_granularity) { -		unsigned int disc_sects = q->limits.discard_granularity >> 9; - -		max_discard_sectors &= ~(disc_sects - 1);  	}  	if (flags & BLKDEV_DISCARD_SECURE) { @@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,  	bb.wait = &wait;  	while (nr_sects) { +		unsigned int req_sects; +		sector_t end_sect; +  		bio = bio_alloc(gfp_mask, 1);  		if (!bio) {  			ret = -ENOMEM;  			break;  		} +		req_sects = min_t(sector_t, nr_sects, max_discard_sectors); + +		/* +		 * If splitting a request, and the next starting sector would be +		 * misaligned, stop the discard at the previous aligned sector. +		 */ +		end_sect = sector + req_sects; +		if (req_sects < nr_sects && (end_sect & mask) != alignment) { +			end_sect = +				round_down(end_sect - alignment, granularity) +				+ alignment; +			req_sects = end_sect - sector; +		} +  		bio->bi_sector = sector;  		bio->bi_end_io = bio_batch_end_io;  		bio->bi_bdev = bdev;  		bio->bi_private = &bb; -		if (nr_sects > max_discard_sectors) { -			bio->bi_size = max_discard_sectors << 9; -			nr_sects -= max_discard_sectors; -			sector += max_discard_sectors; -		} else { -			bio->bi_size = nr_sects << 9; -			nr_sects = 0; -		} +		bio->bi_size = req_sects << 9; +		nr_sects -= req_sects; +		sector = end_sect;  		atomic_inc(&bb.done);  		submit_bio(type, bio); diff --git a/block/blk-merge.c b/block/blk-merge.c index 160035f5488..e76279e4116 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,  	return 0;  } +static void +__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, +		     struct scatterlist *sglist, struct bio_vec **bvprv, +		     struct scatterlist **sg, int *nsegs, int *cluster) +{ + +	int nbytes = bvec->bv_len; + +	if (*bvprv && *cluster) { +		if ((*sg)->length + nbytes > queue_max_segment_size(q)) +			goto new_segment; 
+ +		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec)) +			goto new_segment; +		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec)) +			goto new_segment; + +		(*sg)->length += nbytes; +	} else { +new_segment: +		if (!*sg) +			*sg = sglist; +		else { +			/* +			 * If the driver previously mapped a shorter +			 * list, we could see a termination bit +			 * prematurely unless it fully inits the sg +			 * table on each mapping. We KNOW that there +			 * must be more entries here or the driver +			 * would be buggy, so force clear the +			 * termination bit to avoid doing a full +			 * sg_init_table() in drivers for each command. +			 */ +			(*sg)->page_link &= ~0x02; +			*sg = sg_next(*sg); +		} + +		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); +		(*nsegs)++; +	} +	*bvprv = bvec; +} +  /*   * map a request to scatterlist, return number of sg entries setup. Caller   * must make sure sg can hold rq->nr_phys_segments entries @@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,  	bvprv = NULL;  	sg = NULL;  	rq_for_each_segment(bvec, rq, iter) { -		int nbytes = bvec->bv_len; - -		if (bvprv && cluster) { -			if (sg->length + nbytes > queue_max_segment_size(q)) -				goto new_segment; - -			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) -				goto new_segment; -			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) -				goto new_segment; - -			sg->length += nbytes; -		} else { -new_segment: -			if (!sg) -				sg = sglist; -			else { -				/* -				 * If the driver previously mapped a shorter -				 * list, we could see a termination bit -				 * prematurely unless it fully inits the sg -				 * table on each mapping. We KNOW that there -				 * must be more entries here or the driver -				 * would be buggy, so force clear the -				 * termination bit to avoid doing a full -				 * sg_init_table() in drivers for each command. 
-				 */ -				sg->page_link &= ~0x02; -				sg = sg_next(sg); -			} - -			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); -			nsegs++; -		} -		bvprv = bvec; +		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, +				     &nsegs, &cluster);  	} /* segments in rq */ @@ -199,6 +209,43 @@ new_segment:  }  EXPORT_SYMBOL(blk_rq_map_sg); +/** + * blk_bio_map_sg - map a bio to a scatterlist + * @q: request_queue in question + * @bio: bio being mapped + * @sglist: scatterlist being mapped + * + * Note: + *    Caller must make sure sg can hold bio->bi_phys_segments entries + * + * Will return the number of sg entries setup + */ +int blk_bio_map_sg(struct request_queue *q, struct bio *bio, +		   struct scatterlist *sglist) +{ +	struct bio_vec *bvec, *bvprv; +	struct scatterlist *sg; +	int nsegs, cluster; +	unsigned long i; + +	nsegs = 0; +	cluster = blk_queue_cluster(q); + +	bvprv = NULL; +	sg = NULL; +	bio_for_each_segment(bvec, bio, i) { +		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, +				     &nsegs, &cluster); +	} /* segments in bio */ + +	if (sg) +		sg_mark_end(sg); + +	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments); +	return nsegs; +} +EXPORT_SYMBOL(blk_bio_map_sg); +  static inline int ll_new_hw_segment(struct request_queue *q,  				    struct request *req,  				    struct bio *bio) diff --git a/block/genhd.c b/block/genhd.c index cac7366957c..d839723303c 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -835,7 +835,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)  static void *show_partition_start(struct seq_file *seqf, loff_t *pos)  { -	static void *p; +	void *p;  	p = disk_seqf_start(seqf, pos);  	if (!IS_ERR_OR_NULL(p) && !*pos) diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index ea4c6d52605..29e51bc0138 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -387,6 +387,7 @@ acpi_get_table_with_size(char *signature,  	return (AE_NOT_FOUND);  } 
+ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)  acpi_status  acpi_get_table(char *signature, diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 2be8ef1d309..27cecd313e7 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -115,7 +115,7 @@ config SATA_SIL24  	  If unsure, say N.  config ATA_SFF -	bool "ATA SFF support" +	bool "ATA SFF support (for legacy IDE and PATA)"  	default y  	help  	  This option adds support for ATA controllers with SFF diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 062e6a1a248..50d5dea0ff5 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -256,6 +256,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {  	{ PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */  	{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */  	{ PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ +	{ PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ +	{ PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ +	{ PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ +	{ PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */ +	{ PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */ +	{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */ +	{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */ +	{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */  	/* JMicron 360/1/3/5/6, match class to avoid IDE function */  	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index c2594ddf25b..57eb1c212a4 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -320,6 +320,7 @@ extern struct device_attribute *ahci_sdev_attrs[];  extern struct ata_port_operations ahci_ops;  extern struct ata_port_operations ahci_pmp_retry_srst_ops; +unsigned int ahci_dev_classify(struct ata_port *ap);  void ahci_fill_cmd_slot(struct ahci_port_priv 
*pp, unsigned int tag,  			u32 opts);  void ahci_save_initial_config(struct device *dev, diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 3c809bfbccf..ef773e12af7 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -329,6 +329,14 @@ static const struct pci_device_id piix_pci_tbl[] = {  	{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },  	/* SATA Controller IDE (Lynx Point) */  	{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, +	/* SATA Controller IDE (Lynx Point-LP) */ +	{ 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, +	/* SATA Controller IDE (Lynx Point-LP) */ +	{ 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, +	/* SATA Controller IDE (Lynx Point-LP) */ +	{ 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, +	/* SATA Controller IDE (Lynx Point-LP) */ +	{ 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },  	/* SATA Controller IDE (DH89xxCC) */  	{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },  	{ }	/* terminate list */ diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index f9eaa82311a..555c07afa05 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1139,7 +1139,7 @@ static void ahci_dev_config(struct ata_device *dev)  	}  } -static unsigned int ahci_dev_classify(struct ata_port *ap) +unsigned int ahci_dev_classify(struct ata_port *ap)  {  	void __iomem *port_mmio = ahci_port_base(ap);  	struct ata_taskfile tf; @@ -1153,6 +1153,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)  	return ata_dev_classify(&tf);  } +EXPORT_SYMBOL_GPL(ahci_dev_classify);  void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,  			u32 opts) diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 902b5a45717..fd9ecf74e63 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -60,17 +60,7 @@ acpi_handle ata_ap_acpi_handle(struct ata_port *ap)  	if 
(ap->flags & ATA_FLAG_ACPI_SATA)  		return NULL; -	/* -	 * If acpi bind operation has already happened, we can get the handle -	 * for the port by checking the corresponding scsi_host device's -	 * firmware node, otherwise we will need to find out the handle from -	 * its parent's acpi node. -	 */ -	if (ap->scsi_host) -		return DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev); -	else -		return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), -				ap->port_no); +	return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no);  }  EXPORT_SYMBOL(ata_ap_acpi_handle); @@ -1101,6 +1091,9 @@ static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle)  	if (!*handle)  		return -ENODEV; +	if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) +		ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; +  	return 0;  } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index fadd5866d40..8e1039c8e15 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4062,7 +4062,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {  	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },  	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },  	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA }, -	{ "2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA }, +	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },  	/* Odd clown on sil3726/4726 PMPs */  	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE }, @@ -4128,6 +4128,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {  	/* Devices that do not need bridging limits applied */  	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, }, +	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },  	/* Devices which aren't very happy with higher link speeds */  	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, }, diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 361c75cea57..24e51056ac2 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -20,6 
+20,7 @@  #include <linux/delay.h>  #include <scsi/scsi_host.h>  #include <linux/libata.h> +#include <linux/dmi.h>  #define DRV_NAME "pata_atiixp"  #define DRV_VERSION "0.4.6" @@ -33,11 +34,26 @@ enum {  	ATIIXP_IDE_UDMA_MODE 	= 0x56  }; +static const struct dmi_system_id attixp_cable_override_dmi_table[] = { +	{ +		/* Board has onboard PATA<->SATA converters */ +		.ident = "MSI E350DM-E33", +		.matches = { +			DMI_MATCH(DMI_BOARD_VENDOR, "MSI"), +			DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"), +		}, +	}, +	{ } +}; +  static int atiixp_cable_detect(struct ata_port *ap)  {  	struct pci_dev *pdev = to_pci_dev(ap->host->dev);  	u8 udma; +	if (dmi_check_system(attixp_cable_override_dmi_table)) +		return ATA_CBL_PATA40_SHORT; +  	/* Hack from drivers/ide/pci. Really we want to know how to do the  	   raw detection not play follow the bios mode guess */  	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma); diff --git a/drivers/base/core.c b/drivers/base/core.c index f338037a4f3..5e6e00bc165 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1865,6 +1865,7 @@ int __dev_printk(const char *level, const struct device *dev,  		 struct va_format *vaf)  {  	char dict[128]; +	const char *level_extra = "";  	size_t dictlen = 0;  	const char *subsys; @@ -1911,10 +1912,14 @@ int __dev_printk(const char *level, const struct device *dev,  				    "DEVICE=+%s:%s", subsys, dev_name(dev));  	}  skip: +	if (level[2]) +		level_extra = &level[2]; /* skip past KERN_SOH "L" */ +  	return printk_emit(0, level[1] - '0',  			   dictlen ? 
dict : NULL, dictlen, -			   "%s %s: %pV", -			   dev_driver_string(dev), dev_name(dev), vaf); +			   "%s %s: %s%pV", +			   dev_driver_string(dev), dev_name(dev), +			   level_extra, vaf);  }  EXPORT_SYMBOL(__dev_printk); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 59894873a3b..7d9c1cb1c39 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -147,6 +147,8 @@ static int rpm_check_suspend_allowed(struct device *dev)  	    || (dev->power.request_pending  			&& dev->power.request == RPM_REQ_RESUME))  		retval = -EAGAIN; +	else if (__dev_pm_qos_read_value(dev) < 0) +		retval = -EPERM;  	else if (dev->power.runtime_status == RPM_SUSPENDED)  		retval = 1; @@ -388,7 +390,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)  		goto repeat;  	} -	dev->power.deferred_resume = false;  	if (dev->power.no_callbacks)  		goto no_callback;	/* Assume success. */ @@ -403,12 +404,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)  		goto out;  	} -	if (__dev_pm_qos_read_value(dev) < 0) { -		/* Negative PM QoS constraint means "never suspend". */ -		retval = -EPERM; -		goto out; -	} -  	__update_runtime_status(dev, RPM_SUSPENDING);  	if (dev->pm_domain) @@ -440,6 +435,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)  	wake_up_all(&dev->power.wait_queue);  	if (dev->power.deferred_resume) { +		dev->power.deferred_resume = false;  		rpm_resume(dev, 0);  		retval = -EAGAIN;  		goto out; @@ -584,6 +580,7 @@ static int rpm_resume(struct device *dev, int rpmflags)  		    || dev->parent->power.runtime_status == RPM_ACTIVE) {  			atomic_inc(&dev->parent->power.child_count);  			spin_unlock(&dev->parent->power.lock); +			retval = 1;  			goto no_callback;	/* Assume success. 
*/  		}  		spin_unlock(&dev->parent->power.lock); @@ -664,7 +661,7 @@ static int rpm_resume(struct device *dev, int rpmflags)  	}  	wake_up_all(&dev->power.wait_queue); -	if (!retval) +	if (retval >= 0)  		rpm_idle(dev, RPM_ASYNC);   out: diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index acda773b372..38aa6dda6b8 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,  		{  			case CMD_TARGET_STATUS:  				/* Pass it up to the upper layers... */ -				if( ei->ScsiStatus) -                		{ -#if 0 -                    			printk(KERN_WARNING "cciss: cmd %p " -						"has SCSI Status = %x\n", -						c, ei->ScsiStatus); -#endif -					cmd->result |= (ei->ScsiStatus << 1); -                		} -				else {  /* scsi status is zero??? How??? */ +				if (!ei->ScsiStatus) {  	/* Ordinarily, this case should never happen, but there is a bug  	   in some released firmware revisions that allows it to happen diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index ba91b408aba..d8456649674 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -889,6 +889,7 @@ struct bm_aio_ctx {  	unsigned int done;  	unsigned flags;  #define BM_AIO_COPY_PAGES	1 +#define BM_WRITE_ALL_PAGES	2  	int error;  	struct kref kref;  }; @@ -1059,7 +1060,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w  		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)  			break;  		if (rw & WRITE) { -			if (bm_test_page_unchanged(b->bm_pages[i])) { +			if (!(flags & BM_WRITE_ALL_PAGES) && +			    bm_test_page_unchanged(b->bm_pages[i])) {  				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);  				continue;  			} @@ -1141,6 +1143,17 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)  }  /** + * drbd_bm_write_all() - Write the whole bitmap to its on disk location. 
+ * @mdev:	DRBD device. + * + * Will write all pages. + */ +int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local) +{ +	return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0); +} + +/**   * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.   * @mdev:	DRBD device.   * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index b2ca143d005..b953cc7c9c0 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1469,6 +1469,7 @@ extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);  extern int  drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);  extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);  extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); +extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);  extern int  drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);  extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,  		unsigned long al_enr); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index dbe6135a2ab..f93a0320e95 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -79,6 +79,7 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);  static void md_sync_timer_fn(unsigned long data);  static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);  static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused); +static void _tl_clear(struct drbd_conf *mdev);  MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "  	      "Lars Ellenberg <lars@linbit.com>"); @@ -432,19 +433,10 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)  	/* Actions operating on the disk state, also want to work on  	   requests that got 
barrier acked. */ -	switch (what) { -	case fail_frozen_disk_io: -	case restart_frozen_disk_io: -		list_for_each_safe(le, tle, &mdev->barrier_acked_requests) { -			req = list_entry(le, struct drbd_request, tl_requests); -			_req_mod(req, what); -		} -	case connection_lost_while_pending: -	case resend: -		break; -	default: -		dev_err(DEV, "what = %d in _tl_restart()\n", what); +	list_for_each_safe(le, tle, &mdev->barrier_acked_requests) { +		req = list_entry(le, struct drbd_request, tl_requests); +		_req_mod(req, what);  	}  } @@ -459,11 +451,16 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)   */  void tl_clear(struct drbd_conf *mdev)  { +	spin_lock_irq(&mdev->req_lock); +	_tl_clear(mdev); +	spin_unlock_irq(&mdev->req_lock); +} + +static void _tl_clear(struct drbd_conf *mdev) +{  	struct list_head *le, *tle;  	struct drbd_request *r; -	spin_lock_irq(&mdev->req_lock); -  	_tl_restart(mdev, connection_lost_while_pending);  	/* we expect this list to be empty. */ @@ -482,7 +479,6 @@ void tl_clear(struct drbd_conf *mdev)  	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); -	spin_unlock_irq(&mdev->req_lock);  }  void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) @@ -1476,12 +1472,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,  	if (ns.susp_fen) {  		/* case1: The outdate peer handler is successful: */  		if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) { -			tl_clear(mdev);  			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {  				drbd_uuid_new_current(mdev);  				clear_bit(NEW_CUR_UUID, &mdev->flags);  			}  			spin_lock_irq(&mdev->req_lock); +			_tl_clear(mdev);  			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);  			spin_unlock_irq(&mdev->req_lock);  		} diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index fb9dce8daa2..edb490aad8b 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -674,8 +674,8 @@ enum determine_dev_size 
drbd_determine_dev_size(struct drbd_conf *mdev, enum dds  			 la_size_changed && md_moved ? "size changed and md moved" :  			 la_size_changed ? "size changed" : "md moved");  		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ -		err = drbd_bitmap_io(mdev, &drbd_bm_write, -				"size changed", BM_LOCKED_MASK); +		err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write, +				     "size changed", BM_LOCKED_MASK);  		if (err) {  			rv = dev_size_error;  			goto out; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 910335c3092..01b2ac641c7 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -695,6 +695,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,  		break;  	case resend: +		/* Simply complete (local only) READs. */ +		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { +			_req_may_be_done(req, m); +			break; +		} +  		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK  		   before the connection loss (B&C only); only P_BARRIER_ACK was missing.  		   Trowing them out of the TL here by pretending we got a BARRIER_ACK @@ -834,7 +840,15 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns  		req->private_bio = NULL;  	}  	if (rw == WRITE) { -		remote = 1; +		/* Need to replicate writes.  Unless it is an empty flush, +		 * which is better mapped to a DRBD P_BARRIER packet, +		 * also for drbd wire protocol compatibility reasons. */ +		if (unlikely(size == 0)) { +			/* The only size==0 bios we expect are empty flushes. */ +			D_ASSERT(bio->bi_rw & REQ_FLUSH); +			remote = 0; +		} else +			remote = 1;  	} else {  		/* READ || READA */  		if (local) { @@ -870,8 +884,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns  	 * extent.  
This waits for any resync activity in the corresponding  	 * resync extent to finish, and, if necessary, pulls in the target  	 * extent into the activity log, which involves further disk io because -	 * of transactional on-disk meta data updates. */ -	if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) { +	 * of transactional on-disk meta data updates. +	 * Empty flushes don't need to go into the activity log, they can only +	 * flush data for pending writes which are already in there. */ +	if (rw == WRITE && local && size +	&& !test_bit(AL_SUSPENDED, &mdev->flags)) {  		req->rq_state |= RQ_IN_ACT_LOG;  		drbd_al_begin_io(mdev, sector);  	} @@ -994,7 +1011,10 @@ allocate_barrier:  	if (rw == WRITE && _req_conflicts(req))  		goto fail_conflicting; -	list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); +	/* no point in adding empty flushes to the transfer log, +	 * they are mapped to drbd barriers already. */ +	if (likely(size!=0)) +		list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);  	/* NOTE remote first: to get the concurrent write detection right,  	 * we must register the request before start of local IO.  */ @@ -1014,6 +1034,14 @@ allocate_barrier:  	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)  		maybe_pull_ahead(mdev); +	/* If this was a flush, queue a drbd barrier/start a new epoch. +	 * Unless the current epoch was empty anyways, or we are not currently +	 * replicating, in which case there is no point. */ +	if (unlikely(bio->bi_rw & REQ_FLUSH) +		&& mdev->newest_tle->n_writes +		&& drbd_should_do_remote(mdev->state)) +		queue_barrier(mdev); +  	spin_unlock_irq(&mdev->req_lock);  	kfree(b); /* if someone else has beaten us to it... 
*/ diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 10308cd8a7e..11f36e50213 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -79,6 +79,7 @@ static struct usb_device_id ath3k_table[] = {  	{ USB_DEVICE(0x13d3, 0x3362) },  	{ USB_DEVICE(0x0CF3, 0xE004) },  	{ USB_DEVICE(0x0930, 0x0219) }, +	{ USB_DEVICE(0x0489, 0xe057) },  	/* Atheros AR5BBU12 with sflash firmware */  	{ USB_DEVICE(0x0489, 0xE02C) }, @@ -104,6 +105,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {  	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },  	/* Atheros AR5BBU22 with sflash firmware */  	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index e2722141103..cef3bac1a54 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -98,6 +98,7 @@ static struct usb_device_id btusb_table[] = {  	{ USB_DEVICE(0x0a5c, 0x21e6) },  	{ USB_DEVICE(0x0a5c, 0x21e8) },  	{ USB_DEVICE(0x0a5c, 0x21f3) }, +	{ USB_DEVICE(0x0a5c, 0x21f4) },  	{ USB_DEVICE(0x413c, 0x8197) },  	/* Foxconn - Hon Hai */ @@ -133,6 +134,7 @@ static struct usb_device_id blacklist_table[] = {  	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },  	/* Atheros AR5BBU12 with sflash firmware */  	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 6f007b6c240..6ec0fff79bc 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -64,6 +64,7 @@  #define I830_PTE_SYSTEM_CACHED  0x00000006  /* 
GT PTE cache control fields */  #define GEN6_PTE_UNCACHED	0x00000002 +#define HSW_PTE_UNCACHED	0x00000000  #define GEN6_PTE_LLC		0x00000004  #define GEN6_PTE_LLC_MLC	0x00000006  #define GEN6_PTE_GFDT		0x00000008 diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 08fc5cbb13c..58e32f7c322 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1156,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags)  	return true;  } +static void haswell_write_entry(dma_addr_t addr, unsigned int entry, +				unsigned int flags) +{ +	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; +	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; +	u32 pte_flags; + +	if (type_mask == AGP_USER_MEMORY) +		pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID; +	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { +		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; +		if (gfdt) +			pte_flags |= GEN6_PTE_GFDT; +	} else { /* set 'normal'/'cached' to LLC by default */ +		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; +		if (gfdt) +			pte_flags |= GEN6_PTE_GFDT; +	} + +	/* gen6 has bit11-4 for physical addr bit39-32 */ +	addr |= (addr >> 28) & 0xff0; +	writel(addr | pte_flags, intel_private.gtt + entry); +} +  static void gen6_write_entry(dma_addr_t addr, unsigned int entry,  			     unsigned int flags)  { @@ -1382,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {  	.check_flags = gen6_check_flags,  	.chipset_flush = i9xx_chipset_flush,  }; +static const struct intel_gtt_driver haswell_gtt_driver = { +	.gen = 6, +	.setup = i9xx_setup, +	.cleanup = gen6_cleanup, +	.write_entry = haswell_write_entry, +	.dma_mask_size = 40, +	.check_flags = gen6_check_flags, +	.chipset_flush = i9xx_chipset_flush, +};  static const struct intel_gtt_driver valleyview_gtt_driver = {  	.gen = 7,  	.setup = i9xx_setup, @@ -1499,77 +1532,77 @@ static const struct intel_gtt_driver_description {  	{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, 
 	    "ValleyView", &valleyview_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ 
PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ 
PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, -	    "Haswell", &sandybridge_gtt_driver }, +	    "Haswell", &haswell_gtt_driver },  	{ 0, NULL, NULL }  }; diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c index 540795cd076..d9279385304 100644 --- a/drivers/clocksource/cs5535-clockevt.c +++ b/drivers/clocksource/cs5535-clockevt.c @@ -53,7 +53,7 @@ static struct cs5535_mfgpt_timer *cs5535_event_clock;  #define MFGPT_PERIODIC (MFGPT_HZ / HZ)  /* - * The MFPGT timers on the CS5536 provide us with suitable timers to use + * The MFGPT timers on the CS5536 provide us with suitable timers to use   * as clock event sources - not as good as a HPET or APIC, but certainly   * better than the PIT.  This isn't a general purpose MFGPT driver, but   * a simplified one designed specifically to act as a clock event source. 
@@ -144,7 +144,7 @@ static int __init cs5535_mfgpt_init(void)  	timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);  	if (!timer) { -		printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n"); +		printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");  		return -ENODEV;  	}  	cs5535_event_clock = timer; diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 17fa04d08be..b47034e650a 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -218,7 +218,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)  	policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); -	if (atomic_inc_return(&freq_table_users) == 1) +	if (!freq_table)  		result = opp_init_cpufreq_table(mpu_dev, &freq_table);  	if (result) { @@ -227,6 +227,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)  		goto fail_ck;  	} +	atomic_inc_return(&freq_table_users); +  	result = cpufreq_frequency_table_cpuinfo(policy, freq_table);  	if (result)  		goto fail_table; diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 2c9bf269223..3265844839b 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -678,10 +678,22 @@ static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,  	int cpu = (unsigned long)hcpu;  	struct cpuidle_device *dev; +	switch (action & ~CPU_TASKS_FROZEN) { +	case CPU_UP_PREPARE: +	case CPU_DOWN_PREPARE: +	case CPU_ONLINE: +	case CPU_DEAD: +	case CPU_UP_CANCELED: +	case CPU_DOWN_FAILED: +		break; +	default: +		return NOTIFY_OK; +	} +  	mutex_lock(&cpuidle_lock);  	dev = per_cpu(cpuidle_devices, cpu); -	if (!dev->coupled) +	if (!dev || !dev->coupled)  		goto out;  	switch (action & ~CPU_TASKS_FROZEN) { diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 53c8c51d588..93d14070141 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -63,7 +63,7 @@ static void 
caam_jr_dequeue(unsigned long devarg)  		head = ACCESS_ONCE(jrp->head); -		spin_lock_bh(&jrp->outlock); +		spin_lock(&jrp->outlock);  		sw_idx = tail = jrp->tail;  		hw_idx = jrp->out_ring_read_index; @@ -115,7 +115,7 @@ static void caam_jr_dequeue(unsigned long devarg)  			jrp->tail = tail;  		} -		spin_unlock_bh(&jrp->outlock); +		spin_unlock(&jrp->outlock);  		/* Finally, execute user's callback */  		usercall(dev, userdesc, userstatus, userarg); @@ -236,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,  		return -EIO;  	} -	spin_lock(&jrp->inplock); +	spin_lock_bh(&jrp->inplock);  	head = jrp->head;  	tail = ACCESS_ONCE(jrp->tail);  	if (!rd_reg32(&jrp->rregs->inpring_avail) ||  	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { -		spin_unlock(&jrp->inplock); +		spin_unlock_bh(&jrp->inplock);  		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);  		return -EBUSY;  	} @@ -265,7 +265,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,  	wr_reg32(&jrp->rregs->inpring_jobadd, 1); -	spin_unlock(&jrp->inplock); +	spin_unlock_bh(&jrp->inplock);  	return 0;  } diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index c9c4befb5a8..df14358d7fa 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -821,8 +821,8 @@ static int hifn_register_rng(struct hifn_device *dev)  	/*  	 * We must wait at least 256 Pk_clk cycles between two reads of the rng.  	 
*/ -	dev->rng_wait_time	= DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) * -				  256; +	dev->rng_wait_time	= DIV_ROUND_UP_ULL(NSEC_PER_SEC, +						   dev->pk_clk_freq) * 256;  	dev->rng.name		= dev->name;  	dev->rng.data_present	= hifn_rng_data_present, diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c index fe3db45fa83..3cc152e690b 100644 --- a/drivers/extcon/extcon_gpio.c +++ b/drivers/extcon/extcon_gpio.c @@ -107,7 +107,8 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)  	if (ret < 0)  		return ret; -	ret = gpio_request_one(extcon_data->gpio, GPIOF_DIR_IN, pdev->name); +	ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, +				    pdev->name);  	if (ret < 0)  		goto err; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 23120c00a88..3a8c68345fe 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -22,7 +22,9 @@ menuconfig DRM  config DRM_USB  	tristate  	depends on DRM +	depends on USB_ARCH_HAS_HCD  	select USB +	select USB_SUPPORT  config DRM_KMS_HELPER  	tristate diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index d4af9edcbb9..20a437f8878 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -94,7 +94,6 @@ struct ast_private {  		struct drm_global_reference mem_global_ref;  		struct ttm_bo_global_ref bo_global_ref;  		struct ttm_bo_device bdev; -		atomic_t validate_sequence;  	} ttm;  	struct drm_gem_object *cursor_cache; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 7282c081fb5..866e9d48b2d 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -737,6 +737,7 @@ static int ast_get_modes(struct drm_connector *connector)  	if (edid) {  		drm_mode_connector_update_edid_property(&ast_connector->base, edid);  		ret = drm_add_edid_modes(connector, edid); +		kfree(edid);  		return ret;  	} else  		
drm_mode_connector_update_edid_property(&ast_connector->base, NULL); diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 64ea597cb6d..5d045647a3f 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -143,7 +143,6 @@ struct cirrus_device {  		struct drm_global_reference mem_global_ref;  		struct ttm_bo_global_ref bo_global_ref;  		struct ttm_bo_device bdev; -		atomic_t validate_sequence;  	} ttm;  	bool mm_inited;  }; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a8743c399e8..bcc472572cd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -399,10 +399,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,  	if (drm_probe_ddc(adapter))  		edid = (struct edid *)drm_do_get_edid(connector, adapter); -	connector->display_info.raw_edid = (char *)edid; -  	return edid; -  }  EXPORT_SYMBOL(drm_get_edid); diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 0303935d10e..186832e1874 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -114,8 +114,8 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {  	},  }; -static int edid_load(struct drm_connector *connector, char *name, -		     char *connector_name) +static u8 *edid_load(struct drm_connector *connector, char *name, +			char *connector_name)  {  	const struct firmware *fw;  	struct platform_device *pdev; @@ -205,7 +205,6 @@ static int edid_load(struct drm_connector *connector, char *name,  		edid = new_edid;  	} -	connector->display_info.raw_edid = edid;  	DRM_INFO("Got %s EDID base block and %d extension%s from "  	    "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :  	    "external", valid_extensions, valid_extensions == 1 ? 
"" : "s", @@ -215,7 +214,10 @@ relfw_out:  	release_firmware(fw);  out: -	return err; +	if (err) +		return ERR_PTR(err); + +	return edid;  }  int drm_load_edid_firmware(struct drm_connector *connector) @@ -223,6 +225,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)  	char *connector_name = drm_get_connector_name(connector);  	char *edidname = edid_firmware, *last, *colon;  	int ret; +	struct edid *edid;  	if (*edidname == '\0')  		return 0; @@ -240,13 +243,13 @@ int drm_load_edid_firmware(struct drm_connector *connector)  	if (*last == '\n')  		*last = '\0'; -	ret = edid_load(connector, edidname, connector_name); -	if (ret) +	edid = (struct edid *) edid_load(connector, edidname, connector_name); +	if (IS_ERR_OR_NULL(edid))  		return 0; -	drm_mode_connector_update_edid_property(connector, -	    (struct edid *) connector->display_info.raw_edid); +	drm_mode_connector_update_edid_property(connector, edid); +	ret = drm_add_edid_modes(connector, edid); +	kfree(edid); -	return drm_add_edid_modes(connector, (struct edid *) -	    connector->display_info.raw_edid); +	return ret;  } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 4ecc869fb5b..eb79515797d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -236,7 +236,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)  }  EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode); -bool drm_fb_helper_force_kernel_mode(void) +static bool drm_fb_helper_force_kernel_mode(void)  {  	bool ret, error = false;  	struct drm_fb_helper *helper; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 03f16f352fe..076c4a86ff8 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1236,7 +1236,7 @@ done:  	return ret;  } -void drm_handle_vblank_events(struct drm_device *dev, int crtc) +static void drm_handle_vblank_events(struct drm_device *dev, int crtc)  {  	struct drm_pending_vblank_event *e, *t;  	
struct timeval now; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index b7adb4a967f..28637c181b1 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)  	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);  	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);  	p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal); - -	p->crtc_hadjusted = false; -	p->crtc_vadjusted = false;  }  EXPORT_SYMBOL(drm_mode_set_crtcinfo); diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index 371c695322d..da457b18eaa 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -89,7 +89,7 @@ static const struct file_operations drm_proc_fops = {   * Create a given set of proc files represented by an array of   * gdm_proc_lists in the given root directory.   */ -int drm_proc_create_files(struct drm_info_list *files, int count, +static int drm_proc_create_files(struct drm_info_list *files, int count,  			  struct proc_dir_entry *root, struct drm_minor *minor)  {  	struct drm_device *dev = minor->dev; @@ -172,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,  	return 0;  } -int drm_proc_remove_files(struct drm_info_list *files, int count, +static int drm_proc_remove_files(struct drm_info_list *files, int count,  			  struct drm_minor *minor)  {  	struct list_head *pos, *q; diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 961ee08927f..3f061667253 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -62,7 +62,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)  		tmp = pgprot_writecombine(tmp);  	else  		tmp = pgprot_noncached(tmp); -#elif defined(__sparc__) || defined(__arm__) +#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)  	tmp = pgprot_noncached(tmp);  #endif  	return tmp; diff --git 
a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index d9568198c30..9dce3b9c389 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -147,9 +147,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)  		drm_mode_connector_update_edid_property(connector, edid);  		count = drm_add_edid_modes(connector, edid); - -		kfree(connector->display_info.raw_edid); -		connector->display_info.raw_edid = edid; +		kfree(edid);  	} else {  		struct drm_display_mode *mode = drm_mode_create(connector->dev);  		struct exynos_drm_panel_info *panel; diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index bb1550c4dd5..92395258d64 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -102,7 +102,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,  				u8 *edid, int len)  {  	struct vidi_context *ctx = get_vidi_context(dev); -	struct edid *raw_edid;  	DRM_DEBUG_KMS("%s\n", __FILE__); @@ -115,18 +114,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,  		return -EFAULT;  	} -	raw_edid = kzalloc(len, GFP_KERNEL); -	if (!raw_edid) { -		DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); -		return -ENOMEM; -	} - -	memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) -						* EDID_LENGTH, len)); - -	/* attach the edid data to connector. 
*/ -	connector->display_info.raw_edid = (char *)raw_edid; -  	memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)  					* EDID_LENGTH, len)); diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile index abfa2a93f0d..7a2d40a5c1e 100644 --- a/drivers/gpu/drm/gma500/Makefile +++ b/drivers/gpu/drm/gma500/Makefile @@ -3,7 +3,7 @@  #  ccflags-y += -I$(srctree)/include/drm -gma500_gfx-y += gem_glue.o \ +gma500_gfx-y += \  	  accel_2d.o \  	  backlight.o \  	  framebuffer.o \ @@ -30,7 +30,8 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) +=  cdv_device.o \  	  cdv_intel_crt.o \  	  cdv_intel_display.o \  	  cdv_intel_hdmi.o \ -	  cdv_intel_lvds.o +	  cdv_intel_lvds.o \ +	  cdv_intel_dp.o  gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \  	  oaktrail_crtc.o \ diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c index 20793951fca..143eba3309c 100644 --- a/drivers/gpu/drm/gma500/backlight.c +++ b/drivers/gpu/drm/gma500/backlight.c @@ -26,10 +26,55 @@  #include "intel_bios.h"  #include "power.h" +static void do_gma_backlight_set(struct drm_device *dev) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE +	struct drm_psb_private *dev_priv = dev->dev_private; +	backlight_update_status(dev_priv->backlight_device); +#endif	 +} + +void gma_backlight_enable(struct drm_device *dev) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE +	struct drm_psb_private *dev_priv = dev->dev_private; +	dev_priv->backlight_enabled = true; +	if (dev_priv->backlight_device) { +		dev_priv->backlight_device->props.brightness = dev_priv->backlight_level; +		do_gma_backlight_set(dev); +	} +#endif	 +} + +void gma_backlight_disable(struct drm_device *dev) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE +	struct drm_psb_private *dev_priv = dev->dev_private; +	dev_priv->backlight_enabled = false; +	if (dev_priv->backlight_device) { +		dev_priv->backlight_device->props.brightness = 0; +		do_gma_backlight_set(dev); +	} +#endif	 +} + +void gma_backlight_set(struct drm_device 
*dev, int v) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE +	struct drm_psb_private *dev_priv = dev->dev_private; +	dev_priv->backlight_level = v; +	if (dev_priv->backlight_device && dev_priv->backlight_enabled) { +		dev_priv->backlight_device->props.brightness = v; +		do_gma_backlight_set(dev); +	} +#endif	 +} +  int gma_backlight_init(struct drm_device *dev)  {  #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE  	struct drm_psb_private *dev_priv = dev->dev_private; +	dev_priv->backlight_enabled = true;  	return dev_priv->ops->backlight_init(dev);  #else  	return 0; diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index b7e7b49d8f6..bfc2f397019 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -58,10 +58,17 @@ static int cdv_output_init(struct drm_device *dev)  	cdv_intel_lvds_init(dev, &dev_priv->mode_dev);  	/* These bits indicate HDMI not SDVO on CDV */ -	if (REG_READ(SDVOB) & SDVO_DETECTED) +	if (REG_READ(SDVOB) & SDVO_DETECTED) {  		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB); -	if (REG_READ(SDVOC) & SDVO_DETECTED) +		if (REG_READ(DP_B) & DP_DETECTED) +			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B); +	} + +	if (REG_READ(SDVOC) & SDVO_DETECTED) {  		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC); +		if (REG_READ(DP_C) & DP_DETECTED) +			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C); +	}  	return 0;  } @@ -163,6 +170,7 @@ static int cdv_backlight_init(struct drm_device *dev)  			cdv_get_brightness(cdv_backlight_device);  	backlight_update_status(cdv_backlight_device);  	dev_priv->backlight_device = cdv_backlight_device; +	dev_priv->backlight_enabled = true;  	return 0;  } @@ -449,6 +457,7 @@ static void cdv_get_core_freq(struct drm_device *dev)  	case 6:  	case 7:  		dev_priv->core_freq = 266; +		break;  	default:  		dev_priv->core_freq = 0;  	} @@ -488,6 +497,65 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)  	}	  } +static const char *force_audio_names[] = { +	
"off", +	"auto", +	"on", +}; + +void cdv_intel_attach_force_audio_property(struct drm_connector *connector) +{ +	struct drm_device *dev = connector->dev; +	struct drm_psb_private *dev_priv = dev->dev_private; +	struct drm_property *prop; +	int i; + +	prop = dev_priv->force_audio_property; +	if (prop == NULL) { +		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, +					   "audio", +					   ARRAY_SIZE(force_audio_names)); +		if (prop == NULL) +			return; + +		for (i = 0; i < ARRAY_SIZE(force_audio_names); i++) +			drm_property_add_enum(prop, i, i-1, force_audio_names[i]); + +		dev_priv->force_audio_property = prop; +	} +	drm_connector_attach_property(connector, prop, 0); +} + + +static const char *broadcast_rgb_names[] = { +	"Full", +	"Limited 16:235", +}; + +void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector) +{ +	struct drm_device *dev = connector->dev; +	struct drm_psb_private *dev_priv = dev->dev_private; +	struct drm_property *prop; +	int i; + +	prop = dev_priv->broadcast_rgb_property; +	if (prop == NULL) { +		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, +					   "Broadcast RGB", +					   ARRAY_SIZE(broadcast_rgb_names)); +		if (prop == NULL) +			return; + +		for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++) +			drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]); + +		dev_priv->broadcast_rgb_property = prop; +	} + +	drm_connector_attach_property(connector, prop, 0); +} +  /* Cedarview */  static const struct psb_offset cdv_regmap[2] = {  	{ diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index a68509ba22a..3cfd0931fbf 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -57,15 +57,26 @@ struct cdv_intel_clock_t {  struct cdv_intel_limit_t {  	struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;  	struct cdv_intel_p2_t p2; +	bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *, +			int, int, 
struct cdv_intel_clock_t *);  }; +static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit, +	struct drm_crtc *crtc, int target, int refclk, +	struct cdv_intel_clock_t *best_clock); +static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target, +				int refclk, +				struct cdv_intel_clock_t *best_clock); +  #define CDV_LIMIT_SINGLE_LVDS_96	0  #define CDV_LIMIT_SINGLE_LVDS_100	1  #define CDV_LIMIT_DAC_HDMI_27		2  #define CDV_LIMIT_DAC_HDMI_96		3 +#define CDV_LIMIT_DP_27			4 +#define CDV_LIMIT_DP_100		5  static const struct cdv_intel_limit_t cdv_intel_limits[] = { -	{			/* CDV_SIGNLE_LVDS_96MHz */ +	{			/* CDV_SINGLE_LVDS_96MHz */  	 .dot = {.min = 20000, .max = 115500},  	 .vco = {.min = 1800000, .max = 3600000},  	 .n = {.min = 2, .max = 6}, @@ -76,6 +87,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {  	 .p1 = {.min = 2, .max = 10},  	 .p2 = {.dot_limit = 200000,  		.p2_slow = 14, .p2_fast = 14}, +		.find_pll = cdv_intel_find_best_PLL,  	 },  	{			/* CDV_SINGLE_LVDS_100MHz */  	 .dot = {.min = 20000, .max = 115500}, @@ -90,6 +102,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {  	  * is 80-224Mhz.  Prefer single channel as much as possible.  	  
*/  	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, +	.find_pll = cdv_intel_find_best_PLL,  	 },  	{			/* CDV_DAC_HDMI_27MHz */  	 .dot = {.min = 20000, .max = 400000}, @@ -101,6 +114,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {  	 .p = {.min = 5, .max = 90},  	 .p1 = {.min = 1, .max = 9},  	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, +	.find_pll = cdv_intel_find_best_PLL,  	 },  	{			/* CDV_DAC_HDMI_96MHz */  	 .dot = {.min = 20000, .max = 400000}, @@ -112,7 +126,32 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {  	 .p = {.min = 5, .max = 100},  	 .p1 = {.min = 1, .max = 10},  	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, +	.find_pll = cdv_intel_find_best_PLL, +	 }, +	{			/* CDV_DP_27MHz */ +	 .dot = {.min = 160000, .max = 272000}, +	 .vco = {.min = 1809000, .max = 3564000}, +	 .n = {.min = 1, .max = 1}, +	 .m = {.min = 67, .max = 132}, +	 .m1 = {.min = 0, .max = 0}, +	 .m2 = {.min = 65, .max = 130}, +	 .p = {.min = 5, .max = 90}, +	 .p1 = {.min = 1, .max = 9}, +	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10}, +	 .find_pll = cdv_intel_find_dp_pll,  	 }, +	{			/* CDV_DP_100MHz */ +	 .dot = {.min = 160000, .max = 272000}, +	 .vco = {.min = 1800000, .max = 3600000}, +	 .n = {.min = 2, .max = 6}, +	 .m = {.min = 60, .max = 164}, +	 .m1 = {.min = 0, .max = 0}, +	 .m2 = {.min = 58, .max = 162}, +	 .p = {.min = 5, .max = 100}, +	 .p1 = {.min = 1, .max = 10}, +	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10}, +	 .find_pll = cdv_intel_find_dp_pll, +	 }	  };  #define _wait_for(COND, MS, W) ({ \ @@ -132,7 +171,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {  #define wait_for(COND, MS) _wait_for(COND, MS, 1) -static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val) +int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)  {  	int ret; @@ -159,7 +198,7 @@ static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)  	return 0;  } -static int 
cdv_sb_write(struct drm_device *dev, u32 reg, u32 val) +int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)  {  	int ret;  	static bool dpio_debug = true; @@ -201,7 +240,7 @@ static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)  /* Reset the DPIO configuration register.  The BIOS does this at every   * mode set.   */ -static void cdv_sb_reset(struct drm_device *dev) +void cdv_sb_reset(struct drm_device *dev)  {  	REG_WRITE(DPIO_CFG, 0); @@ -216,7 +255,7 @@ static void cdv_sb_reset(struct drm_device *dev)   */  static int  cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, -			       struct cdv_intel_clock_t *clock, bool is_lvds) +			       struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)  {  	struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);  	int pipe = psb_crtc->pipe; @@ -259,7 +298,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,  	ref_value &= ~(REF_CLK_MASK);  	/* use DPLL_A for pipeB on CRT/HDMI */ -	if (pipe == 1 && !is_lvds) { +	if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {  		DRM_DEBUG_KMS("use DPLLA for pipe B\n");  		ref_value |= REF_CLK_DPLLA;  	} else { @@ -336,30 +375,33 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,  	if (ret)  		return ret; -	lane_reg = PSB_LANE0; -	cdv_sb_read(dev, lane_reg, &lane_value); -	lane_value &= ~(LANE_PLL_MASK); -	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); -	cdv_sb_write(dev, lane_reg, lane_value); - -	lane_reg = PSB_LANE1; -	cdv_sb_read(dev, lane_reg, &lane_value); -	lane_value &= ~(LANE_PLL_MASK); -	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); -	cdv_sb_write(dev, lane_reg, lane_value); +	if (ddi_select) { +		if ((ddi_select & DDI_MASK) == DDI0_SELECT) { +			lane_reg = PSB_LANE0; +			cdv_sb_read(dev, lane_reg, &lane_value); +			lane_value &= ~(LANE_PLL_MASK); +			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); +			cdv_sb_write(dev, lane_reg, lane_value); -	lane_reg = 
PSB_LANE2; -	cdv_sb_read(dev, lane_reg, &lane_value); -	lane_value &= ~(LANE_PLL_MASK); -	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); -	cdv_sb_write(dev, lane_reg, lane_value); - -	lane_reg = PSB_LANE3; -	cdv_sb_read(dev, lane_reg, &lane_value); -	lane_value &= ~(LANE_PLL_MASK); -	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); -	cdv_sb_write(dev, lane_reg, lane_value); +			lane_reg = PSB_LANE1; +			cdv_sb_read(dev, lane_reg, &lane_value); +			lane_value &= ~(LANE_PLL_MASK); +			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); +			cdv_sb_write(dev, lane_reg, lane_value); +		} else { +			lane_reg = PSB_LANE2; +			cdv_sb_read(dev, lane_reg, &lane_value); +			lane_value &= ~(LANE_PLL_MASK); +			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); +			cdv_sb_write(dev, lane_reg, lane_value); +			lane_reg = PSB_LANE3; +			cdv_sb_read(dev, lane_reg, &lane_value); +			lane_value &= ~(LANE_PLL_MASK); +			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); +			cdv_sb_write(dev, lane_reg, lane_value); +		} +	}  	return 0;  } @@ -396,6 +438,12 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,  			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];  		else  			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100]; +	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || +			psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { +		if (refclk == 27000) +			limit = &cdv_intel_limits[CDV_LIMIT_DP_27]; +		else +			limit = &cdv_intel_limits[CDV_LIMIT_DP_100];  	} else {  		if (refclk == 27000)  			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27]; @@ -438,13 +486,12 @@ static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,  	return true;  } -static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target, -				int refclk, -				struct cdv_intel_clock_t *best_clock) +static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit, +	struct drm_crtc *crtc, int target, int refclk, +	struct cdv_intel_clock_t 
*best_clock)  {  	struct drm_device *dev = crtc->dev;  	struct cdv_intel_clock_t clock; -	const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);  	int err = target; @@ -498,6 +545,49 @@ static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,  	return err != target;  } +static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target, +				int refclk, +				struct cdv_intel_clock_t *best_clock) +{ +	struct cdv_intel_clock_t clock; +	if (refclk == 27000) { +		if (target < 200000) { +			clock.p1 = 2; +			clock.p2 = 10; +			clock.n = 1; +			clock.m1 = 0; +			clock.m2 = 118; +		} else { +			clock.p1 = 1; +			clock.p2 = 10; +			clock.n = 1; +			clock.m1 = 0; +			clock.m2 = 98; +		} +	} else if (refclk == 100000) { +		if (target < 200000) { +			clock.p1 = 2; +			clock.p2 = 10; +			clock.n = 5; +			clock.m1 = 0; +			clock.m2 = 160; +		} else { +			clock.p1 = 1; +			clock.p2 = 10; +			clock.n = 5; +			clock.m1 = 0; +			clock.m2 = 133; +		} +	} else +		return false; +	clock.m = clock.m2 + 2; +	clock.p = clock.p1 * clock.p2; +	clock.vco = (refclk * clock.m) / clock.n; +	clock.dot = clock.vco / clock.p; +	memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t)); +	return true; +} +  static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,  			    int x, int y, struct drm_framebuffer *old_fb)  { @@ -791,7 +881,7 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)  	case DRM_MODE_DPMS_STANDBY:  	case DRM_MODE_DPMS_SUSPEND:  		if (psb_intel_crtc->active) -			return; +			break;  		psb_intel_crtc->active = true; @@ -835,17 +925,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)  		REG_WRITE(map->status, temp);  		REG_READ(map->status); -		cdv_intel_update_watermark(dev, crtc);  		cdv_intel_crtc_load_lut(crtc);  		/* Give the overlay scaler a chance to enable  		 * if it's on this pipe */  		/* psb_intel_crtc_dpms_video(crtc, true); TODO */ -		psb_intel_crtc->crtc_enable 
= true;  		break;  	case DRM_MODE_DPMS_OFF:  		if (!psb_intel_crtc->active) -			return; +			break;  		psb_intel_crtc->active = false; @@ -892,10 +980,9 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)  		/* Wait for the clocks to turn off. */  		udelay(150); -		cdv_intel_update_watermark(dev, crtc); -		psb_intel_crtc->crtc_enable = false;  		break;  	} +	cdv_intel_update_watermark(dev, crtc);  	/*Set FIFO Watermarks*/  	REG_WRITE(DSPARB, 0x3F3E);  } @@ -952,9 +1039,12 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,  	u32 dpll = 0, dspcntr, pipeconf;  	bool ok;  	bool is_crt = false, is_lvds = false, is_tv = false; -	bool is_hdmi = false; +	bool is_hdmi = false, is_dp = false;  	struct drm_mode_config *mode_config = &dev->mode_config;  	struct drm_connector *connector; +	const struct cdv_intel_limit_t *limit; +	u32 ddi_select = 0; +	bool is_edp = false;  	list_for_each_entry(connector, &mode_config->connector_list, head) {  		struct psb_intel_encoder *psb_intel_encoder = @@ -964,6 +1054,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,  		    || connector->encoder->crtc != crtc)  			continue; +		ddi_select = psb_intel_encoder->ddi_select;  		switch (psb_intel_encoder->type) {  		case INTEL_OUTPUT_LVDS:  			is_lvds = true; @@ -977,6 +1068,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,  		case INTEL_OUTPUT_HDMI:  			is_hdmi = true;  			break; +		case INTEL_OUTPUT_DISPLAYPORT: +			is_dp = true; +			break; +		case INTEL_OUTPUT_EDP: +			is_edp = true; +			break; +		default: +			DRM_ERROR("invalid output type.\n"); +			return 0;  		}  	} @@ -986,6 +1086,20 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,  	else  		/* high-end sku, 27/100 mhz */  		refclk = 27000; +	if (is_dp || is_edp) { +		/* +		 * Based on the spec the low-end SKU has only CRT/LVDS. So it is +		 * unnecessary to consider it for DP/eDP. +		 * On the high-end SKU, it will use the 27/100M reference clk +		 * for DP/eDP. 
When using SSC clock, the ref clk is 100MHz.Otherwise +		 * it will be 27MHz. From the VBIOS code it seems that the pipe A choose +		 * 27MHz for DP/eDP while the Pipe B chooses the 100MHz. +		 */  +		if (pipe == 0) +			refclk = 27000; +		else +			refclk = 100000; +	}  	if (is_lvds && dev_priv->lvds_use_ssc) {  		refclk = dev_priv->lvds_ssc_freq * 1000; @@ -993,8 +1107,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,  	}  	drm_mode_debug_printmodeline(adjusted_mode); +	 +	limit = cdv_intel_limit(crtc, refclk); -	ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, +	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,  				 &clock);  	if (!ok) {  		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); @@ -1009,6 +1125,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,  	}  /*		dpll |= PLL_REF_INPUT_DREFCLK; */ +	if (is_dp || is_edp) { +		cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode); +	} else { +		REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0); +		REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0); +		REG_WRITE(PIPE_DP_LINK_M(pipe), 0); +		REG_WRITE(PIPE_DP_LINK_N(pipe), 0); +	} +  	dpll |= DPLL_SYNCLOCK_ENABLE;  /*	if (is_lvds)  		dpll |= DPLLB_MODE_LVDS; @@ -1019,6 +1144,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,  	/* setup pipeconf */  	pipeconf = REG_READ(map->conf); +	pipeconf &= ~(PIPE_BPC_MASK); +	if (is_edp) { +		switch (dev_priv->edp.bpp) { +		case 24: +			pipeconf |= PIPE_8BPC; +			break; +		case 18: +			pipeconf |= PIPE_6BPC; +			break; +		case 30: +			pipeconf |= PIPE_10BPC; +			break; +		default: +			pipeconf |= PIPE_8BPC; +			break; +		} +	} else if (is_lvds) { +		/* the BPC will be 6 if it is 18-bit LVDS panel */ +		if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) +			pipeconf |= PIPE_8BPC; +		else +			pipeconf |= PIPE_6BPC; +	} else +		pipeconf |= PIPE_8BPC; +			  	/* Set up the display plane register */  	dspcntr = DISPPLANE_GAMMA_ENABLE; @@ -1033,7 +1183,7 @@ static int 
cdv_intel_crtc_mode_set(struct drm_crtc *crtc,  	REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);  	REG_READ(map->dpll); -	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds); +	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);  	udelay(150); diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c new file mode 100644 index 00000000000..c9abc06ef68 --- /dev/null +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -0,0 +1,1951 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + *    Keith Packard <keithp@keithp.com> + * + */ + +#include <linux/i2c.h> +#include <linux/slab.h> +#include "drmP.h" +#include "drm.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "psb_drv.h" +#include "psb_intel_drv.h" +#include "psb_intel_reg.h" +#include "drm_dp_helper.h" + +#define _wait_for(COND, MS, W) ({ \ +        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \ +        int ret__ = 0;                                                  \ +        while (! (COND)) {                                              \ +                if (time_after(jiffies, timeout__)) {                   \ +                        ret__ = -ETIMEDOUT;                             \ +                        break;                                          \ +                }                                                       \ +                if (W && !in_dbg_master()) msleep(W);                   \ +        }                                                               \ +        ret__;                                                          \ +})       + +#define wait_for(COND, MS) _wait_for(COND, MS, 1) + +#define DP_LINK_STATUS_SIZE	6 +#define DP_LINK_CHECK_TIMEOUT	(10 * 1000) + +#define DP_LINK_CONFIGURATION_SIZE	9 + +#define CDV_FAST_LINK_TRAIN	1 + +struct cdv_intel_dp { +	uint32_t output_reg; +	uint32_t DP; +	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE]; +	bool has_audio; +	int force_audio; +	uint32_t color_range; +	uint8_t link_bw; +	uint8_t lane_count; +	uint8_t dpcd[4]; +	struct psb_intel_encoder *encoder; +	struct i2c_adapter adapter; +	struct i2c_algo_dp_aux_data algo; +	uint8_t	train_set[4]; +	uint8_t link_status[DP_LINK_STATUS_SIZE]; +	int panel_power_up_delay; +	int panel_power_down_delay; +	int panel_power_cycle_delay; +	int backlight_on_delay; +	int backlight_off_delay; +	struct drm_display_mode *panel_fixed_mode;  /* for eDP */ +	bool panel_on; +}; + +struct ddi_regoff { +	uint32_t	PreEmph1; +	
uint32_t	PreEmph2; +	uint32_t	VSwing1; +	uint32_t	VSwing2; +	uint32_t	VSwing3; +	uint32_t	VSwing4; +	uint32_t	VSwing5; +}; + +static struct ddi_regoff ddi_DP_train_table[] = { +	{.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154, +	.VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150, +	.VSwing5 = 0x8158,}, +	{.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254, +	.VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250, +	.VSwing5 = 0x8258,}, +}; + +static uint32_t dp_vswing_premph_table[] = { +        0x55338954,	0x4000, +        0x554d8954,	0x2000, +        0x55668954,	0, +        0x559ac0d4,	0x6000, +}; +/** + * is_edp - is the given port attached to an eDP panel (either CPU or PCH) + * @intel_dp: DP struct + * + * If a CPU or PCH DP output is attached to an eDP panel, this function + * will return true, and false otherwise. + */ +static bool is_edp(struct psb_intel_encoder *encoder) +{ +	return encoder->type == INTEL_OUTPUT_EDP; +} + + +static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder); +static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder); +static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder); + +static int +cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder) +{ +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	int max_lane_count = 4; + +	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { +		max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; +		switch (max_lane_count) { +		case 1: case 2: case 4: +			break; +		default: +			max_lane_count = 4; +		} +	} +	return max_lane_count; +} + +static int +cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder) +{ +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; + +	switch (max_link_bw) { +	case DP_LINK_BW_1_62: +	case DP_LINK_BW_2_7: +		break; +	default: +		max_link_bw = DP_LINK_BW_1_62; +		break; +	} +	return max_link_bw; +} + +static int 
+cdv_intel_dp_link_clock(uint8_t link_bw) +{ +	if (link_bw == DP_LINK_BW_2_7) +		return 270000; +	else +		return 162000; +} + +static int +cdv_intel_dp_link_required(int pixel_clock, int bpp) +{ +	return (pixel_clock * bpp + 7) / 8; +} + +static int +cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes) +{ +	return (max_link_clock * max_lanes * 19) / 20; +} + +static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder) +{ +	struct drm_device *dev = intel_encoder->base.dev; +	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; +	u32 pp; + +	if (intel_dp->panel_on) { +		DRM_DEBUG_KMS("Skip VDD on because of panel on\n"); +		return; +	}	 +	DRM_DEBUG_KMS("\n"); + +	pp = REG_READ(PP_CONTROL); + +	pp |= EDP_FORCE_VDD; +	REG_WRITE(PP_CONTROL, pp); +	REG_READ(PP_CONTROL); +	msleep(intel_dp->panel_power_up_delay); +} + +static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder) +{ +	struct drm_device *dev = intel_encoder->base.dev; +	u32 pp; + +	DRM_DEBUG_KMS("\n"); +	pp = REG_READ(PP_CONTROL); + +	pp &= ~EDP_FORCE_VDD; +	REG_WRITE(PP_CONTROL, pp); +	REG_READ(PP_CONTROL); + +} + +/* Returns true if the panel was already on when called */ +static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder) +{ +	struct drm_device *dev = intel_encoder->base.dev; +	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; +	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE; + +	if (intel_dp->panel_on) +		return true; + +	DRM_DEBUG_KMS("\n"); +	pp = REG_READ(PP_CONTROL); +	pp &= ~PANEL_UNLOCK_MASK; + +	pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON); +	REG_WRITE(PP_CONTROL, pp); +	REG_READ(PP_CONTROL); + +	if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) { +		DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS)); +		intel_dp->panel_on = false; +	} else +		intel_dp->panel_on = true;	 +	msleep(intel_dp->panel_power_up_delay); + +	return false; +} + +static void 
cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder) +{ +	struct drm_device *dev = intel_encoder->base.dev; +	u32 pp, idle_off_mask = PP_ON ; +	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; + +	DRM_DEBUG_KMS("\n"); + +	pp = REG_READ(PP_CONTROL); + +	if ((pp & POWER_TARGET_ON) == 0)  +		return; + +	intel_dp->panel_on = false; +	pp &= ~PANEL_UNLOCK_MASK; +	/* ILK workaround: disable reset around power sequence */ + +	pp &= ~POWER_TARGET_ON; +	pp &= ~EDP_FORCE_VDD; +	pp &= ~EDP_BLC_ENABLE; +	REG_WRITE(PP_CONTROL, pp); +	REG_READ(PP_CONTROL); +	DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS)); + +	if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) { +		DRM_DEBUG_KMS("Error in turning off Panel\n");	 +	} + +	msleep(intel_dp->panel_power_cycle_delay); +	DRM_DEBUG_KMS("Over\n"); +} + +static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder) +{ +	struct drm_device *dev = intel_encoder->base.dev; +	u32 pp; + +	DRM_DEBUG_KMS("\n"); +	/* +	 * If we enable the backlight right away following a panel power +	 * on, we may see slight flicker as the panel syncs with the eDP +	 * link.  So delay a bit to make sure the image is solid before +	 * allowing it to appear. 
+	 */ +	msleep(300); +	pp = REG_READ(PP_CONTROL); + +	pp |= EDP_BLC_ENABLE; +	REG_WRITE(PP_CONTROL, pp); +	gma_backlight_enable(dev); +} + +static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder) +{ +	struct drm_device *dev = intel_encoder->base.dev; +	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; +	u32 pp; + +	DRM_DEBUG_KMS("\n"); +	gma_backlight_disable(dev); +	msleep(10); +	pp = REG_READ(PP_CONTROL); + +	pp &= ~EDP_BLC_ENABLE; +	REG_WRITE(PP_CONTROL, pp); +	msleep(intel_dp->backlight_off_delay); +} + +static int +cdv_intel_dp_mode_valid(struct drm_connector *connector, +		    struct drm_display_mode *mode) +{ +	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder)); +	int max_lanes = cdv_intel_dp_max_lane_count(encoder); +	struct drm_psb_private *dev_priv = connector->dev->dev_private; + +	if (is_edp(encoder) && intel_dp->panel_fixed_mode) { +		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) +			return MODE_PANEL; +		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) +			return MODE_PANEL; +	} + +	/* only refuse the mode on non eDP since we have seen some weird eDP panels +	   which are outside spec tolerances but somehow work by magic */ +	if (!is_edp(encoder) && +	    (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp) +	     > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))) +		return MODE_CLOCK_HIGH; + +	if (is_edp(encoder)) { +	    if (cdv_intel_dp_link_required(mode->clock, 24) +	     	> cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)) +		return MODE_CLOCK_HIGH; +		 +	} +	if (mode->clock < 10000) +		return MODE_CLOCK_LOW; + +	return MODE_OK; +} + +static uint32_t +pack_aux(uint8_t *src, int src_bytes) +{ +	int	i; +	uint32_t v = 0; + +	if (src_bytes > 4) +		src_bytes = 4; +	for (i = 0; i < src_bytes; i++) +		v |= ((uint32_t) 
src[i]) << ((3-i) * 8); +	return v; +} + +static void +unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +{ +	int i; +	if (dst_bytes > 4) +		dst_bytes = 4; +	for (i = 0; i < dst_bytes; i++) +		dst[i] = src >> ((3-i) * 8); +} + +static int +cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder, +		uint8_t *send, int send_bytes, +		uint8_t *recv, int recv_size) +{ +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	uint32_t output_reg = intel_dp->output_reg; +	struct drm_device *dev = encoder->base.dev; +	uint32_t ch_ctl = output_reg + 0x10; +	uint32_t ch_data = ch_ctl + 4; +	int i; +	int recv_bytes; +	uint32_t status; +	uint32_t aux_clock_divider; +	int try, precharge; + +	/* The clock divider is based off the hrawclk, +	 * and would like to run at 2MHz. So, take the +	 * hrawclk value and divide by 2 and use that +	 * On CDV platform it uses 200MHz as hrawclk. +	 * +	 */ +	aux_clock_divider = 200 / 2; + +	precharge = 4; +	if (is_edp(encoder)) +		precharge = 10; + +	if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { +		DRM_ERROR("dp_aux_ch not started status 0x%08x\n", +			  REG_READ(ch_ctl)); +		return -EBUSY; +	} + +	/* Must try at least 3 times according to DP spec */ +	for (try = 0; try < 5; try++) { +		/* Load the send data into the aux channel data registers */ +		for (i = 0; i < send_bytes; i += 4) +			REG_WRITE(ch_data + i, +				   pack_aux(send + i, send_bytes - i)); +	 +		/* Send the command and wait for it to complete */ +		REG_WRITE(ch_ctl, +			   DP_AUX_CH_CTL_SEND_BUSY | +			   DP_AUX_CH_CTL_TIME_OUT_400us | +			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | +			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | +			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | +			   DP_AUX_CH_CTL_DONE | +			   DP_AUX_CH_CTL_TIME_OUT_ERROR | +			   DP_AUX_CH_CTL_RECEIVE_ERROR); +		for (;;) { +			status = REG_READ(ch_ctl); +			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) +				break; +			udelay(100); +		} +	 +		/* Clear done status and any 
errors */ +		REG_WRITE(ch_ctl, +			   status | +			   DP_AUX_CH_CTL_DONE | +			   DP_AUX_CH_CTL_TIME_OUT_ERROR | +			   DP_AUX_CH_CTL_RECEIVE_ERROR); +		if (status & DP_AUX_CH_CTL_DONE) +			break; +	} + +	if ((status & DP_AUX_CH_CTL_DONE) == 0) { +		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); +		return -EBUSY; +	} + +	/* Check for timeout or receive error. +	 * Timeouts occur when the sink is not connected +	 */ +	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { +		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); +		return -EIO; +	} + +	/* Timeouts occur when the device isn't connected, so they're +	 * "normal" -- don't fill the kernel log with these */ +	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { +		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); +		return -ETIMEDOUT; +	} + +	/* Unload any bytes sent back from the other side */ +	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> +		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); +	if (recv_bytes > recv_size) +		recv_bytes = recv_size; +	 +	for (i = 0; i < recv_bytes; i += 4) +		unpack_aux(REG_READ(ch_data + i), +			   recv + i, recv_bytes - i); + +	return recv_bytes; +} + +/* Write data to the aux channel in native mode */ +static int +cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder, +			  uint16_t address, uint8_t *send, int send_bytes) +{ +	int ret; +	uint8_t	msg[20]; +	int msg_bytes; +	uint8_t	ack; + +	if (send_bytes > 16) +		return -1; +	msg[0] = AUX_NATIVE_WRITE << 4; +	msg[1] = address >> 8; +	msg[2] = address & 0xff; +	msg[3] = send_bytes - 1; +	memcpy(&msg[4], send, send_bytes); +	msg_bytes = send_bytes + 4; +	for (;;) { +		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1); +		if (ret < 0) +			return ret; +		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) +			break; +		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) +			udelay(100); +		else +			return -EIO; +	} +	return send_bytes; +} + +/* Write a single byte to 
the aux channel in native mode */ +static int +cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder, +			    uint16_t address, uint8_t byte) +{ +	return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1); +} + +/* read bytes from a native aux channel */ +static int +cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder, +			 uint16_t address, uint8_t *recv, int recv_bytes) +{ +	uint8_t msg[4]; +	int msg_bytes; +	uint8_t reply[20]; +	int reply_bytes; +	uint8_t ack; +	int ret; + +	msg[0] = AUX_NATIVE_READ << 4; +	msg[1] = address >> 8; +	msg[2] = address & 0xff; +	msg[3] = recv_bytes - 1; + +	msg_bytes = 4; +	reply_bytes = recv_bytes + 1; + +	for (;;) { +		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, +				      reply, reply_bytes); +		if (ret == 0) +			return -EPROTO; +		if (ret < 0) +			return ret; +		ack = reply[0]; +		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { +			memcpy(recv, reply + 1, ret - 1); +			return ret - 1; +		} +		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) +			udelay(100); +		else +			return -EIO; +	} +} + +static int +cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, +		    uint8_t write_byte, uint8_t *read_byte) +{ +	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; +	struct cdv_intel_dp *intel_dp = container_of(adapter, +						struct cdv_intel_dp, +						adapter); +	struct psb_intel_encoder *encoder = intel_dp->encoder; +	uint16_t address = algo_data->address; +	uint8_t msg[5]; +	uint8_t reply[2]; +	unsigned retry; +	int msg_bytes; +	int reply_bytes; +	int ret; + +	/* Set up the command byte */ +	if (mode & MODE_I2C_READ) +		msg[0] = AUX_I2C_READ << 4; +	else +		msg[0] = AUX_I2C_WRITE << 4; + +	if (!(mode & MODE_I2C_STOP)) +		msg[0] |= AUX_I2C_MOT << 4; + +	msg[1] = address >> 8; +	msg[2] = address; + +	switch (mode) { +	case MODE_I2C_WRITE: +		msg[3] = 0; +		msg[4] = write_byte; +		msg_bytes = 5; +		reply_bytes = 1; +		break; +	case MODE_I2C_READ: +		
msg[3] = 0; +		msg_bytes = 4; +		reply_bytes = 2; +		break; +	default: +		msg_bytes = 3; +		reply_bytes = 1; +		break; +	} + +	for (retry = 0; retry < 5; retry++) { +		ret = cdv_intel_dp_aux_ch(encoder, +				      msg, msg_bytes, +				      reply, reply_bytes); +		if (ret < 0) { +			DRM_DEBUG_KMS("aux_ch failed %d\n", ret); +			return ret; +		} + +		switch (reply[0] & AUX_NATIVE_REPLY_MASK) { +		case AUX_NATIVE_REPLY_ACK: +			/* I2C-over-AUX Reply field is only valid +			 * when paired with AUX ACK. +			 */ +			break; +		case AUX_NATIVE_REPLY_NACK: +			DRM_DEBUG_KMS("aux_ch native nack\n"); +			return -EREMOTEIO; +		case AUX_NATIVE_REPLY_DEFER: +			udelay(100); +			continue; +		default: +			DRM_ERROR("aux_ch invalid native reply 0x%02x\n", +				  reply[0]); +			return -EREMOTEIO; +		} + +		switch (reply[0] & AUX_I2C_REPLY_MASK) { +		case AUX_I2C_REPLY_ACK: +			if (mode == MODE_I2C_READ) { +				*read_byte = reply[1]; +			} +			return reply_bytes - 1; +		case AUX_I2C_REPLY_NACK: +			DRM_DEBUG_KMS("aux_i2c nack\n"); +			return -EREMOTEIO; +		case AUX_I2C_REPLY_DEFER: +			DRM_DEBUG_KMS("aux_i2c defer\n"); +			udelay(100); +			break; +		default: +			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); +			return -EREMOTEIO; +		} +	} + +	DRM_ERROR("too many retries, giving up\n"); +	return -EREMOTEIO; +} + +static int +cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name) +{ +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	int ret; + +	DRM_DEBUG_KMS("i2c_init %s\n", name); + +	intel_dp->algo.running = false; +	intel_dp->algo.address = 0; +	intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch; + +	memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); +	intel_dp->adapter.owner = THIS_MODULE; +	intel_dp->adapter.class = I2C_CLASS_DDC; +	strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); +	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; +	
intel_dp->adapter.algo_data = &intel_dp->algo; +	intel_dp->adapter.dev.parent = &connector->base.kdev; + +	if (is_edp(encoder)) +		cdv_intel_edp_panel_vdd_on(encoder); +	ret = i2c_dp_aux_add_bus(&intel_dp->adapter); +	if (is_edp(encoder)) +		cdv_intel_edp_panel_vdd_off(encoder); +	 +	return ret; +} + +void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, +	struct drm_display_mode *adjusted_mode) +{ +	adjusted_mode->hdisplay = fixed_mode->hdisplay; +	adjusted_mode->hsync_start = fixed_mode->hsync_start; +	adjusted_mode->hsync_end = fixed_mode->hsync_end; +	adjusted_mode->htotal = fixed_mode->htotal; + +	adjusted_mode->vdisplay = fixed_mode->vdisplay; +	adjusted_mode->vsync_start = fixed_mode->vsync_start; +	adjusted_mode->vsync_end = fixed_mode->vsync_end; +	adjusted_mode->vtotal = fixed_mode->vtotal; + +	adjusted_mode->clock = fixed_mode->clock; + +	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); +} + +static bool +cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, +		    struct drm_display_mode *adjusted_mode) +{ +	struct drm_psb_private *dev_priv = encoder->dev->dev_private; +	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); +	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; +	int lane_count, clock; +	int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder); +	int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 
1 : 0; +	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; +	int refclock = mode->clock; +	int bpp = 24; + +	if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) { +		cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); +		refclock = intel_dp->panel_fixed_mode->clock; +		bpp = dev_priv->edp.bpp; +	} + +	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { +		for (clock = max_clock; clock >= 0; clock--) { +			int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count); + +			if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) { +				intel_dp->link_bw = bws[clock]; +				intel_dp->lane_count = lane_count; +				adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw); +				DRM_DEBUG_KMS("Display port link bw %02x lane " +						"count %d clock %d\n", +				       intel_dp->link_bw, intel_dp->lane_count, +				       adjusted_mode->clock); +				return true; +			} +		} +	} +	if (is_edp(intel_encoder)) { +		/* okay we failed just pick the highest */ +		intel_dp->lane_count = max_lane_count; +		intel_dp->link_bw = bws[max_clock]; +		adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw); +		DRM_DEBUG_KMS("Force picking display port link bw %02x lane " +			      "count %d clock %d\n", +			      intel_dp->link_bw, intel_dp->lane_count, +			      adjusted_mode->clock); + +		return true; +	} +	return false; +} + +struct cdv_intel_dp_m_n { +	uint32_t	tu; +	uint32_t	gmch_m; +	uint32_t	gmch_n; +	uint32_t	link_m; +	uint32_t	link_n; +}; + +static void +cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den) +{ +	/* +	while (*num > 0xffffff || *den > 0xffffff) { +		*num >>= 1; +		*den >>= 1; +	}*/ +	uint64_t value, m; +	m = *num; +	value = m * (0x800000); +	m = do_div(value, *den); +	*num = value; +	*den = 0x800000; +} + +static void +cdv_intel_dp_compute_m_n(int bpp, +		     int nlanes, +		     int pixel_clock, +		     int link_clock, +		     struct cdv_intel_dp_m_n *m_n) +{ +	
m_n->tu = 64; +	m_n->gmch_m = (pixel_clock * bpp + 7) >> 3; +	m_n->gmch_n = link_clock * nlanes; +	cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); +	m_n->link_m = pixel_clock; +	m_n->link_n = link_clock; +	cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n); +} + +void +cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, +		 struct drm_display_mode *adjusted_mode) +{ +	struct drm_device *dev = crtc->dev; +	struct drm_psb_private *dev_priv = dev->dev_private; +	struct drm_mode_config *mode_config = &dev->mode_config; +	struct drm_encoder *encoder; +	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); +	int lane_count = 4, bpp = 24; +	struct cdv_intel_dp_m_n m_n; +	int pipe = intel_crtc->pipe; + +	/* +	 * Find the lane count in the intel_encoder private +	 */ +	list_for_each_entry(encoder, &mode_config->encoder_list, head) { +		struct psb_intel_encoder *intel_encoder; +		struct cdv_intel_dp *intel_dp; + +		if (encoder->crtc != crtc) +			continue; + +		intel_encoder = to_psb_intel_encoder(encoder); +		intel_dp = intel_encoder->dev_priv; +		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { +			lane_count = intel_dp->lane_count; +			break; +		} else if (is_edp(intel_encoder)) { +			lane_count = intel_dp->lane_count; +			bpp = dev_priv->edp.bpp; +			break; +		} +	} + +	/* +	 * Compute the GMCH and Link ratios. The '3' here is +	 * the number of bytes_per_pixel post-LUT, which we always +	 * set up for 8-bits of R/G/B, or 3 bytes total. 
+	 */ +	cdv_intel_dp_compute_m_n(bpp, lane_count, +			     mode->clock, adjusted_mode->clock, &m_n); + +	{ +		REG_WRITE(PIPE_GMCH_DATA_M(pipe), +			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | +			   m_n.gmch_m); +		REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); +		REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); +		REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); +	} +} + +static void +cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, +		  struct drm_display_mode *adjusted_mode) +{ +	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); +	struct drm_crtc *crtc = encoder->crtc; +	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); +	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; +	struct drm_device *dev = encoder->dev; + +	intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; +	intel_dp->DP |= intel_dp->color_range; + +	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) +		intel_dp->DP |= DP_SYNC_HS_HIGH; +	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) +		intel_dp->DP |= DP_SYNC_VS_HIGH; + +	intel_dp->DP |= DP_LINK_TRAIN_OFF; + +	switch (intel_dp->lane_count) { +	case 1: +		intel_dp->DP |= DP_PORT_WIDTH_1; +		break; +	case 2: +		intel_dp->DP |= DP_PORT_WIDTH_2; +		break; +	case 4: +		intel_dp->DP |= DP_PORT_WIDTH_4; +		break; +	} +	if (intel_dp->has_audio) +		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; + +	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); +	intel_dp->link_configuration[0] = intel_dp->link_bw; +	intel_dp->link_configuration[1] = intel_dp->lane_count; + +	/* +	 * Check for DPCD version > 1.1 and enhanced framing support +	 */ +	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && +	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { +		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; +		intel_dp->DP |= DP_ENHANCED_FRAMING; +	} + +	/* CPT DP's pipe select is decided in TRANS_DP_CTL */ +	if (intel_crtc->pipe == 1) +		intel_dp->DP |= 
DP_PIPEB_SELECT; + +	REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN)); +	DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP); +	if (is_edp(intel_encoder)) { +		uint32_t pfit_control; +		cdv_intel_edp_panel_on(intel_encoder); + +		if (mode->hdisplay != adjusted_mode->hdisplay || +			    mode->vdisplay != adjusted_mode->vdisplay) +			pfit_control = PFIT_ENABLE; +		else +			pfit_control = 0; + +		pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; + +		REG_WRITE(PFIT_CONTROL, pfit_control); +	} +} + + +/* If the sink supports it, try to set the power state appropriately */ +static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode) +{ +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	int ret, i; + +	/* Should have a valid DPCD by this point */ +	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) +		return; + +	if (mode != DRM_MODE_DPMS_ON) { +		ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER, +						  DP_SET_POWER_D3); +		if (ret != 1) +			DRM_DEBUG_DRIVER("failed to write sink power state\n"); +	} else { +		/* +		 * When turning on, we need to retry for 1ms to give the sink +		 * time to wake up. 
+		 */ +		for (i = 0; i < 3; i++) { +			ret = cdv_intel_dp_aux_native_write_1(encoder, +							  DP_SET_POWER, +							  DP_SET_POWER_D0); +			if (ret == 1) +				break; +			udelay(1000); +		} +	} +} + +static void cdv_intel_dp_prepare(struct drm_encoder *encoder) +{ +	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); +	int edp = is_edp(intel_encoder); + +	if (edp) { +		cdv_intel_edp_backlight_off(intel_encoder); +		cdv_intel_edp_panel_off(intel_encoder); +		cdv_intel_edp_panel_vdd_on(intel_encoder); +        } +	/* Wake up the sink first */ +	cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON); +	cdv_intel_dp_link_down(intel_encoder); +	if (edp) +		cdv_intel_edp_panel_vdd_off(intel_encoder); +} + +static void cdv_intel_dp_commit(struct drm_encoder *encoder) +{ +	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); +	int edp = is_edp(intel_encoder); + +	if (edp) +		cdv_intel_edp_panel_on(intel_encoder); +	cdv_intel_dp_start_link_train(intel_encoder); +	cdv_intel_dp_complete_link_train(intel_encoder); +	if (edp) +		cdv_intel_edp_backlight_on(intel_encoder); +} + +static void +cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode) +{ +	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); +	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; +	struct drm_device *dev = encoder->dev; +	uint32_t dp_reg = REG_READ(intel_dp->output_reg); +	int edp = is_edp(intel_encoder); + +	if (mode != DRM_MODE_DPMS_ON) { +		if (edp) { +			cdv_intel_edp_backlight_off(intel_encoder); +			cdv_intel_edp_panel_vdd_on(intel_encoder); +		} +		cdv_intel_dp_sink_dpms(intel_encoder, mode); +		cdv_intel_dp_link_down(intel_encoder); +		if (edp) { +			cdv_intel_edp_panel_vdd_off(intel_encoder); +			cdv_intel_edp_panel_off(intel_encoder); +		} +	} else { +        	if (edp) +			cdv_intel_edp_panel_on(intel_encoder); +		cdv_intel_dp_sink_dpms(intel_encoder, mode); +		if (!(dp_reg & DP_PORT_EN)) { +			
cdv_intel_dp_start_link_train(intel_encoder); +			cdv_intel_dp_complete_link_train(intel_encoder); +		} +		if (edp) +        		cdv_intel_edp_backlight_on(intel_encoder); +	} +} + +/* + * Native read with retry for link status and receiver capability reads for + * cases where the sink may still be asleep. + */ +static bool +cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address, +			       uint8_t *recv, int recv_bytes) +{ +	int ret, i; + +	/* +	 * Sinks are *supposed* to come up within 1ms from an off state, +	 * but we're also supposed to retry 3 times per the spec. +	 */ +	for (i = 0; i < 3; i++) { +		ret = cdv_intel_dp_aux_native_read(encoder, address, recv, +					       recv_bytes); +		if (ret == recv_bytes) +			return true; +		udelay(1000); +	} + +	return false; +} + +/* + * Fetch AUX CH registers 0x202 - 0x207 which contain + * link status information + */ +static bool +cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder) +{ +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	return cdv_intel_dp_aux_native_read_retry(encoder, +					      DP_LANE0_1_STATUS, +					      intel_dp->link_status, +					      DP_LINK_STATUS_SIZE); +} + +static uint8_t +cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], +		     int r) +{ +	return link_status[r - DP_LANE0_1_STATUS]; +} + +static uint8_t +cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], +				 int lane) +{ +	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); +	int	    s = ((lane & 1) ? +			 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : +			 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); +	uint8_t l = cdv_intel_dp_link_status(link_status, i); + +	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +static uint8_t +cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], +				      int lane) +{ +	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); +	int	    s = ((lane & 1) ? 
+			 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : +			 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); +	uint8_t l = cdv_intel_dp_link_status(link_status, i); + +	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + + +#if 0 +static char	*voltage_names[] = { +	"0.4V", "0.6V", "0.8V", "1.2V" +}; +static char	*pre_emph_names[] = { +	"0dB", "3.5dB", "6dB", "9.5dB" +}; +static char	*link_train_names[] = { +	"pattern 1", "pattern 2", "idle", "off" +}; +#endif + +#define CDV_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_1200 +/* +static uint8_t +cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing) +{ +	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { +	case DP_TRAIN_VOLTAGE_SWING_400: +		return DP_TRAIN_PRE_EMPHASIS_6; +	case DP_TRAIN_VOLTAGE_SWING_600: +		return DP_TRAIN_PRE_EMPHASIS_6; +	case DP_TRAIN_VOLTAGE_SWING_800: +		return DP_TRAIN_PRE_EMPHASIS_3_5; +	case DP_TRAIN_VOLTAGE_SWING_1200: +	default: +		return DP_TRAIN_PRE_EMPHASIS_0; +	} +} +*/ +static void +cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder) +{ +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	uint8_t v = 0; +	uint8_t p = 0; +	int lane; + +	for (lane = 0; lane < intel_dp->lane_count; lane++) { +		uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane); +		uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); + +		if (this_v > v) +			v = this_v; +		if (this_p > p) +			p = this_p; +	} +	 +	if (v >= CDV_DP_VOLTAGE_MAX) +		v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; + +	if (p == DP_TRAIN_PRE_EMPHASIS_MASK) +		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; +		 +	for (lane = 0; lane < 4; lane++) +		intel_dp->train_set[lane] = v | p; +} + + +static uint8_t +cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], +		      int lane) +{ +	int i = DP_LANE0_1_STATUS + (lane >> 1); +	int s = (lane & 1) * 4; +	uint8_t l = cdv_intel_dp_link_status(link_status, i); + +	return (l >> s) & 0xf; +} + +/* Check for clock recovery is done on 
all channels */ +static bool +cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) +{ +	int lane; +	uint8_t lane_status; + +	for (lane = 0; lane < lane_count; lane++) { +		lane_status = cdv_intel_get_lane_status(link_status, lane); +		if ((lane_status & DP_LANE_CR_DONE) == 0) +			return false; +	} +	return true; +} + +/* Check to see if channel eq is done on all channels */ +#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ +			 DP_LANE_CHANNEL_EQ_DONE|\ +			 DP_LANE_SYMBOL_LOCKED) +static bool +cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder) +{ +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	uint8_t lane_align; +	uint8_t lane_status; +	int lane; + +	lane_align = cdv_intel_dp_link_status(intel_dp->link_status, +					  DP_LANE_ALIGN_STATUS_UPDATED); +	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) +		return false; +	for (lane = 0; lane < intel_dp->lane_count; lane++) { +		lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane); +		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) +			return false; +	} +	return true; +} + +static bool +cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder, +			uint32_t dp_reg_value, +			uint8_t dp_train_pat) +{ +	 +	struct drm_device *dev = encoder->base.dev; +	int ret; +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; + +	REG_WRITE(intel_dp->output_reg, dp_reg_value); +	REG_READ(intel_dp->output_reg); + +	ret = cdv_intel_dp_aux_native_write_1(encoder, +				    DP_TRAINING_PATTERN_SET, +				    dp_train_pat); + +	if (ret != 1) { +		DRM_DEBUG_KMS("Failure in setting link pattern %x\n", +				dp_train_pat); +		return false; +	} + +	return true; +} + + +static bool +cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder, +			uint8_t dp_train_pat) +{ +	 +	int ret; +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; + +	ret = cdv_intel_dp_aux_native_write(encoder, +					DP_TRAINING_LANE0_SET, +					intel_dp->train_set, +					intel_dp->lane_count); + +	if 
(ret != intel_dp->lane_count) { +		DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt= %d\n", +				intel_dp->train_set[0], intel_dp->lane_count); +		return false; +	} +	return true; +} + +static void +cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level) +{ +	struct drm_device *dev = encoder->base.dev; +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	struct ddi_regoff *ddi_reg; +	int vswing, premph, index; + +	if (intel_dp->output_reg == DP_B) +		ddi_reg = &ddi_DP_train_table[0]; +	else +		ddi_reg = &ddi_DP_train_table[1]; + +	vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK); +	premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >> +				DP_TRAIN_PRE_EMPHASIS_SHIFT; + +	if (vswing + premph > 3) +		return; +#ifdef CDV_FAST_LINK_TRAIN +	return; +#endif +	DRM_DEBUG_KMS("Test2\n"); +	//return ; +	cdv_sb_reset(dev); +	/* ;Swing voltage programming +        ;gfx_dpio_set_reg(0xc058, 0x0505313A) */ +	cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A); + +	/* ;gfx_dpio_set_reg(0x8154, 0x43406055) */ +	cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055); + +	/* ;gfx_dpio_set_reg(0x8148, 0x55338954) +	 * The VSwing_PreEmph table is also considered based on the vswing/premp +	 */ +	index = (vswing + premph) * 2; +	if (premph == 1 && vswing == 1) { +		cdv_sb_write(dev, ddi_reg->VSwing2, 0x055738954); +	} else +		cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]); + +	/* ;gfx_dpio_set_reg(0x814c, 0x40802040) */ +	if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200) +		cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040); +	else +		cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040); + +	/* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */ +	/* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */ + +	/* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */ +	cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055); + +	/* ;Pre emphasis programming +	 * ;gfx_dpio_set_reg(0xc02c, 0x1f030040) +	 */ +	cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040); + +	/* 
;gfx_dpio_set_reg(0x8124, 0x00004000) */ +	index = 2 * premph + 1; +	cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]); +	return;	 +} + + +/* Enable corresponding port and start training pattern 1 */ +static void +cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder) +{ +	struct drm_device *dev = encoder->base.dev; +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	int i; +	uint8_t voltage; +	bool clock_recovery = false; +	int tries; +	u32 reg; +	uint32_t DP = intel_dp->DP; + +	DP |= DP_PORT_EN; +	DP &= ~DP_LINK_TRAIN_MASK; +		 +	reg = DP;	 +	reg |= DP_LINK_TRAIN_PAT_1; +	/* Enable output, wait for it to become active */ +	REG_WRITE(intel_dp->output_reg, reg); +	REG_READ(intel_dp->output_reg); +	psb_intel_wait_for_vblank(dev); + +	DRM_DEBUG_KMS("Link config\n"); +	/* Write the link configuration data */ +	cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET, +				  intel_dp->link_configuration, +				  2); + +	memset(intel_dp->train_set, 0, 4); +	voltage = 0; +	tries = 0; +	clock_recovery = false; + +	DRM_DEBUG_KMS("Start train\n"); +		reg = DP | DP_LINK_TRAIN_PAT_1; + + +	for (;;) { +		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ +		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n", +				intel_dp->train_set[0], +				intel_dp->link_configuration[0], +				intel_dp->link_configuration[1]); + +		if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) { +			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n"); +		} +		cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]); +		/* Set training pattern 1 */ + +		cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1); + +		udelay(200); +		if (!cdv_intel_dp_get_link_status(encoder)) +			break; + +		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n", +				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2], +				intel_dp->link_status[3], intel_dp->link_status[4], 
intel_dp->link_status[5]); + +		if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { +			DRM_DEBUG_KMS("PT1 train is done\n"); +			clock_recovery = true; +			break; +		} + +		/* Check to see if we've tried the max voltage */ +		for (i = 0; i < intel_dp->lane_count; i++) +			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) +				break; +		if (i == intel_dp->lane_count) +			break; + +		/* Check to see if we've tried the same voltage 5 times */ +		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { +			++tries; +			if (tries == 5) +				break; +		} else +			tries = 0; +		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + +		/* Compute new intel_dp->train_set as requested by target */ +		cdv_intel_get_adjust_train(encoder); + +	} + +	if (!clock_recovery) { +		DRM_DEBUG_KMS("failure in DP patter 1 training, train set %x\n", intel_dp->train_set[0]); +	} +	 +	intel_dp->DP = DP; +} + +static void +cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder) +{ +	struct drm_device *dev = encoder->base.dev; +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	bool channel_eq = false; +	int tries, cr_tries; +	u32 reg; +	uint32_t DP = intel_dp->DP; + +	/* channel equalization */ +	tries = 0; +	cr_tries = 0; +	channel_eq = false; + +	DRM_DEBUG_KMS("\n"); +		reg = DP | DP_LINK_TRAIN_PAT_2; + +	for (;;) { + +		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n", +				intel_dp->train_set[0], +				intel_dp->link_configuration[0], +				intel_dp->link_configuration[1]); +        	/* channel eq pattern */ + +		if (!cdv_intel_dp_set_link_train(encoder, reg, +					     DP_TRAINING_PATTERN_2)) { +			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n"); +		} +		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ + +		if (cr_tries > 5) { +			DRM_ERROR("failed to train DP, aborting\n"); +			cdv_intel_dp_link_down(encoder); +			break; +		} + +		
cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]); + +		cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2); + +		udelay(1000); +		if (!cdv_intel_dp_get_link_status(encoder)) +			break; + +		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n", +				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2], +				intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]); + +		/* Make sure clock is still ok */ +		if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { +			cdv_intel_dp_start_link_train(encoder); +			cr_tries++; +			continue; +		} + +		if (cdv_intel_channel_eq_ok(encoder)) { +			DRM_DEBUG_KMS("PT2 train is done\n"); +			channel_eq = true; +			break; +		} + +		/* Try 5 times, then try clock recovery if that fails */ +		if (tries > 5) { +			cdv_intel_dp_link_down(encoder); +			cdv_intel_dp_start_link_train(encoder); +			tries = 0; +			cr_tries++; +			continue; +		} + +		/* Compute new intel_dp->train_set as requested by target */ +		cdv_intel_get_adjust_train(encoder); +		++tries; + +	} + +	reg = DP | DP_LINK_TRAIN_OFF; + +	REG_WRITE(intel_dp->output_reg, reg); +	REG_READ(intel_dp->output_reg); +	cdv_intel_dp_aux_native_write_1(encoder, +				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); +} + +static void +cdv_intel_dp_link_down(struct psb_intel_encoder *encoder) +{ +	struct drm_device *dev = encoder->base.dev; +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	uint32_t DP = intel_dp->DP; + +	if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) +		return; + +	DRM_DEBUG_KMS("\n"); + + +	{ +		DP &= ~DP_LINK_TRAIN_MASK; +		REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); +	} +	REG_READ(intel_dp->output_reg); + +	msleep(17); + +	REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); +	REG_READ(intel_dp->output_reg); +} + +static enum drm_connector_status +cdv_dp_detect(struct psb_intel_encoder *encoder) +{ +	struct cdv_intel_dp *intel_dp = 
encoder->dev_priv; +	enum drm_connector_status status; + +	status = connector_status_disconnected; +	if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd, +				     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) +	{ +		if (intel_dp->dpcd[DP_DPCD_REV] != 0) +			status = connector_status_connected; +	} +	if (status == connector_status_connected) +		DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", +			intel_dp->dpcd[0], intel_dp->dpcd[1], +			intel_dp->dpcd[2], intel_dp->dpcd[3]); +	return status; +} + +/** + * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. + * + * \return true if DP port is connected. + * \return false if DP port is disconnected. + */ +static enum drm_connector_status +cdv_intel_dp_detect(struct drm_connector *connector, bool force) +{ +	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	enum drm_connector_status status; +	struct edid *edid = NULL; +	int edp = is_edp(encoder); + +	intel_dp->has_audio = false; + +	if (edp) +		cdv_intel_edp_panel_vdd_on(encoder); +	status = cdv_dp_detect(encoder); +	if (status != connector_status_connected) { +		if (edp) +			cdv_intel_edp_panel_vdd_off(encoder); +		return status; +        } + +	if (intel_dp->force_audio) { +		intel_dp->has_audio = intel_dp->force_audio > 0; +	} else { +		edid = drm_get_edid(connector, &intel_dp->adapter); +		if (edid) { +			intel_dp->has_audio = drm_detect_monitor_audio(edid); +			kfree(edid); +		} +	} +	if (edp) +		cdv_intel_edp_panel_vdd_off(encoder); + +	return connector_status_connected; +} + +static int cdv_intel_dp_get_modes(struct drm_connector *connector) +{ +	struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector); +	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; +	struct edid *edid = NULL; +	int ret = 0; +	int edp = is_edp(intel_encoder); + + +	edid = drm_get_edid(connector, &intel_dp->adapter); +	if (edid) { +		
drm_mode_connector_update_edid_property(connector, edid); +		ret = drm_add_edid_modes(connector, edid); +		kfree(edid); +	} + +	if (is_edp(intel_encoder)) { +		struct drm_device *dev = connector->dev; +		struct drm_psb_private *dev_priv = dev->dev_private; +		 +		cdv_intel_edp_panel_vdd_off(intel_encoder); +		if (ret) { +			if (edp && !intel_dp->panel_fixed_mode) { +				struct drm_display_mode *newmode; +				list_for_each_entry(newmode, &connector->probed_modes, +					    head) { +					if (newmode->type & DRM_MODE_TYPE_PREFERRED) { +						intel_dp->panel_fixed_mode = +							drm_mode_duplicate(dev, newmode); +						break; +					} +				} +			} + +			return ret; +		} +		if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) { +			intel_dp->panel_fixed_mode = +				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); +			if (intel_dp->panel_fixed_mode) { +				intel_dp->panel_fixed_mode->type |= +					DRM_MODE_TYPE_PREFERRED; +			} +		} +		if (intel_dp->panel_fixed_mode != NULL) { +			struct drm_display_mode *mode; +			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); +			drm_mode_probed_add(connector, mode); +			return 1; +		} +	} + +	return ret; +} + +static bool +cdv_intel_dp_detect_audio(struct drm_connector *connector) +{ +	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	struct edid *edid; +	bool has_audio = false; +	int edp = is_edp(encoder); + +	if (edp) +		cdv_intel_edp_panel_vdd_on(encoder); + +	edid = drm_get_edid(connector, &intel_dp->adapter); +	if (edid) { +		has_audio = drm_detect_monitor_audio(edid); +		kfree(edid); +	} +	if (edp) +		cdv_intel_edp_panel_vdd_off(encoder); + +	return has_audio; +} + +static int +cdv_intel_dp_set_property(struct drm_connector *connector, +		      struct drm_property *property, +		      uint64_t val) +{ +	struct drm_psb_private *dev_priv = connector->dev->dev_private; +	struct psb_intel_encoder *encoder = 
psb_intel_attached_encoder(connector); +	struct cdv_intel_dp *intel_dp = encoder->dev_priv; +	int ret; + +	ret = drm_connector_property_set_value(connector, property, val); +	if (ret) +		return ret; + +	if (property == dev_priv->force_audio_property) { +		int i = val; +		bool has_audio; + +		if (i == intel_dp->force_audio) +			return 0; + +		intel_dp->force_audio = i; + +		if (i == 0) +			has_audio = cdv_intel_dp_detect_audio(connector); +		else +			has_audio = i > 0; + +		if (has_audio == intel_dp->has_audio) +			return 0; + +		intel_dp->has_audio = has_audio; +		goto done; +	} + +	if (property == dev_priv->broadcast_rgb_property) { +		if (val == !!intel_dp->color_range) +			return 0; + +		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; +		goto done; +	} + +	return -EINVAL; + +done: +	if (encoder->base.crtc) { +		struct drm_crtc *crtc = encoder->base.crtc; +		drm_crtc_helper_set_mode(crtc, &crtc->mode, +					 crtc->x, crtc->y, +					 crtc->fb); +	} + +	return 0; +} + +static void +cdv_intel_dp_destroy(struct drm_connector *connector) +{ +	struct psb_intel_encoder *psb_intel_encoder = +					psb_intel_attached_encoder(connector); +	struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv; + +	if (is_edp(psb_intel_encoder)) { +	/*	cdv_intel_panel_destroy_backlight(connector->dev); */ +		if (intel_dp->panel_fixed_mode) { +			kfree(intel_dp->panel_fixed_mode); +			intel_dp->panel_fixed_mode = NULL; +		} +	} +	i2c_del_adapter(&intel_dp->adapter); +	drm_sysfs_connector_remove(connector); +	drm_connector_cleanup(connector); +	kfree(connector); +} + +static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder) +{ +	drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = { +	.dpms = cdv_intel_dp_dpms, +	.mode_fixup = cdv_intel_dp_mode_fixup, +	.prepare = cdv_intel_dp_prepare, +	.mode_set = cdv_intel_dp_mode_set, +	.commit = cdv_intel_dp_commit, +}; + +static const struct drm_connector_funcs 
cdv_intel_dp_connector_funcs = { +	.dpms = drm_helper_connector_dpms, +	.detect = cdv_intel_dp_detect, +	.fill_modes = drm_helper_probe_single_connector_modes, +	.set_property = cdv_intel_dp_set_property, +	.destroy = cdv_intel_dp_destroy, +}; + +static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = { +	.get_modes = cdv_intel_dp_get_modes, +	.mode_valid = cdv_intel_dp_mode_valid, +	.best_encoder = psb_intel_best_encoder, +}; + +static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = { +	.destroy = cdv_intel_dp_encoder_destroy, +}; + + +static void cdv_intel_dp_add_properties(struct drm_connector *connector) +{ +	cdv_intel_attach_force_audio_property(connector); +	cdv_intel_attach_broadcast_rgb_property(connector); +} + +/* check the VBT to see whether the eDP is on DP-D port */ +static bool cdv_intel_dpc_is_edp(struct drm_device *dev) +{ +	struct drm_psb_private *dev_priv = dev->dev_private; +	struct child_device_config *p_child; +	int i; + +	if (!dev_priv->child_dev_num) +		return false; + +	for (i = 0; i < dev_priv->child_dev_num; i++) { +		p_child = dev_priv->child_dev + i; + +		if (p_child->dvo_port == PORT_IDPC && +		    p_child->device_type == DEVICE_TYPE_eDP) +			return true; +	} +	return false; +} + +/* Cedarview display clock gating + +   We need this disable dot get correct behaviour while enabling +   DP/eDP. 
TODO - investigate if we can turn it back to normality +   after enabling */ +static void cdv_disable_intel_clock_gating(struct drm_device *dev) +{ +	u32 reg_value; +	reg_value = REG_READ(DSPCLK_GATE_D); + +	reg_value |= (DPUNIT_PIPEB_GATE_DISABLE | +			DPUNIT_PIPEA_GATE_DISABLE | +			DPCUNIT_CLOCK_GATE_DISABLE | +			DPLSUNIT_CLOCK_GATE_DISABLE | +			DPOUNIT_CLOCK_GATE_DISABLE | +		 	DPIOUNIT_CLOCK_GATE_DISABLE);	 + +	REG_WRITE(DSPCLK_GATE_D, reg_value); + +	udelay(500);		 +} + +void +cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg) +{ +	struct psb_intel_encoder *psb_intel_encoder; +	struct psb_intel_connector *psb_intel_connector; +	struct drm_connector *connector; +	struct drm_encoder *encoder; +	struct cdv_intel_dp *intel_dp; +	const char *name = NULL; +	int type = DRM_MODE_CONNECTOR_DisplayPort; + +	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); +	if (!psb_intel_encoder) +		return; +        psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); +        if (!psb_intel_connector) +                goto err_connector; +	intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL); +	if (!intel_dp) +	        goto err_priv; + +	if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev)) +		type = DRM_MODE_CONNECTOR_eDP; + +	connector = &psb_intel_connector->base; +	encoder = &psb_intel_encoder->base; + +	drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); +	drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); + +	psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + +	if (type == DRM_MODE_CONNECTOR_DisplayPort) +        	psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; +        else +		psb_intel_encoder->type = INTEL_OUTPUT_EDP; + + +	psb_intel_encoder->dev_priv=intel_dp; +	intel_dp->encoder = psb_intel_encoder; +	intel_dp->output_reg = output_reg; +	 +	drm_encoder_helper_add(encoder, 
&cdv_intel_dp_helper_funcs); +	drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs); + +	connector->polled = DRM_CONNECTOR_POLL_HPD; +	connector->interlace_allowed = false; +	connector->doublescan_allowed = false; + +	drm_sysfs_connector_add(connector); + +	/* Set up the DDC bus. */ +	switch (output_reg) { +		case DP_B: +			name = "DPDDC-B"; +			psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT); +			break; +		case DP_C: +			name = "DPDDC-C"; +			psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT); +			break; +	} + +	cdv_disable_intel_clock_gating(dev); + +	cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name); +        /* FIXME:fail check */ +	cdv_intel_dp_add_properties(connector); + +	if (is_edp(psb_intel_encoder)) { +		int ret; +		struct edp_power_seq cur; +                u32 pp_on, pp_off, pp_div; +		u32 pwm_ctrl; + +		pp_on = REG_READ(PP_CONTROL); +		pp_on &= ~PANEL_UNLOCK_MASK; +	        pp_on |= PANEL_UNLOCK_REGS; +		 +		REG_WRITE(PP_CONTROL, pp_on); + +		pwm_ctrl = REG_READ(BLC_PWM_CTL2); +		pwm_ctrl |= PWM_PIPE_B; +		REG_WRITE(BLC_PWM_CTL2, pwm_ctrl); + +                pp_on = REG_READ(PP_ON_DELAYS); +                pp_off = REG_READ(PP_OFF_DELAYS); +                pp_div = REG_READ(PP_DIVISOR); +	 +		/* Pull timing values out of registers */ +                cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> +                        PANEL_POWER_UP_DELAY_SHIFT; + +                cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> +                        PANEL_LIGHT_ON_DELAY_SHIFT; + +                cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> +                        PANEL_LIGHT_OFF_DELAY_SHIFT; + +                cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> +                        PANEL_POWER_DOWN_DELAY_SHIFT; + +                cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> +                               PANEL_POWER_CYCLE_DELAY_SHIFT); + +                DRM_DEBUG_KMS("cur t1_t3 %d 
t8 %d t9 %d t10 %d t11_t12 %d\n", +                              cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); + + +		intel_dp->panel_power_up_delay = cur.t1_t3 / 10; +                intel_dp->backlight_on_delay = cur.t8 / 10; +                intel_dp->backlight_off_delay = cur.t9 / 10; +                intel_dp->panel_power_down_delay = cur.t10 / 10; +                intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100; + +                DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", +                              intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, +                              intel_dp->panel_power_cycle_delay); + +                DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", +                              intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + + +		cdv_intel_edp_panel_vdd_on(psb_intel_encoder); +		ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV, +					       intel_dp->dpcd, +					       sizeof(intel_dp->dpcd)); +		cdv_intel_edp_panel_vdd_off(psb_intel_encoder); +		if (ret == 0) { +			/* if this fails, presume the device is a ghost */ +			DRM_INFO("failed to retrieve link info, disabling eDP\n"); +			cdv_intel_dp_encoder_destroy(encoder); +			cdv_intel_dp_destroy(connector); +			goto err_priv; +		} else { +        		DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", +				intel_dp->dpcd[0], intel_dp->dpcd[1],  +				intel_dp->dpcd[2], intel_dp->dpcd[3]); +			 +		} +		/* The CDV reference driver moves pnale backlight setup into the displays that +		   have a backlight: this is a good idea and one we should probably adopt, however +		   we need to migrate all the drivers before we can do that */ +                /*cdv_intel_panel_setup_backlight(dev); */ +	} +	return; + +err_priv: +	kfree(psb_intel_connector); +err_connector: +	kfree(psb_intel_encoder); +} diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c 
b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index a86f87b9ddd..b1b77bb92a8 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -157,8 +157,6 @@ static enum drm_connector_status cdv_hdmi_detect(  			hdmi_priv->has_hdmi_audio =  						drm_detect_monitor_audio(edid);  		} - -		psb_intel_connector->base.display_info.raw_edid = NULL;  		kfree(edid);  	}  	return status; @@ -352,9 +350,11 @@ void cdv_hdmi_init(struct drm_device *dev,  	switch (reg) {  	case SDVOB:  		ddc_bus = GPIOE; +		psb_intel_encoder->ddi_select = DDI0_SELECT;  		break;  	case SDVOC:  		ddc_bus = GPIOD; +		psb_intel_encoder->ddi_select = DDI1_SELECT;  		break;  	default:  		DRM_ERROR("unknown reg 0x%x for HDMI\n", reg); diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index c7f9468b74b..b362dd39bf5 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -506,16 +506,8 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,  							property,  							value))  			return -1; -		else { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE -			struct drm_psb_private *dev_priv = -						encoder->dev->dev_private; -			struct backlight_device *bd = -						dev_priv->backlight_device; -			bd->props.brightness = value; -			backlight_update_status(bd); -#endif -		} +		else +                        gma_backlight_set(encoder->dev, value);  	} else if (!strcmp(property->name, "DPMS") && encoder) {  		struct drm_encoder_helper_funcs *helpers =  					encoder->helper_private; diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 5732b5702e1..884ba73ac6c 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -764,6 +764,13 @@ static void psb_setup_outputs(struct drm_device *dev)  		        crtc_mask = dev_priv->ops->hdmi_mask;  			clone_mask = (1 << INTEL_OUTPUT_HDMI);  			break; +		case 
INTEL_OUTPUT_DISPLAYPORT: +			crtc_mask = (1 << 0) | (1 << 1); +			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); +			break; +		case INTEL_OUTPUT_EDP: +			crtc_mask = (1 << 1); +			clone_mask = (1 << INTEL_OUTPUT_EDP);  		}  		encoder->possible_crtcs = crtc_mask;  		encoder->possible_clones = diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index fc7d144bc2d..df20546a2a3 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -36,7 +36,12 @@ int psb_gem_init_object(struct drm_gem_object *obj)  void psb_gem_free_object(struct drm_gem_object *obj)  {  	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); -	drm_gem_object_release_wrap(obj); + +	/* Remove the list map if one is present */ +	if (obj->map_list.map) +		drm_gem_free_mmap_offset(obj); +	drm_gem_object_release(obj); +  	/* This must occur last as it frees up the memory of the GEM object */  	psb_gtt_free_range(obj->dev, gtt);  } @@ -77,7 +82,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,  	/* Make it mmapable */  	if (!obj->map_list.map) { -		ret = gem_create_mmap_offset(obj); +		ret = drm_gem_create_mmap_offset(obj);  		if (ret)  			goto out;  	} diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c deleted file mode 100644 index 3c17634f606..00000000000 --- a/drivers/gpu/drm/gma500/gem_glue.c +++ /dev/null @@ -1,90 +0,0 @@ -/************************************************************************** - * Copyright (c) 2011, Intel Corporation. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - **************************************************************************/ - -#include <drm/drmP.h> -#include <drm/drm.h> -#include "gem_glue.h" - -void drm_gem_object_release_wrap(struct drm_gem_object *obj) -{ -	/* Remove the list map if one is present */ -	if (obj->map_list.map) { -		struct drm_gem_mm *mm = obj->dev->mm_private; -		struct drm_map_list *list = &obj->map_list; -		drm_ht_remove_item(&mm->offset_hash, &list->hash); -		drm_mm_put_block(list->file_offset_node); -		kfree(list->map); -		list->map = NULL; -	} -	drm_gem_object_release(obj); -} - -/** - *	gem_create_mmap_offset		-	invent an mmap offset - *	@obj: our object - * - *	Standard implementation of offset generation for mmap as is - *	duplicated in several drivers. This belongs in GEM. 
- */ -int gem_create_mmap_offset(struct drm_gem_object *obj) -{ -	struct drm_device *dev = obj->dev; -	struct drm_gem_mm *mm = dev->mm_private; -	struct drm_map_list *list; -	struct drm_local_map *map; -	int ret; - -	list = &obj->map_list; -	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); -	if (list->map == NULL) -		return -ENOMEM; -	map = list->map; -	map->type = _DRM_GEM; -	map->size = obj->size; -	map->handle = obj; - -	list->file_offset_node = drm_mm_search_free(&mm->offset_manager, -					obj->size / PAGE_SIZE, 0, 0); -	if (!list->file_offset_node) { -		dev_err(dev->dev, "failed to allocate offset for bo %d\n", -								obj->name); -		ret = -ENOSPC; -		goto free_it; -	} -	list->file_offset_node = drm_mm_get_block(list->file_offset_node, -					obj->size / PAGE_SIZE, 0); -	if (!list->file_offset_node) { -		ret = -ENOMEM; -		goto free_it; -	} -	list->hash.key = list->file_offset_node->start; -	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); -	if (ret) { -		dev_err(dev->dev, "failed to add to map hash\n"); -		goto free_mm; -	} -	return 0; - -free_mm: -	drm_mm_put_block(list->file_offset_node); -free_it: -	kfree(list->map); -	list->map = NULL; -	return ret; -} diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h deleted file mode 100644 index ce5ce30f74d..00000000000 --- a/drivers/gpu/drm/gma500/gem_glue.h +++ /dev/null @@ -1,2 +0,0 @@ -extern void drm_gem_object_release_wrap(struct drm_gem_object *obj); -extern int gem_create_mmap_offset(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c index 8d7caf0f363..4fb79cf00ed 100644 --- a/drivers/gpu/drm/gma500/intel_bios.c +++ b/drivers/gpu/drm/gma500/intel_bios.c @@ -54,6 +54,98 @@ static void *find_section(struct bdb_header *bdb, int section_id)  	return NULL;  } +static void +parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb) +{ +	struct bdb_edp *edp; +	struct edp_power_seq *edp_pps; +	
struct edp_link_params *edp_link_params; +	uint8_t	panel_type; + +	edp = find_section(bdb, BDB_EDP); +	 +	dev_priv->edp.bpp = 18; +	if (!edp) { +		if (dev_priv->edp.support) { +			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n", +				      dev_priv->edp.bpp); +		} +		return; +	} + +	panel_type = dev_priv->panel_type; +	switch ((edp->color_depth >> (panel_type * 2)) & 3) { +	case EDP_18BPP: +		dev_priv->edp.bpp = 18; +		break; +	case EDP_24BPP: +		dev_priv->edp.bpp = 24; +		break; +	case EDP_30BPP: +		dev_priv->edp.bpp = 30; +		break; +	} + +	/* Get the eDP sequencing and link info */ +	edp_pps = &edp->power_seqs[panel_type]; +	edp_link_params = &edp->link_params[panel_type]; + +	dev_priv->edp.pps = *edp_pps; + +	DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", +				dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,  +				dev_priv->edp.pps.t9, dev_priv->edp.pps.t10, +				dev_priv->edp.pps.t11_t12); + +	dev_priv->edp.rate = edp_link_params->rate ? 
DP_LINK_BW_2_7 : +		DP_LINK_BW_1_62; +	switch (edp_link_params->lanes) { +	case 0: +		dev_priv->edp.lanes = 1; +		break; +	case 1: +		dev_priv->edp.lanes = 2; +		break; +	case 3: +	default: +		dev_priv->edp.lanes = 4; +		break; +	} +	DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n", +			dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp); + +	switch (edp_link_params->preemphasis) { +	case 0: +		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; +		break; +	case 1: +		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; +		break; +	case 2: +		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; +		break; +	case 3: +		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; +		break; +	} +	switch (edp_link_params->vswing) { +	case 0: +		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; +		break; +	case 1: +		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; +		break; +	case 2: +		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; +		break; +	case 3: +		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; +		break; +	} +	DRM_DEBUG_KMS("VBT reports EDP: VSwing  %d, Preemph %d\n", +			dev_priv->edp.vswing, dev_priv->edp.preemphasis); +} +  static u16  get_blocksize(void *p)  { @@ -154,6 +246,8 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,  		return;  	dev_priv->lvds_dither = lvds_options->pixel_dither; +	dev_priv->panel_type = lvds_options->panel_type; +  	if (lvds_options->panel_type == 0xff)  		return; @@ -340,6 +434,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,  	if (!driver)  		return; +	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) +		dev_priv->edp.support = 1; +  	/* This bit means to use 96Mhz for DPLL_A or not */  	if (driver->primary_lfp_id)  		dev_priv->dplla_96mhz = true; @@ -437,6 +534,9 @@ int psb_intel_init_bios(struct drm_device *dev)  	size_t size;  	int i; + +	dev_priv->panel_type = 0xff; +  	/* XXX Should this validation be moved to intel_opregion.c? 
*/  	if (dev_priv->opregion.vbt) {  		struct vbt_header *vbt = dev_priv->opregion.vbt; @@ -477,6 +577,7 @@ int psb_intel_init_bios(struct drm_device *dev)  	parse_sdvo_device_mapping(dev_priv, bdb);  	parse_device_mapping(dev_priv, bdb);  	parse_backlight_data(dev_priv, bdb); +	parse_edp(dev_priv, bdb);  	if (bios)  		pci_unmap_rom(pdev, bios); diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h index 2e95523b84b..c6267c98c9e 100644 --- a/drivers/gpu/drm/gma500/intel_bios.h +++ b/drivers/gpu/drm/gma500/intel_bios.h @@ -23,6 +23,7 @@  #define _I830_BIOS_H_  #include <drm/drmP.h> +#include <drm/drm_dp_helper.h>  struct vbt_header {  	u8 signature[20];		/**< Always starts with 'VBT$' */ @@ -93,6 +94,7 @@ struct vbios_data {  #define BDB_SDVO_LVDS_PNP_IDS	 24  #define BDB_SDVO_LVDS_POWER_SEQ	 25  #define BDB_TV_OPTIONS		 26 +#define BDB_EDP			 27  #define BDB_LVDS_OPTIONS	 40  #define BDB_LVDS_LFP_DATA_PTRS	 41  #define BDB_LVDS_LFP_DATA	 42 @@ -391,6 +393,11 @@ struct bdb_sdvo_lvds_options {  	u8 panel_misc_bits_4;  } __attribute__((packed)); +#define BDB_DRIVER_FEATURE_NO_LVDS		0 +#define BDB_DRIVER_FEATURE_INT_LVDS		1 +#define BDB_DRIVER_FEATURE_SDVO_LVDS		2 +#define BDB_DRIVER_FEATURE_EDP			3 +  struct bdb_driver_features {  	u8 boot_dev_algorithm:1;  	u8 block_display_switch:1; @@ -431,6 +438,45 @@ struct bdb_driver_features {  	u8 custom_vbt_version;  } __attribute__((packed)); +#define EDP_18BPP	0 +#define EDP_24BPP	1 +#define EDP_30BPP	2 +#define EDP_RATE_1_62	0 +#define EDP_RATE_2_7	1 +#define EDP_LANE_1	0 +#define EDP_LANE_2	1 +#define EDP_LANE_4	3 +#define EDP_PREEMPHASIS_NONE	0 +#define EDP_PREEMPHASIS_3_5dB	1 +#define EDP_PREEMPHASIS_6dB	2 +#define EDP_PREEMPHASIS_9_5dB	3 +#define EDP_VSWING_0_4V		0 +#define EDP_VSWING_0_6V		1 +#define EDP_VSWING_0_8V		2 +#define EDP_VSWING_1_2V		3 + +struct edp_power_seq { +	u16 t1_t3; +	u16 t8; +	u16 t9; +	u16 t10; +	u16 t11_t12; +} __attribute__ ((packed)); + +struct edp_link_params { 
+	u8 rate:4; +	u8 lanes:4; +	u8 preemphasis:4; +	u8 vswing:4; +} __attribute__ ((packed)); + +struct bdb_edp { +	struct edp_power_seq power_seqs[16]; +	u32 color_depth; +	u32 sdrrs_msa_timing_delay; +	struct edp_link_params link_params[16]; +} __attribute__ ((packed)); +  extern int psb_intel_init_bios(struct drm_device *dev);  extern void psb_intel_destroy_bios(struct drm_device *dev); diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 5675d93b420..32dba2ab53e 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -299,17 +299,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,  		if (drm_connector_property_set_value(connector, property,  									value))  			goto set_prop_error; -		else { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE -			struct backlight_device *psb_bd; - -			psb_bd = mdfld_get_backlight_device(); -			if (psb_bd) { -				psb_bd->props.brightness = value; -				mdfld_set_brightness(psb_bd); -			} -#endif -		} +		else +			gma_backlight_set(encoder->dev, value);  	}  set_prop_done:  	return 0; diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c index b2a790bd989..850cd3fbb96 100644 --- a/drivers/gpu/drm/gma500/mid_bios.c +++ b/drivers/gpu/drm/gma500/mid_bios.c @@ -118,20 +118,20 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)  					dev_priv->platform_rev_id);  } -struct vbt_header { +struct mid_vbt_header {  	u32 signature;  	u8 revision;  } __packed;  /* The same for r0 and r1 */  struct vbt_r0 { -	struct vbt_header vbt_header; +	struct mid_vbt_header vbt_header;  	u8 size;  	u8 checksum;  } __packed;  struct vbt_r10 { -	struct vbt_header vbt_header; +	struct mid_vbt_header vbt_header;  	u8 checksum;  	u16 size;  	u8 panel_count; @@ -281,7 +281,7 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)  	struct drm_device *dev = dev_priv->dev;  	u32 addr;  	u8 __iomem 
*vbt_virtual; -	struct vbt_header vbt_header; +	struct mid_vbt_header vbt_header;  	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));  	int ret = -1; diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 2eb3dc4e9c9..69e51e903f3 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -252,7 +252,6 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)  	if (edid) {  		drm_mode_connector_update_edid_property(connector, edid);  		ret = drm_add_edid_modes(connector, edid); -		connector->display_info.raw_edid = NULL;  	}  	/* diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c index c430bd42468..ad0d6de938f 100644 --- a/drivers/gpu/drm/gma500/opregion.c +++ b/drivers/gpu/drm/gma500/opregion.c @@ -166,8 +166,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)  	if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {  		int max = bd->props.max_brightness; -		bd->props.brightness = bclp * max / 255; -		backlight_update_status(bd); +		gma_backlight_set(dev, bclp * max / 255);  	}  	asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID; diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c index 5971bc82b76..f1432f096e5 100644 --- a/drivers/gpu/drm/gma500/psb_device.c +++ b/drivers/gpu/drm/gma500/psb_device.c @@ -290,6 +290,7 @@ static void psb_get_core_freq(struct drm_device *dev)  	case 6:  	case 7:  		dev_priv->core_freq = 266; +		break;  	default:  		dev_priv->core_freq = 0;  	} diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 1bd115ecefe..223ff5b1b5c 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -24,10 +24,10 @@  #include <drm/drmP.h>  #include "drm_global.h" -#include "gem_glue.h"  #include "gma_drm.h"  #include "psb_reg.h"  #include "psb_intel_drv.h" +#include "intel_bios.h"  #include 
"gtt.h"  #include "power.h"  #include "opregion.h" @@ -613,6 +613,8 @@ struct drm_psb_private {  	 */  	struct backlight_device *backlight_device;  	struct drm_property *backlight_property; +	bool backlight_enabled; +	int backlight_level;  	uint32_t blc_adj1;  	uint32_t blc_adj2; @@ -640,6 +642,19 @@ struct drm_psb_private {  	int mdfld_panel_id;  	bool dplla_96mhz;	/* DPLL data from the VBT */ + +	struct { +		int rate; +		int lanes; +		int preemphasis; +		int vswing; + +		bool initialized; +		bool support; +		int bpp; +		struct edp_power_seq pps; +	} edp; +	uint8_t panel_type;  }; @@ -796,6 +811,9 @@ extern int psb_fbdev_init(struct drm_device *dev);  /* backlight.c */  int gma_backlight_init(struct drm_device *dev);  void gma_backlight_exit(struct drm_device *dev); +void gma_backlight_disable(struct drm_device *dev); +void gma_backlight_enable(struct drm_device *dev); +void gma_backlight_set(struct drm_device *dev, int v);  /* oaktrail_crtc.c */  extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index ebe1a28f60e..90f2d11e686 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -29,10 +29,6 @@   * Display related stuff   */ -/* store information about an Ixxx DVO */ -/* The i830->i865 use multiple DVOs with multiple i2cs */ -/* the i915, i945 have a single sDVO i2c bus - which is different */ -#define MAX_OUTPUTS 6  /* maximum connectors per crtcs in the mode set */  #define INTELFB_CONN_LIMIT 4 @@ -69,6 +65,8 @@  #define INTEL_OUTPUT_HDMI 6  #define INTEL_OUTPUT_MIPI 7  #define INTEL_OUTPUT_MIPI2 8 +#define INTEL_OUTPUT_DISPLAYPORT 9 +#define INTEL_OUTPUT_EDP 10  #define INTEL_DVO_CHIP_NONE 0  #define INTEL_DVO_CHIP_LVDS 1 @@ -133,6 +131,11 @@ struct psb_intel_encoder {  	void (*hot_plug)(struct psb_intel_encoder *);  	int crtc_mask;  	int clone_mask; +	u32 ddi_select;	/* Channel info */ +#define DDI0_SELECT	
0x01 +#define DDI1_SELECT	0x02 +#define DP_MASK		0x8000 +#define DDI_MASK	0x03  	void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */  	/* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's @@ -190,7 +193,6 @@ struct psb_intel_crtc {  	u32 mode_flags;  	bool active; -	bool crtc_enable;  	/* Saved Crtc HW states */  	struct psb_intel_crtc_state *crtc_state; @@ -285,4 +287,20 @@ extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);  extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);  extern void gma_intel_teardown_gmbus(struct drm_device *dev); +/* DP support */ +extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg); +extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc, +					struct drm_display_mode *mode, +					struct drm_display_mode *adjusted_mode); + +extern void psb_intel_attach_force_audio_property(struct drm_connector *connector); +extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector); + +extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val); +extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val); +extern void cdv_sb_reset(struct drm_device *dev); + +extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector); +extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector); +  #endif				/* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 37adc9edf97..2a4c3a9e33e 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -630,17 +630,8 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,  							property,  							value))  			goto set_prop_error; -		else { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE -			struct drm_psb_private *devp = -						encoder->dev->dev_private; -			struct backlight_device *bd = 
devp->backlight_device; -			if (bd) { -				bd->props.brightness = value; -				backlight_update_status(bd); -			} -#endif -		} +		else +                        gma_backlight_set(encoder->dev, value);  	} else if (!strcmp(property->name, "DPMS")) {  		struct drm_encoder_helper_funcs *hfuncs  						= encoder->helper_private; diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h index 8e8c8efb0a8..d914719c4b6 100644 --- a/drivers/gpu/drm/gma500/psb_intel_reg.h +++ b/drivers/gpu/drm/gma500/psb_intel_reg.h @@ -173,15 +173,46 @@  #define PP_SEQUENCE_ON			(1 << 28)  #define PP_SEQUENCE_OFF			(2 << 28)  #define PP_SEQUENCE_MASK		0x30000000 +#define	PP_CYCLE_DELAY_ACTIVE		(1 << 27) +#define	PP_SEQUENCE_STATE_ON_IDLE	(1 << 3) +#define	PP_SEQUENCE_STATE_MASK		0x0000000f +  #define PP_CONTROL		0x61204  #define POWER_TARGET_ON			(1 << 0) +#define	PANEL_UNLOCK_REGS		(0xabcd << 16) +#define	PANEL_UNLOCK_MASK		(0xffff << 16) +#define	EDP_FORCE_VDD			(1 << 3) +#define	EDP_BLC_ENABLE			(1 << 2) +#define	PANEL_POWER_RESET		(1 << 1) +#define	PANEL_POWER_OFF			(0 << 0) +#define	PANEL_POWER_ON			(1 << 0) +/* Poulsbo/Oaktrail */  #define LVDSPP_ON		0x61208  #define LVDSPP_OFF		0x6120c  #define PP_CYCLE		0x61210 +/* Cedartrail */  #define PP_ON_DELAYS		0x61208		/* Cedartrail */ +#define PANEL_PORT_SELECT_MASK 		(3 << 30) +#define PANEL_PORT_SELECT_LVDS 		(0 << 30) +#define PANEL_PORT_SELECT_EDP		(1 << 30) +#define PANEL_POWER_UP_DELAY_MASK	(0x1fff0000) +#define PANEL_POWER_UP_DELAY_SHIFT	16 +#define PANEL_LIGHT_ON_DELAY_MASK	(0x1fff) +#define PANEL_LIGHT_ON_DELAY_SHIFT	0 +  #define PP_OFF_DELAYS		0x6120c		/* Cedartrail */ +#define PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000) +#define PANEL_POWER_DOWN_DELAY_SHIFT	16 +#define PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff) +#define PANEL_LIGHT_OFF_DELAY_SHIFT	0 + +#define PP_DIVISOR		0x61210		/* Cedartrail */ +#define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00) +#define  PP_REFERENCE_DIVIDER_SHIFT	8 +#define  
PANEL_POWER_CYCLE_DELAY_MASK	(0x1f) +#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0  #define PFIT_CONTROL		0x61230  #define PFIT_ENABLE			(1 << 31) @@ -1282,6 +1313,10 @@ No status bits are changed.  # define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* Fixed value on CDV */  # define DPOUNIT_CLOCK_GATE_DISABLE		(1 << 11)  # define DPIOUNIT_CLOCK_GATE_DISABLE		(1 << 6) +# define DPUNIT_PIPEB_GATE_DISABLE		(1 << 30) +# define DPUNIT_PIPEA_GATE_DISABLE		(1 << 25) +# define DPCUNIT_CLOCK_GATE_DISABLE		(1 << 24) +# define DPLSUNIT_CLOCK_GATE_DISABLE		(1 << 13)  #define RAMCLK_GATE_D		0x6210 @@ -1347,5 +1382,165 @@ No status bits are changed.  #define LANE_PLL_ENABLE		(0x3 << 20)  #define LANE_PLL_PIPE(p)	(((p) == 0) ? (1 << 21) : (0 << 21)) +#define DP_B				0x64100 +#define DP_C				0x64200 + +#define   DP_PORT_EN			(1 << 31) +#define   DP_PIPEB_SELECT		(1 << 30) +#define   DP_PIPE_MASK			(1 << 30) + +/* Link training mode - select a suitable mode for each stage */ +#define   DP_LINK_TRAIN_PAT_1		(0 << 28) +#define   DP_LINK_TRAIN_PAT_2		(1 << 28) +#define   DP_LINK_TRAIN_PAT_IDLE	(2 << 28) +#define   DP_LINK_TRAIN_OFF		(3 << 28) +#define   DP_LINK_TRAIN_MASK		(3 << 28) +#define   DP_LINK_TRAIN_SHIFT		28 + +/* Signal voltages. These are mostly controlled by the other end */ +#define   DP_VOLTAGE_0_4		(0 << 25) +#define   DP_VOLTAGE_0_6		(1 << 25) +#define   DP_VOLTAGE_0_8		(2 << 25) +#define   DP_VOLTAGE_1_2		(3 << 25) +#define   DP_VOLTAGE_MASK		(7 << 25) +#define   DP_VOLTAGE_SHIFT		25 + +/* Signal pre-emphasis levels, like voltages, the other end tells us what + * they want + */ +#define   DP_PRE_EMPHASIS_0		(0 << 22) +#define   DP_PRE_EMPHASIS_3_5		(1 << 22) +#define   DP_PRE_EMPHASIS_6		(2 << 22) +#define   DP_PRE_EMPHASIS_9_5		(3 << 22) +#define   DP_PRE_EMPHASIS_MASK		(7 << 22) +#define   DP_PRE_EMPHASIS_SHIFT		22 + +/* How many wires to use. 
I guess 3 was too hard */ +#define   DP_PORT_WIDTH_1		(0 << 19) +#define   DP_PORT_WIDTH_2		(1 << 19) +#define   DP_PORT_WIDTH_4		(3 << 19) +#define   DP_PORT_WIDTH_MASK		(7 << 19) + +/* Mystic DPCD version 1.1 special mode */ +#define   DP_ENHANCED_FRAMING		(1 << 18) + +/** locked once port is enabled */ +#define   DP_PORT_REVERSAL		(1 << 15) + +/** sends the clock on lane 15 of the PEG for debug */ +#define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13) + +#define   DP_SCRAMBLING_DISABLE		(1 << 12) +#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7) + +/** limit RGB values to avoid confusing TVs */ +#define   DP_COLOR_RANGE_16_235		(1 << 8) + +/** Turn on the audio link */ +#define   DP_AUDIO_OUTPUT_ENABLE	(1 << 6) + +/** vs and hs sync polarity */ +#define   DP_SYNC_VS_HIGH		(1 << 4) +#define   DP_SYNC_HS_HIGH		(1 << 3) + +/** A fantasy */ +#define   DP_DETECTED			(1 << 2) + +/** The aux channel provides a way to talk to the + * signal sink for DDC etc. Max packet size supported + * is 20 bytes in each direction, hence the 5 fixed + * data registers + */ +#define DPB_AUX_CH_CTL			0x64110 +#define DPB_AUX_CH_DATA1		0x64114 +#define DPB_AUX_CH_DATA2		0x64118 +#define DPB_AUX_CH_DATA3		0x6411c +#define DPB_AUX_CH_DATA4		0x64120 +#define DPB_AUX_CH_DATA5		0x64124 + +#define DPC_AUX_CH_CTL			0x64210 +#define DPC_AUX_CH_DATA1		0x64214 +#define DPC_AUX_CH_DATA2		0x64218 +#define DPC_AUX_CH_DATA3		0x6421c +#define DPC_AUX_CH_DATA4		0x64220 +#define DPC_AUX_CH_DATA5		0x64224 + +#define   DP_AUX_CH_CTL_SEND_BUSY	    (1 << 31) +#define   DP_AUX_CH_CTL_DONE		    (1 << 30) +#define   DP_AUX_CH_CTL_INTERRUPT	    (1 << 29) +#define   DP_AUX_CH_CTL_TIME_OUT_ERROR	    (1 << 28) +#define   DP_AUX_CH_CTL_TIME_OUT_400us	    (0 << 26) +#define   DP_AUX_CH_CTL_TIME_OUT_600us	    (1 << 26) +#define   DP_AUX_CH_CTL_TIME_OUT_800us	    (2 << 26) +#define   DP_AUX_CH_CTL_TIME_OUT_1600us	    (3 << 26) +#define   DP_AUX_CH_CTL_TIME_OUT_MASK	    (3 << 26) +#define   DP_AUX_CH_CTL_RECEIVE_ERROR	    (1 << 
25) +#define   DP_AUX_CH_CTL_MESSAGE_SIZE_MASK    (0x1f << 20) +#define   DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT   20 +#define   DP_AUX_CH_CTL_PRECHARGE_2US_MASK   (0xf << 16) +#define   DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT  16 +#define   DP_AUX_CH_CTL_AUX_AKSV_SELECT	    (1 << 15) +#define   DP_AUX_CH_CTL_MANCHESTER_TEST	    (1 << 14) +#define   DP_AUX_CH_CTL_SYNC_TEST	    (1 << 13) +#define   DP_AUX_CH_CTL_DEGLITCH_TEST	    (1 << 12) +#define   DP_AUX_CH_CTL_PRECHARGE_TEST	    (1 << 11) +#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff) +#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0 + +/* + * Computing GMCH M and N values for the Display Port link + * + * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes + * + * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz) + * + * The GMCH value is used internally + * + * bytes_per_pixel is the number of bytes coming out of the plane, + * which is after the LUTs, so we want the bytes for our color format. + * For our current usage, this is always 3, one byte for R, G and B. + */ + +#define _PIPEA_GMCH_DATA_M			0x70050 +#define _PIPEB_GMCH_DATA_M			0x71050 + +/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ +#define   PIPE_GMCH_DATA_M_TU_SIZE_MASK		(0x3f << 25) +#define   PIPE_GMCH_DATA_M_TU_SIZE_SHIFT	25 + +#define   PIPE_GMCH_DATA_M_MASK			(0xffffff) + +#define _PIPEA_GMCH_DATA_N			0x70054 +#define _PIPEB_GMCH_DATA_N			0x71054 +#define   PIPE_GMCH_DATA_N_MASK			(0xffffff) + +/* + * Computing Link M and N values for the Display Port link + * + * Link M / N = pixel_clock / ls_clk + * + * (the DP spec calls pixel_clock the 'strm_clk') + * + * The Link value is transmitted in the Main Stream + * Attributes and VB-ID. 
+ */ + +#define _PIPEA_DP_LINK_M				0x70060 +#define _PIPEB_DP_LINK_M				0x71060 +#define   PIPEA_DP_LINK_M_MASK			(0xffffff) + +#define _PIPEA_DP_LINK_N				0x70064 +#define _PIPEB_DP_LINK_N				0x71064 +#define   PIPEA_DP_LINK_N_MASK			(0xffffff) + +#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) +#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) +#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) +#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) + +#define   PIPE_BPC_MASK				(7 << 5) +#define   PIPE_8BPC				(0 << 5) +#define   PIPE_10BPC				(1 << 5) +#define   PIPE_6BPC				(2 << 5)  #endif diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 0466c7b985f..d35f93ba3a8 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1292,7 +1292,6 @@ psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)  	return drm_get_edid(connector,  			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); -	return NULL;  }  static enum drm_connector_status @@ -1343,7 +1342,6 @@ psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)  			}  		} else  			status = connector_status_disconnected; -		connector->display_info.raw_edid = NULL;  		kfree(edid);  	} @@ -1404,7 +1402,6 @@ psb_intel_sdvo_detect(struct drm_connector *connector, bool force)  				ret = connector_status_disconnected;  			else  				ret = connector_status_connected; -			connector->display_info.raw_edid = NULL;  			kfree(edid);  		} else  			ret = connector_status_connected; @@ -1453,7 +1450,6 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)  			drm_add_edid_modes(connector, edid);  		} -		connector->display_info.raw_edid = NULL;  		kfree(edid);  	}  } diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index 36d952280c5..599099fe76e 
100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c @@ -427,15 +427,10 @@ static int ch7006_remove(struct i2c_client *client)  	return 0;  } -static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg) +static int ch7006_resume(struct device *dev)  { -	ch7006_dbg(client, "\n"); - -	return 0; -} +	struct i2c_client *client = to_i2c_client(dev); -static int ch7006_resume(struct i2c_client *client) -{  	ch7006_dbg(client, "\n");  	ch7006_write(client, 0x3d, 0x0); @@ -499,15 +494,18 @@ static struct i2c_device_id ch7006_ids[] = {  };  MODULE_DEVICE_TABLE(i2c, ch7006_ids); +static const struct dev_pm_ops ch7006_pm_ops = { +	.resume = ch7006_resume, +}; +  static struct drm_i2c_encoder_driver ch7006_driver = {  	.i2c_driver = {  		.probe = ch7006_probe,  		.remove = ch7006_remove, -		.suspend = ch7006_suspend, -		.resume = ch7006_resume,  		.driver = {  			.name = "ch7006", +			.pm = &ch7006_pm_ops,  		},  		.id_table = ch7006_ids, diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a18e93687b8..274a3280cdc 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -118,6 +118,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)  		   obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : "");  	if (obj->base.name)  		seq_printf(m, " (name: %d)", obj->base.name); +	if (obj->pin_count) +		seq_printf(m, " (pinned x %d)", obj->pin_count);  	if (obj->fence_reg != I915_FENCE_REG_NONE)  		seq_printf(m, " (fence: %d)", obj->fence_reg);  	if (obj->gtt_space != NULL) @@ -197,8 +199,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)  	struct drm_info_node *node = (struct drm_info_node *) m->private;  	struct drm_device *dev = node->minor->dev;  	struct drm_i915_private *dev_priv = dev->dev_private; -	u32 count, mappable_count; -	size_t size, mappable_size; +	u32 count, mappable_count, purgeable_count; +	size_t size, mappable_size, purgeable_size;  	struct drm_i915_gem_object *obj;  	int ret; @@ -211,7 +213,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)  		   dev_priv->mm.object_memory);  	size = count = mappable_size = mappable_count = 0; -	count_objects(&dev_priv->mm.gtt_list, gtt_list); +	count_objects(&dev_priv->mm.bound_list, gtt_list);  	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",  		   count, mappable_count, size, mappable_size); @@ -225,8 +227,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)  	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",  		   count, mappable_count, size, mappable_size); +	size = count = purgeable_size = purgeable_count = 0; +	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { +		size += obj->base.size, ++count; +		if (obj->madv == I915_MADV_DONTNEED) +			purgeable_size += obj->base.size, ++purgeable_count; +	} +	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); +  	size = count = mappable_size = mappable_count = 0; -	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { +	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {  		if (obj->fault_mappable) {  			size += obj->gtt_space->size;  			++count; @@ -235,7 +245,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)  	
		mappable_size += obj->gtt_space->size;  			++mappable_count;  		} +		if (obj->madv == I915_MADV_DONTNEED) { +			purgeable_size += obj->base.size; +			++purgeable_count; +		}  	} +	seq_printf(m, "%u purgeable objects, %zu bytes\n", +		   purgeable_count, purgeable_size);  	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",  		   mappable_count, mappable_size);  	seq_printf(m, "%u fault mappable objects, %zu bytes\n", @@ -264,7 +280,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)  		return ret;  	total_obj_size = total_gtt_size = count = 0; -	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { +	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {  		if (list == PINNED_LIST && obj->pin_count == 0)  			continue; @@ -337,40 +353,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)  	struct drm_info_node *node = (struct drm_info_node *) m->private;  	struct drm_device *dev = node->minor->dev;  	drm_i915_private_t *dev_priv = dev->dev_private; +	struct intel_ring_buffer *ring;  	struct drm_i915_gem_request *gem_request; -	int ret, count; +	int ret, count, i;  	ret = mutex_lock_interruptible(&dev->struct_mutex);  	if (ret)  		return ret;  	count = 0; -	if (!list_empty(&dev_priv->ring[RCS].request_list)) { -		seq_printf(m, "Render requests:\n"); -		list_for_each_entry(gem_request, -				    &dev_priv->ring[RCS].request_list, -				    list) { -			seq_printf(m, "    %d @ %d\n", -				   gem_request->seqno, -				   (int) (jiffies - gem_request->emitted_jiffies)); -		} -		count++; -	} -	if (!list_empty(&dev_priv->ring[VCS].request_list)) { -		seq_printf(m, "BSD requests:\n"); -		list_for_each_entry(gem_request, -				    &dev_priv->ring[VCS].request_list, -				    list) { -			seq_printf(m, "    %d @ %d\n", -				   gem_request->seqno, -				   (int) (jiffies - gem_request->emitted_jiffies)); -		} -		count++; -	} -	if (!list_empty(&dev_priv->ring[BCS].request_list)) { -		seq_printf(m, "BLT requests:\n"); +	
for_each_ring(ring, dev_priv, i) { +		if (list_empty(&ring->request_list)) +			continue; + +		seq_printf(m, "%s requests:\n", ring->name);  		list_for_each_entry(gem_request, -				    &dev_priv->ring[BCS].request_list, +				    &ring->request_list,  				    list) {  			seq_printf(m, "    %d @ %d\n",  				   gem_request->seqno, @@ -400,14 +398,15 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)  	struct drm_info_node *node = (struct drm_info_node *) m->private;  	struct drm_device *dev = node->minor->dev;  	drm_i915_private_t *dev_priv = dev->dev_private; +	struct intel_ring_buffer *ring;  	int ret, i;  	ret = mutex_lock_interruptible(&dev->struct_mutex);  	if (ret)  		return ret; -	for (i = 0; i < I915_NUM_RINGS; i++) -		i915_ring_seqno_info(m, &dev_priv->ring[i]); +	for_each_ring(ring, dev_priv, i) +		i915_ring_seqno_info(m, ring);  	mutex_unlock(&dev->struct_mutex); @@ -420,6 +419,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)  	struct drm_info_node *node = (struct drm_info_node *) m->private;  	struct drm_device *dev = node->minor->dev;  	drm_i915_private_t *dev_priv = dev->dev_private; +	struct intel_ring_buffer *ring;  	int ret, i, pipe;  	ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -497,13 +497,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)  	}  	seq_printf(m, "Interrupts received: %d\n",  		   atomic_read(&dev_priv->irq_received)); -	for (i = 0; i < I915_NUM_RINGS; i++) { +	for_each_ring(ring, dev_priv, i) {  		if (IS_GEN6(dev) || IS_GEN7(dev)) { -			seq_printf(m, "Graphics Interrupt mask (%s):	%08x\n", -				   dev_priv->ring[i].name, -				   I915_READ_IMR(&dev_priv->ring[i])); +			seq_printf(m, +				   "Graphics Interrupt mask (%s):	%08x\n", +				   ring->name, I915_READ_IMR(ring));  		} -		i915_ring_seqno_info(m, &dev_priv->ring[i]); +		i915_ring_seqno_info(m, ring);  	}  	mutex_unlock(&dev->struct_mutex); @@ -526,7 +526,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void 
*data)  	for (i = 0; i < dev_priv->num_fence_regs; i++) {  		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; -		seq_printf(m, "Fenced object[%2d] = ", i); +		seq_printf(m, "Fence %d, pin count = %d, object = ", +			   i, dev_priv->fence_regs[i].pin_count);  		if (obj == NULL)  			seq_printf(m, "unused");  		else @@ -645,10 +646,9 @@ static void i915_ring_error_state(struct seq_file *m,  	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);  	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);  	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]); -	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { -		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1); +	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)  		seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr); -	} +  	if (INTEL_INFO(dev)->gen >= 4)  		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);  	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]); @@ -697,11 +697,17 @@ static int i915_error_state(struct seq_file *m, void *unused)  	for (i = 0; i < dev_priv->num_fence_regs; i++)  		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]); +	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) +		seq_printf(m, "  INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); +  	if (INTEL_INFO(dev)->gen >= 6) {  		seq_printf(m, "ERROR: 0x%08x\n", error->error);  		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);  	} +	if (INTEL_INFO(dev)->gen == 7) +		seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); +  	for_each_ring(ring, dev_priv, i)  		i915_ring_error_state(m, dev, error, i); @@ -1507,9 +1513,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)  	if (INTEL_INFO(dev)->gen == 6)  		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); -	for (i = 0; i < I915_NUM_RINGS; i++) { -		ring = &dev_priv->ring[i]; - +	for_each_ring(ring, dev_priv, i) {  		seq_printf(m, "%s\n", ring->name);  		if (INTEL_INFO(dev)->gen == 7)  			seq_printf(m, "GFX_MODE: 
0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2cba7b4a04e..2c09900e326 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1558,11 +1558,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)  	 *  	 * All tasks on the workqueue are expected to acquire the dev mutex  	 * so there is no point in running more than one instance of the -	 * workqueue at any time: max_active = 1 and NON_REENTRANT. +	 * workqueue at any time.  Use an ordered one.  	 */ -	dev_priv->wq = alloc_workqueue("i915", -				       WQ_UNBOUND | WQ_NON_REENTRANT, -				       1); +	dev_priv->wq = alloc_ordered_workqueue("i915", 0);  	if (dev_priv->wq == NULL) {  		DRM_ERROR("Failed to create our workqueue.\n");  		ret = -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index cd6697c98c5..a7837e55694 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1173,6 +1173,10 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \  	if (unlikely(__fifo_ret)) { \  		gen6_gt_check_fifodbg(dev_priv); \  	} \ +	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ +		DRM_ERROR("Unclaimed write to %x\n", reg); \ +		writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT);	\ +	} \  }  __i915_write(8, b)  __i915_write(16, w) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9fce7820d96..26c6959a524 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -196,9 +196,10 @@ struct drm_i915_error_state {  	u32 cpu_ring_head[I915_NUM_RINGS];  	u32 cpu_ring_tail[I915_NUM_RINGS];  	u32 error; /* gen6+ */ +	u32 err_int; /* gen7 */  	u32 instpm[I915_NUM_RINGS];  	u32 instps[I915_NUM_RINGS]; -	u32 instdone1; +	u32 extra_instdone[I915_NUM_INSTDONE_REG];  	u32 seqno[I915_NUM_RINGS];  	u64 
bbaddr;  	u32 fault_reg[I915_NUM_RINGS]; @@ -454,8 +455,7 @@ typedef struct drm_i915_private {  	struct timer_list hangcheck_timer;  	int hangcheck_count;  	uint32_t last_acthd[I915_NUM_RINGS]; -	uint32_t last_instdone; -	uint32_t last_instdone1; +	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];  	unsigned int stop_rings; @@ -686,7 +686,13 @@ typedef struct drm_i915_private {  		struct drm_mm gtt_space;  		/** List of all objects in gtt_space. Used to restore gtt  		 * mappings on resume */ -		struct list_head gtt_list; +		struct list_head bound_list; +		/** +		 * List of objects which are not bound to the GTT (thus +		 * are idle and not used by the GPU) but still have +		 * (presumably uncached) pages still attached. +		 */ +		struct list_head unbound_list;  		/** Usable portion of the GTT for GEM */  		unsigned long gtt_start; @@ -834,22 +840,26 @@ typedef struct drm_i915_private {  		u8 max_delay;  	} rps; +	/* ilk-only ips/rps state. Everything in here is protected by the global +	 * mchdev_lock in intel_pm.c */ +	struct { +		u8 cur_delay; +		u8 min_delay; +		u8 max_delay; +		u8 fmax; +		u8 fstart; -	u8 cur_delay; -	u8 min_delay; -	u8 max_delay; -	u8 fmax; -	u8 fstart; +		u64 last_count1; +		unsigned long last_time1; +		unsigned long chipset_power; +		u64 last_count2; +		struct timespec last_time2; +		unsigned long gfx_power; +		u8 corr; -	u64 last_count1; -	unsigned long last_time1; -	unsigned long chipset_power; -	u64 last_count2; -	struct timespec last_time2; -	unsigned long gfx_power; -	int c_m; -	int r_t; -	u8 corr; +		int c_m; +		int r_t; +	} ips;  	enum no_fbc_reason no_fbc_reason; @@ -1296,19 +1306,20 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,  			struct drm_file *file_priv);  void i915_gem_load(struct drm_device *dev);  int i915_gem_init_object(struct drm_gem_object *obj); +void i915_gem_object_init(struct drm_i915_gem_object *obj);  struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,  						  size_t size);  
void i915_gem_free_object(struct drm_gem_object *obj);  int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,  				     uint32_t alignment, -				     bool map_and_fenceable); +				     bool map_and_fenceable, +				     bool nonblocking);  void i915_gem_object_unpin(struct drm_i915_gem_object *obj);  int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);  void i915_gem_release_mmap(struct drm_i915_gem_object *obj);  void i915_gem_lastclose(struct drm_device *dev); -int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, -				  gfp_t gfpmask); +int __must_check i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);  int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);  int i915_gem_object_sync(struct drm_i915_gem_object *obj,  			 struct intel_ring_buffer *to); @@ -1449,8 +1460,9 @@ void i915_gem_init_global_gtt(struct drm_device *dev,  int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,  					  unsigned alignment,  					  unsigned cache_level, -					  bool mappable); -int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); +					  bool mappable, +					  bool nonblock); +int i915_gem_evict_everything(struct drm_device *dev);  /* i915_gem_stolen.c */  int i915_gem_init_stolen(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 05145932482..87a64e5f28f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -41,7 +41,8 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o  static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);  static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,  						    unsigned alignment, -						    bool map_and_fenceable); +						    bool map_and_fenceable, +						    bool nonblocking);  static int i915_gem_phys_pwrite(struct drm_device *dev,  				
struct drm_i915_gem_object *obj,  				struct drm_i915_gem_pwrite *args, @@ -55,6 +56,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,  static int i915_gem_inactive_shrink(struct shrinker *shrinker,  				    struct shrink_control *sc); +static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); +static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);  static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);  static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) @@ -140,7 +143,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)  static inline bool  i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)  { -	return !obj->active; +	return obj->gtt_space && !obj->active;  }  int @@ -179,7 +182,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,  	pinned = 0;  	mutex_lock(&dev->struct_mutex); -	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) +	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)  		if (obj->pin_count)  			pinned += obj->gtt_space->size;  	mutex_unlock(&dev->struct_mutex); @@ -423,9 +426,11 @@ i915_gem_shmem_pread(struct drm_device *dev,  		 * anyway again before the next pread happens. */  		if (obj->cache_level == I915_CACHE_NONE)  			needs_clflush = 1; -		ret = i915_gem_object_set_to_gtt_domain(obj, false); -		if (ret) -			return ret; +		if (obj->gtt_space) { +			ret = i915_gem_object_set_to_gtt_domain(obj, false); +			if (ret) +				return ret; +		}  	}  	offset = args->offset; @@ -605,7 +610,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,  	char __user *user_data;  	int page_offset, page_length, ret; -	ret = i915_gem_object_pin(obj, 0, true); +	ret = i915_gem_object_pin(obj, 0, true, true);  	if (ret)  		goto out; @@ -751,9 +756,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,  		 * right away and we therefore have to clflush anyway. 
*/  		if (obj->cache_level == I915_CACHE_NONE)  			needs_clflush_after = 1; -		ret = i915_gem_object_set_to_gtt_domain(obj, true); -		if (ret) -			return ret; +		if (obj->gtt_space) { +			ret = i915_gem_object_set_to_gtt_domain(obj, true); +			if (ret) +				return ret; +		}  	}  	/* Same trick applies for invalidate partially written cachelines before  	 * writing.  */ @@ -919,10 +926,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,  		goto out;  	} -	if (obj->gtt_space && -	    obj->cache_level == I915_CACHE_NONE && +	if (obj->cache_level == I915_CACHE_NONE &&  	    obj->tiling_mode == I915_TILING_NONE && -	    obj->map_and_fenceable &&  	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {  		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);  		/* Note that the gtt paths might fail with non-page-backed user @@ -930,7 +935,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,  		 * textures). Fallback to the shmem path in that case. */  	} -	if (ret == -EFAULT) +	if (ret == -EFAULT || ret == -ENOSPC)  		ret = i915_gem_shmem_pwrite(dev, obj, args, file);  out: @@ -940,6 +945,240 @@ unlock:  	return ret;  } +int +i915_gem_check_wedge(struct drm_i915_private *dev_priv, +		     bool interruptible) +{ +	if (atomic_read(&dev_priv->mm.wedged)) { +		struct completion *x = &dev_priv->error_completion; +		bool recovery_complete; +		unsigned long flags; + +		/* Give the error handler a chance to run. */ +		spin_lock_irqsave(&x->wait.lock, flags); +		recovery_complete = x->done > 0; +		spin_unlock_irqrestore(&x->wait.lock, flags); + +		/* Non-interruptible callers can't handle -EAGAIN, hence return +		 * -EIO unconditionally for these. */ +		if (!interruptible) +			return -EIO; + +		/* Recovery complete, but still wedged means reset failure. */ +		if (recovery_complete) +			return -EIO; + +		return -EAGAIN; +	} + +	return 0; +} + +/* + * Compare seqno against outstanding lazy request. Emit a request if they are + * equal. 
+ */ +static int +i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) +{ +	int ret; + +	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + +	ret = 0; +	if (seqno == ring->outstanding_lazy_request) +		ret = i915_add_request(ring, NULL, NULL); + +	return ret; +} + +/** + * __wait_seqno - wait until execution of seqno has finished + * @ring: the ring expected to report seqno + * @seqno: duh! + * @interruptible: do an interruptible wait (normally yes) + * @timeout: in - how long to wait (NULL forever); out - how much time remaining + * + * Returns 0 if the seqno was found within the alloted time. Else returns the + * errno with remaining time filled in timeout argument. + */ +static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, +			bool interruptible, struct timespec *timeout) +{ +	drm_i915_private_t *dev_priv = ring->dev->dev_private; +	struct timespec before, now, wait_time={1,0}; +	unsigned long timeout_jiffies; +	long end; +	bool wait_forever = true; +	int ret; + +	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) +		return 0; + +	trace_i915_gem_request_wait_begin(ring, seqno); + +	if (timeout != NULL) { +		wait_time = *timeout; +		wait_forever = false; +	} + +	timeout_jiffies = timespec_to_jiffies(&wait_time); + +	if (WARN_ON(!ring->irq_get(ring))) +		return -ENODEV; + +	/* Record current time in case interrupted by signal, or wedged * */ +	getrawmonotonic(&before); + +#define EXIT_COND \ +	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ +	atomic_read(&dev_priv->mm.wedged)) +	do { +		if (interruptible) +			end = wait_event_interruptible_timeout(ring->irq_queue, +							       EXIT_COND, +							       timeout_jiffies); +		else +			end = wait_event_timeout(ring->irq_queue, EXIT_COND, +						 timeout_jiffies); + +		ret = i915_gem_check_wedge(dev_priv, interruptible); +		if (ret) +			end = ret; +	} while (end == 0 && wait_forever); + +	getrawmonotonic(&now); + +	ring->irq_put(ring); +	
trace_i915_gem_request_wait_end(ring, seqno); +#undef EXIT_COND + +	if (timeout) { +		struct timespec sleep_time = timespec_sub(now, before); +		*timeout = timespec_sub(*timeout, sleep_time); +	} + +	switch (end) { +	case -EIO: +	case -EAGAIN: /* Wedged */ +	case -ERESTARTSYS: /* Signal */ +		return (int)end; +	case 0: /* Timeout */ +		if (timeout) +			set_normalized_timespec(timeout, 0, 0); +		return -ETIME; +	default: /* Completed */ +		WARN_ON(end < 0); /* We're not aware of other errors */ +		return 0; +	} +} + +/** + * Waits for a sequence number to be signaled, and cleans up the + * request and object lists appropriately for that event. + */ +int +i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) +{ +	struct drm_device *dev = ring->dev; +	struct drm_i915_private *dev_priv = dev->dev_private; +	bool interruptible = dev_priv->mm.interruptible; +	int ret; + +	BUG_ON(!mutex_is_locked(&dev->struct_mutex)); +	BUG_ON(seqno == 0); + +	ret = i915_gem_check_wedge(dev_priv, interruptible); +	if (ret) +		return ret; + +	ret = i915_gem_check_olr(ring, seqno); +	if (ret) +		return ret; + +	return __wait_seqno(ring, seqno, interruptible, NULL); +} + +/** + * Ensures that all rendering to the object has completed and the object is + * safe to unbind from the GTT or access from the CPU. + */ +static __must_check int +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, +			       bool readonly) +{ +	struct intel_ring_buffer *ring = obj->ring; +	u32 seqno; +	int ret; + +	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; +	if (seqno == 0) +		return 0; + +	ret = i915_wait_seqno(ring, seqno); +	if (ret) +		return ret; + +	i915_gem_retire_requests_ring(ring); + +	/* Manually manage the write flush as we may have not yet +	 * retired the buffer. 
+	 */ +	if (obj->last_write_seqno && +	    i915_seqno_passed(seqno, obj->last_write_seqno)) { +		obj->last_write_seqno = 0; +		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; +	} + +	return 0; +} + +/* A nonblocking variant of the above wait. This is a highly dangerous routine + * as the object state may change during this call. + */ +static __must_check int +i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, +					    bool readonly) +{ +	struct drm_device *dev = obj->base.dev; +	struct drm_i915_private *dev_priv = dev->dev_private; +	struct intel_ring_buffer *ring = obj->ring; +	u32 seqno; +	int ret; + +	BUG_ON(!mutex_is_locked(&dev->struct_mutex)); +	BUG_ON(!dev_priv->mm.interruptible); + +	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; +	if (seqno == 0) +		return 0; + +	ret = i915_gem_check_wedge(dev_priv, true); +	if (ret) +		return ret; + +	ret = i915_gem_check_olr(ring, seqno); +	if (ret) +		return ret; + +	mutex_unlock(&dev->struct_mutex); +	ret = __wait_seqno(ring, seqno, true, NULL); +	mutex_lock(&dev->struct_mutex); + +	i915_gem_retire_requests_ring(ring); + +	/* Manually manage the write flush as we may have not yet +	 * retired the buffer. +	 */ +	if (obj->last_write_seqno && +	    i915_seqno_passed(seqno, obj->last_write_seqno)) { +		obj->last_write_seqno = 0; +		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; +	} + +	return ret; +} +  /**   * Called when user space prepares to use an object with the CPU, either   * through the mmap ioctl's mapping or a GTT mapping. @@ -977,6 +1216,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,  		goto unlock;  	} +	/* Try to flush the object off the GPU without holding the lock. +	 * We will repeat the flush holding the lock in the normal manner +	 * to catch cases where we are gazumped. 
+	 */ +	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); +	if (ret) +		goto unref; +  	if (read_domains & I915_GEM_DOMAIN_GTT) {  		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); @@ -990,6 +1237,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,  		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);  	} +unref:  	drm_gem_object_unreference(&obj->base);  unlock:  	mutex_unlock(&dev->struct_mutex); @@ -1109,7 +1357,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)  			goto unlock;  	}  	if (!obj->gtt_space) { -		ret = i915_gem_object_bind_to_gtt(obj, 0, true); +		ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);  		if (ret)  			goto unlock; @@ -1270,6 +1518,42 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,  	return i915_gem_get_gtt_size(dev, size, tiling_mode);  } +static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) +{ +	struct drm_i915_private *dev_priv = obj->base.dev->dev_private; +	int ret; + +	if (obj->base.map_list.map) +		return 0; + +	ret = drm_gem_create_mmap_offset(&obj->base); +	if (ret != -ENOSPC) +		return ret; + +	/* Badly fragmented mmap space? The only way we can recover +	 * space is by destroying unwanted objects. We can't randomly release +	 * mmap_offsets as userspace expects them to be persistent for the +	 * lifetime of the objects. The closest we can is to release the +	 * offsets on purgeable objects by truncating it and marking it purged, +	 * which prevents userspace from ever using that object again. 
+	 */ +	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); +	ret = drm_gem_create_mmap_offset(&obj->base); +	if (ret != -ENOSPC) +		return ret; + +	i915_gem_shrink_all(dev_priv); +	return drm_gem_create_mmap_offset(&obj->base); +} + +static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) +{ +	if (!obj->base.map_list.map) +		return; + +	drm_gem_free_mmap_offset(&obj->base); +} +  int  i915_gem_mmap_gtt(struct drm_file *file,  		  struct drm_device *dev, @@ -1301,11 +1585,9 @@ i915_gem_mmap_gtt(struct drm_file *file,  		goto out;  	} -	if (!obj->base.map_list.map) { -		ret = drm_gem_create_mmap_offset(&obj->base); -		if (ret) -			goto out; -	} +	ret = i915_gem_object_create_mmap_offset(obj); +	if (ret) +		goto out;  	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; @@ -1340,64 +1622,58 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,  	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);  } -int -i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, -			      gfp_t gfpmask) +/* Immediately discard the backing storage */ +static void +i915_gem_object_truncate(struct drm_i915_gem_object *obj)  { -	int page_count, i; -	struct address_space *mapping;  	struct inode *inode; -	struct page *page; -	if (obj->pages || obj->sg_table) -		return 0; +	i915_gem_object_free_mmap_offset(obj); -	/* Get the list of pages out of our struct file.  They'll be pinned -	 * at this point until we release them. -	 */ -	page_count = obj->base.size / PAGE_SIZE; -	BUG_ON(obj->pages != NULL); -	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); -	if (obj->pages == NULL) -		return -ENOMEM; +	if (obj->base.filp == NULL) +		return; +	/* Our goal here is to return as much of the memory as +	 * is possible back to the system as we are called from OOM. +	 * To do this we must instruct the shmfs to drop all of its +	 * backing pages, *now*. 
+	 */  	inode = obj->base.filp->f_path.dentry->d_inode; -	mapping = inode->i_mapping; -	gfpmask |= mapping_gfp_mask(mapping); - -	for (i = 0; i < page_count; i++) { -		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); -		if (IS_ERR(page)) -			goto err_pages; - -		obj->pages[i] = page; -	} - -	if (i915_gem_object_needs_bit17_swizzle(obj)) -		i915_gem_object_do_bit_17_swizzle(obj); - -	return 0; +	shmem_truncate_range(inode, 0, (loff_t)-1); -err_pages: -	while (i--) -		page_cache_release(obj->pages[i]); +	obj->madv = __I915_MADV_PURGED; +} -	drm_free_large(obj->pages); -	obj->pages = NULL; -	return PTR_ERR(page); +static inline int +i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) +{ +	return obj->madv == I915_MADV_DONTNEED;  } -static void +static int  i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)  {  	int page_count = obj->base.size / PAGE_SIZE; -	int i; +	int ret, i; -	if (!obj->pages) -		return; +	BUG_ON(obj->gtt_space); +	if (obj->pages == NULL) +		return 0; + +	BUG_ON(obj->gtt_space);  	BUG_ON(obj->madv == __I915_MADV_PURGED); +	ret = i915_gem_object_set_to_cpu_domain(obj, true); +	if (ret) { +		/* In the event of a disaster, abandon all caches and +		 * hope for the best. 
+		 */ +		WARN_ON(ret != -EIO); +		i915_gem_clflush_object(obj); +		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; +	} +  	if (i915_gem_object_needs_bit17_swizzle(obj))  		i915_gem_object_save_bit_17_swizzle(obj); @@ -1417,6 +1693,129 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)  	drm_free_large(obj->pages);  	obj->pages = NULL; + +	list_del(&obj->gtt_list); + +	if (i915_gem_object_is_purgeable(obj)) +		i915_gem_object_truncate(obj); + +	return 0; +} + +static long +i915_gem_purge(struct drm_i915_private *dev_priv, long target) +{ +	struct drm_i915_gem_object *obj, *next; +	long count = 0; + +	list_for_each_entry_safe(obj, next, +				 &dev_priv->mm.unbound_list, +				 gtt_list) { +		if (i915_gem_object_is_purgeable(obj) && +		    i915_gem_object_put_pages_gtt(obj) == 0) { +			count += obj->base.size >> PAGE_SHIFT; +			if (count >= target) +				return count; +		} +	} + +	list_for_each_entry_safe(obj, next, +				 &dev_priv->mm.inactive_list, +				 mm_list) { +		if (i915_gem_object_is_purgeable(obj) && +		    i915_gem_object_unbind(obj) == 0 && +		    i915_gem_object_put_pages_gtt(obj) == 0) { +			count += obj->base.size >> PAGE_SHIFT; +			if (count >= target) +				return count; +		} +	} + +	return count; +} + +static void +i915_gem_shrink_all(struct drm_i915_private *dev_priv) +{ +	struct drm_i915_gem_object *obj, *next; + +	i915_gem_evict_everything(dev_priv->dev); + +	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) +		i915_gem_object_put_pages_gtt(obj); +} + +int +i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) +{ +	struct drm_i915_private *dev_priv = obj->base.dev->dev_private; +	int page_count, i; +	struct address_space *mapping; +	struct page *page; +	gfp_t gfp; + +	if (obj->pages || obj->sg_table) +		return 0; + +	/* Assert that the object is not currently in any GPU domain. 
As it +	 * wasn't in the GTT, there shouldn't be any way it could have been in +	 * a GPU cache +	 */ +	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); +	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); + +	/* Get the list of pages out of our struct file.  They'll be pinned +	 * at this point until we release them. +	 */ +	page_count = obj->base.size / PAGE_SIZE; +	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); +	if (obj->pages == NULL) +		return -ENOMEM; + +	/* Fail silently without starting the shrinker */ +	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; +	gfp = mapping_gfp_mask(mapping); +	gfp |= __GFP_NORETRY | __GFP_NOWARN; +	gfp &= ~(__GFP_IO | __GFP_WAIT); +	for (i = 0; i < page_count; i++) { +		page = shmem_read_mapping_page_gfp(mapping, i, gfp); +		if (IS_ERR(page)) { +			i915_gem_purge(dev_priv, page_count); +			page = shmem_read_mapping_page_gfp(mapping, i, gfp); +		} +		if (IS_ERR(page)) { +			/* We've tried hard to allocate the memory by reaping +			 * our own buffer, now let the real VM do its job and +			 * go down in flames if truly OOM. 
+			 */ +			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN); +			gfp |= __GFP_IO | __GFP_WAIT; + +			i915_gem_shrink_all(dev_priv); +			page = shmem_read_mapping_page_gfp(mapping, i, gfp); +			if (IS_ERR(page)) +				goto err_pages; + +			gfp |= __GFP_NORETRY | __GFP_NOWARN; +			gfp &= ~(__GFP_IO | __GFP_WAIT); +		} + +		obj->pages[i] = page; +	} + +	if (i915_gem_object_needs_bit17_swizzle(obj)) +		i915_gem_object_do_bit_17_swizzle(obj); + +	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); +	return 0; + +err_pages: +	while (i--) +		page_cache_release(obj->pages[i]); + +	drm_free_large(obj->pages); +	obj->pages = NULL; +	return PTR_ERR(page);  }  void @@ -1486,32 +1885,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)  	WARN_ON(i915_verify_lists(dev));  } -/* Immediately discard the backing storage */ -static void -i915_gem_object_truncate(struct drm_i915_gem_object *obj) -{ -	struct inode *inode; - -	/* Our goal here is to return as much of the memory as -	 * is possible back to the system as we are called from OOM. -	 * To do this we must instruct the shmfs to drop all of its -	 * backing pages, *now*. 
-	 */ -	inode = obj->base.filp->f_path.dentry->d_inode; -	shmem_truncate_range(inode, 0, (loff_t)-1); - -	if (obj->base.map_list.map) -		drm_gem_free_mmap_offset(&obj->base); - -	obj->madv = __I915_MADV_PURGED; -} - -static inline int -i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) -{ -	return obj->madv == I915_MADV_DONTNEED; -} -  static u32  i915_gem_get_seqno(struct drm_device *dev)  { @@ -1698,6 +2071,7 @@ void i915_gem_reset(struct drm_device *dev)  		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;  	} +  	/* The fence registers are invalidated so clear them out */  	i915_gem_reset_fences(dev);  } @@ -1821,197 +2195,6 @@ i915_gem_retire_work_handler(struct work_struct *work)  	mutex_unlock(&dev->struct_mutex);  } -int -i915_gem_check_wedge(struct drm_i915_private *dev_priv, -		     bool interruptible) -{ -	if (atomic_read(&dev_priv->mm.wedged)) { -		struct completion *x = &dev_priv->error_completion; -		bool recovery_complete; -		unsigned long flags; - -		/* Give the error handler a chance to run. */ -		spin_lock_irqsave(&x->wait.lock, flags); -		recovery_complete = x->done > 0; -		spin_unlock_irqrestore(&x->wait.lock, flags); - -		/* Non-interruptible callers can't handle -EAGAIN, hence return -		 * -EIO unconditionally for these. */ -		if (!interruptible) -			return -EIO; - -		/* Recovery complete, but still wedged means reset failure. */ -		if (recovery_complete) -			return -EIO; - -		return -EAGAIN; -	} - -	return 0; -} - -/* - * Compare seqno against outstanding lazy request. Emit a request if they are - * equal. - */ -static int -i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) -{ -	int ret; - -	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); - -	ret = 0; -	if (seqno == ring->outstanding_lazy_request) -		ret = i915_add_request(ring, NULL, NULL); - -	return ret; -} - -/** - * __wait_seqno - wait until execution of seqno has finished - * @ring: the ring expected to report seqno - * @seqno: duh! 
- * @interruptible: do an interruptible wait (normally yes) - * @timeout: in - how long to wait (NULL forever); out - how much time remaining - * - * Returns 0 if the seqno was found within the alloted time. Else returns the - * errno with remaining time filled in timeout argument. - */ -static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, -			bool interruptible, struct timespec *timeout) -{ -	drm_i915_private_t *dev_priv = ring->dev->dev_private; -	struct timespec before, now, wait_time={1,0}; -	unsigned long timeout_jiffies; -	long end; -	bool wait_forever = true; -	int ret; - -	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) -		return 0; - -	trace_i915_gem_request_wait_begin(ring, seqno); - -	if (timeout != NULL) { -		wait_time = *timeout; -		wait_forever = false; -	} - -	timeout_jiffies = timespec_to_jiffies(&wait_time); - -	if (WARN_ON(!ring->irq_get(ring))) -		return -ENODEV; - -	/* Record current time in case interrupted by signal, or wedged * */ -	getrawmonotonic(&before); - -#define EXIT_COND \ -	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ -	atomic_read(&dev_priv->mm.wedged)) -	do { -		if (interruptible) -			end = wait_event_interruptible_timeout(ring->irq_queue, -							       EXIT_COND, -							       timeout_jiffies); -		else -			end = wait_event_timeout(ring->irq_queue, EXIT_COND, -						 timeout_jiffies); - -		ret = i915_gem_check_wedge(dev_priv, interruptible); -		if (ret) -			end = ret; -	} while (end == 0 && wait_forever); - -	getrawmonotonic(&now); - -	ring->irq_put(ring); -	trace_i915_gem_request_wait_end(ring, seqno); -#undef EXIT_COND - -	if (timeout) { -		struct timespec sleep_time = timespec_sub(now, before); -		*timeout = timespec_sub(*timeout, sleep_time); -	} - -	switch (end) { -	case -EIO: -	case -EAGAIN: /* Wedged */ -	case -ERESTARTSYS: /* Signal */ -		return (int)end; -	case 0: /* Timeout */ -		if (timeout) -			set_normalized_timespec(timeout, 0, 0); -		return -ETIME; -	default: /* Completed */ 
-		WARN_ON(end < 0); /* We're not aware of other errors */ -		return 0; -	} -} - -/** - * Waits for a sequence number to be signaled, and cleans up the - * request and object lists appropriately for that event. - */ -int -i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) -{ -	drm_i915_private_t *dev_priv = ring->dev->dev_private; -	int ret = 0; - -	BUG_ON(seqno == 0); - -	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); -	if (ret) -		return ret; - -	ret = i915_gem_check_olr(ring, seqno); -	if (ret) -		return ret; - -	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL); - -	return ret; -} - -/** - * Ensures that all rendering to the object has completed and the object is - * safe to unbind from the GTT or access from the CPU. - */ -static __must_check int -i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, -			       bool readonly) -{ -	u32 seqno; -	int ret; - -	/* If there is rendering queued on the buffer being evicted, wait for -	 * it. -	 */ -	if (readonly) -		seqno = obj->last_write_seqno; -	else -		seqno = obj->last_read_seqno; -	if (seqno == 0) -		return 0; - -	ret = i915_wait_seqno(obj->ring, seqno); -	if (ret) -		return ret; - -	/* Manually manage the write flush as we may have not yet retired -	 * the buffer. 
-	 */ -	if (obj->last_write_seqno && -	    i915_seqno_passed(seqno, obj->last_write_seqno)) { -		obj->last_write_seqno = 0; -		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; -	} - -	i915_gem_retire_requests_ring(obj->ring); -	return 0; -} -  /**   * Ensures that an object will eventually get non-busy by flushing any required   * write domains, emitting any outstanding lazy request and retiring and @@ -2199,6 +2382,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)  	if (obj->pin_count)  		return -EBUSY; +	BUG_ON(obj->pages == NULL); +  	ret = i915_gem_object_finish_gpu(obj);  	if (ret)  		return ret; @@ -2209,22 +2394,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)  	i915_gem_object_finish_gtt(obj); -	/* Move the object to the CPU domain to ensure that -	 * any possible CPU writes while it's not in the GTT -	 * are flushed when we go to remap it. -	 */ -	if (ret == 0) -		ret = i915_gem_object_set_to_cpu_domain(obj, 1); -	if (ret == -ERESTARTSYS) -		return ret; -	if (ret) { -		/* In the event of a disaster, abandon all caches and -		 * hope for the best. -		 */ -		i915_gem_clflush_object(obj); -		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; -	} -  	/* release the fence reg _after_ flushing */  	ret = i915_gem_object_put_fence(obj);  	if (ret) @@ -2240,10 +2409,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)  	}  	i915_gem_gtt_finish_object(obj); -	i915_gem_object_put_pages_gtt(obj); - -	list_del_init(&obj->gtt_list); -	list_del_init(&obj->mm_list); +	list_del(&obj->mm_list); +	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);  	/* Avoid an unnecessary call to unbind on rebind. 
*/  	obj->map_and_fenceable = true; @@ -2251,10 +2418,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)  	obj->gtt_space = NULL;  	obj->gtt_offset = 0; -	if (i915_gem_object_is_purgeable(obj)) -		i915_gem_object_truncate(obj); - -	return ret; +	return 0;  }  static int i915_ring_idle(struct intel_ring_buffer *ring) @@ -2273,11 +2437,11 @@ int i915_gpu_idle(struct drm_device *dev)  	/* Flush everything onto the inactive list. */  	for_each_ring(ring, dev_priv, i) { -		ret = i915_ring_idle(ring); +		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);  		if (ret)  			return ret; -		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); +		ret = i915_ring_idle(ring);  		if (ret)  			return ret;  	} @@ -2662,12 +2826,12 @@ static void i915_gem_verify_gtt(struct drm_device *dev)  static int  i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,  			    unsigned alignment, -			    bool map_and_fenceable) +			    bool map_and_fenceable, +			    bool nonblocking)  {  	struct drm_device *dev = obj->base.dev;  	drm_i915_private_t *dev_priv = dev->dev_private;  	struct drm_mm_node *free_space; -	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;  	u32 size, fence_size, fence_alignment, unfenced_alignment;  	bool mappable, fenceable;  	int ret; @@ -2707,6 +2871,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,  		return -E2BIG;  	} +	ret = i915_gem_object_get_pages_gtt(obj); +	if (ret) +		return ret; +   search_free:  	if (map_and_fenceable)  		free_space = @@ -2733,12 +2901,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,  							 false);  	}  	if (obj->gtt_space == NULL) { -		/* If the gtt is empty and we're still having trouble -		 * fitting our object in, we're out of memory. 
-		 */  		ret = i915_gem_evict_something(dev, size, alignment,  					       obj->cache_level, -					       map_and_fenceable); +					       map_and_fenceable, +					       nonblocking);  		if (ret)  			return ret; @@ -2752,55 +2918,20 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,  		return -EINVAL;  	} -	ret = i915_gem_object_get_pages_gtt(obj, gfpmask); -	if (ret) { -		drm_mm_put_block(obj->gtt_space); -		obj->gtt_space = NULL; - -		if (ret == -ENOMEM) { -			/* first try to reclaim some memory by clearing the GTT */ -			ret = i915_gem_evict_everything(dev, false); -			if (ret) { -				/* now try to shrink everyone else */ -				if (gfpmask) { -					gfpmask = 0; -					goto search_free; -				} - -				return -ENOMEM; -			} - -			goto search_free; -		} - -		return ret; -	}  	ret = i915_gem_gtt_prepare_object(obj);  	if (ret) { -		i915_gem_object_put_pages_gtt(obj);  		drm_mm_put_block(obj->gtt_space);  		obj->gtt_space = NULL; - -		if (i915_gem_evict_everything(dev, false)) -			return ret; - -		goto search_free; +		return ret;  	}  	if (!dev_priv->mm.aliasing_ppgtt)  		i915_gem_gtt_bind_object(obj, obj->cache_level); -	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); +	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);  	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); -	/* Assert that the object is not currently in any GPU domain. As it -	 * wasn't in the GTT, there shouldn't be any way it could have been in -	 * a GPU cache -	 */ -	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); -	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); -  	obj->gtt_offset = obj->gtt_space->start;  	fenceable = @@ -3113,7 +3244,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,  	 * (e.g. libkms for the bootup splash), we have to ensure that we  	 * always use map_and_fenceable for all scanout buffers.  	 
*/ -	ret = i915_gem_object_pin(obj, alignment, true); +	ret = i915_gem_object_pin(obj, alignment, true, false);  	if (ret)  		return ret; @@ -3250,7 +3381,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)  int  i915_gem_object_pin(struct drm_i915_gem_object *obj,  		    uint32_t alignment, -		    bool map_and_fenceable) +		    bool map_and_fenceable, +		    bool nonblocking)  {  	int ret; @@ -3274,7 +3406,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,  	if (obj->gtt_space == NULL) {  		ret = i915_gem_object_bind_to_gtt(obj, alignment, -						  map_and_fenceable); +						  map_and_fenceable, +						  nonblocking);  		if (ret)  			return ret;  	} @@ -3332,7 +3465,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,  	obj->user_pin_count++;  	obj->pin_filp = file;  	if (obj->user_pin_count == 1) { -		ret = i915_gem_object_pin(obj, args->alignment, true); +		ret = i915_gem_object_pin(obj, args->alignment, true, false);  		if (ret)  			goto out;  	} @@ -3464,9 +3597,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,  	if (obj->madv != __I915_MADV_PURGED)  		obj->madv = args->madv; -	/* if the object is no longer bound, discard its backing storage */ -	if (i915_gem_object_is_purgeable(obj) && -	    obj->gtt_space == NULL) +	/* if the object is no longer attached, discard its backing storage */ +	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)  		i915_gem_object_truncate(obj);  	args->retained = obj->madv != __I915_MADV_PURGED; @@ -3478,10 +3610,26 @@ unlock:  	return ret;  } +void i915_gem_object_init(struct drm_i915_gem_object *obj) +{ +	obj->base.driver_private = NULL; + +	INIT_LIST_HEAD(&obj->mm_list); +	INIT_LIST_HEAD(&obj->gtt_list); +	INIT_LIST_HEAD(&obj->ring_list); +	INIT_LIST_HEAD(&obj->exec_list); + +	obj->fence_reg = I915_FENCE_REG_NONE; +	obj->madv = I915_MADV_WILLNEED; +	/* Avoid an unnecessary call to unbind on the first bind. 
*/ +	obj->map_and_fenceable = true; + +	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); +} +  struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,  						  size_t size)  { -	struct drm_i915_private *dev_priv = dev->dev_private;  	struct drm_i915_gem_object *obj;  	struct address_space *mapping;  	u32 mask; @@ -3505,7 +3653,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,  	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;  	mapping_set_gfp_mask(mapping, mask); -	i915_gem_info_add_obj(dev_priv, size); +	i915_gem_object_init(obj);  	obj->base.write_domain = I915_GEM_DOMAIN_CPU;  	obj->base.read_domains = I915_GEM_DOMAIN_CPU; @@ -3527,16 +3675,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,  	} else  		obj->cache_level = I915_CACHE_NONE; -	obj->base.driver_private = NULL; -	obj->fence_reg = I915_FENCE_REG_NONE; -	INIT_LIST_HEAD(&obj->mm_list); -	INIT_LIST_HEAD(&obj->gtt_list); -	INIT_LIST_HEAD(&obj->ring_list); -	INIT_LIST_HEAD(&obj->exec_list); -	obj->madv = I915_MADV_WILLNEED; -	/* Avoid an unnecessary call to unbind on the first bind. */ -	obj->map_and_fenceable = true; -  	return obj;  } @@ -3573,8 +3711,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)  		dev_priv->mm.interruptible = was_interruptible;  	} -	if (obj->base.map_list.map) -		drm_gem_free_mmap_offset(&obj->base); +	i915_gem_object_put_pages_gtt(obj); +	i915_gem_object_free_mmap_offset(obj);  	drm_gem_object_release(&obj->base);  	i915_gem_info_remove_obj(dev_priv, obj->base.size); @@ -3605,7 +3743,7 @@ i915_gem_idle(struct drm_device *dev)  	/* Under UMS, be paranoid and evict. 
*/  	if (!drm_core_check_feature(dev, DRIVER_MODESET)) -		i915_gem_evict_everything(dev, false); +		i915_gem_evict_everything(dev);  	i915_gem_reset_fences(dev); @@ -3963,8 +4101,9 @@ i915_gem_load(struct drm_device *dev)  	INIT_LIST_HEAD(&dev_priv->mm.active_list);  	INIT_LIST_HEAD(&dev_priv->mm.inactive_list); +	INIT_LIST_HEAD(&dev_priv->mm.unbound_list); +	INIT_LIST_HEAD(&dev_priv->mm.bound_list);  	INIT_LIST_HEAD(&dev_priv->mm.fence_list); -	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);  	for (i = 0; i < I915_NUM_RINGS; i++)  		init_ring_lists(&dev_priv->ring[i]);  	for (i = 0; i < I915_MAX_NUM_FENCES; i++) @@ -4209,13 +4348,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)  }  static int -i915_gpu_is_active(struct drm_device *dev) -{ -	drm_i915_private_t *dev_priv = dev->dev_private; -	return !list_empty(&dev_priv->mm.active_list); -} - -static int  i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)  {  	struct drm_i915_private *dev_priv = @@ -4223,60 +4355,26 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)  			     struct drm_i915_private,  			     mm.inactive_shrinker);  	struct drm_device *dev = dev_priv->dev; -	struct drm_i915_gem_object *obj, *next; +	struct drm_i915_gem_object *obj;  	int nr_to_scan = sc->nr_to_scan;  	int cnt;  	if (!mutex_trylock(&dev->struct_mutex))  		return 0; -	/* "fast-path" to count number of available objects */ -	if (nr_to_scan == 0) { -		cnt = 0; -		list_for_each_entry(obj, -				    &dev_priv->mm.inactive_list, -				    mm_list) -			cnt++; -		mutex_unlock(&dev->struct_mutex); -		return cnt / 100 * sysctl_vfs_cache_pressure; +	if (nr_to_scan) { +		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); +		if (nr_to_scan > 0) +			i915_gem_shrink_all(dev_priv);  	} -rescan: -	/* first scan for clean buffers */ -	i915_gem_retire_requests(dev); - -	list_for_each_entry_safe(obj, next, -				 &dev_priv->mm.inactive_list, -				 mm_list) { -		if 
(i915_gem_object_is_purgeable(obj)) { -			if (i915_gem_object_unbind(obj) == 0 && -			    --nr_to_scan == 0) -				break; -		} -	} - -	/* second pass, evict/count anything still on the inactive list */  	cnt = 0; -	list_for_each_entry_safe(obj, next, -				 &dev_priv->mm.inactive_list, -				 mm_list) { -		if (nr_to_scan && -		    i915_gem_object_unbind(obj) == 0) -			nr_to_scan--; -		else -			cnt++; -	} +	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) +		cnt += obj->base.size >> PAGE_SHIFT; +	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) +		if (obj->pin_count == 0) +			cnt += obj->base.size >> PAGE_SHIFT; -	if (nr_to_scan && i915_gpu_is_active(dev)) { -		/* -		 * We are desperate for pages, so as a last resort, wait -		 * for the GPU to finish and discard whatever we can. -		 * This has a dramatic impact to reduce the number of -		 * OOM-killer events whilst running the GPU aggressively. -		 */ -		if (i915_gpu_idle(dev) == 0) -			goto rescan; -	}  	mutex_unlock(&dev->struct_mutex); -	return cnt / 100 * sysctl_vfs_cache_pressure; +	return cnt;  } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 5c2d354cebb..4aa7ecf77ed 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -221,7 +221,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)  	 * default context.  	 
*/  	dev_priv->ring[RCS].default_context = ctx; -	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); +	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);  	if (ret)  		goto err_destroy; @@ -374,7 +374,7 @@ static int do_switch(struct i915_hw_context *to)  	if (from_obj == to->obj)  		return 0; -	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); +	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);  	if (ret)  		return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index ceaad5af01a..43c95307f99 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -33,7 +33,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme  	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;  	struct drm_device *dev = obj->base.dev;  	int npages = obj->base.size / PAGE_SIZE; -	struct sg_table *sg = NULL; +	struct sg_table *sg;  	int ret;  	int nents; @@ -41,10 +41,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme  	if (ret)  		return ERR_PTR(ret); -	if (!obj->pages) { -		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); -		if (ret) -			goto out; +	ret = i915_gem_object_get_pages_gtt(obj); +	if (ret) { +		sg = ERR_PTR(ret); +		goto out;  	}  	/* link the pages into an SG then map the sg */ @@ -89,12 +89,10 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)  		goto out_unlock;  	} -	if (!obj->pages) { -		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); -		if (ret) { -			mutex_unlock(&dev->struct_mutex); -			return ERR_PTR(ret); -		} +	ret = i915_gem_object_get_pages_gtt(obj); +	if (ret) { +		mutex_unlock(&dev->struct_mutex); +		return ERR_PTR(ret);  	}  	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c 
b/drivers/gpu/drm/i915/i915_gem_evict.c index 7279c31d4a9..a2d8acde855 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -45,7 +45,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)  int  i915_gem_evict_something(struct drm_device *dev, int min_size,  			 unsigned alignment, unsigned cache_level, -			 bool mappable) +			 bool mappable, bool nonblocking)  {  	drm_i915_private_t *dev_priv = dev->dev_private;  	struct list_head eviction_list, unwind_list; @@ -92,12 +92,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,  			goto found;  	} +	if (nonblocking) +		goto none; +  	/* Now merge in the soon-to-be-expired objects... */  	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {  		if (mark_free(obj, &unwind_list))  			goto found;  	} +none:  	/* Nothing found, clean up and bail out! */  	while (!list_empty(&unwind_list)) {  		obj = list_first_entry(&unwind_list, @@ -148,7 +152,7 @@ found:  }  int -i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) +i915_gem_evict_everything(struct drm_device *dev)  {  	drm_i915_private_t *dev_priv = dev->dev_private;  	struct drm_i915_gem_object *obj, *next; @@ -160,7 +164,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)  	if (lists_empty)  		return -ENOSPC; -	trace_i915_gem_evict_everything(dev, purgeable_only); +	trace_i915_gem_evict_everything(dev);  	/* The gpu_idle will flush everything in the write domain to the  	 * active list. 
Then we must move everything off the active list @@ -174,12 +178,9 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)  	/* Having flushed everything, unbind() should never raise an error */  	list_for_each_entry_safe(obj, next, -				 &dev_priv->mm.inactive_list, mm_list) { -		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { -			if (obj->pin_count == 0) -				WARN_ON(i915_gem_object_unbind(obj)); -		} -	} +				 &dev_priv->mm.inactive_list, mm_list) +		if (obj->pin_count == 0) +			WARN_ON(i915_gem_object_unbind(obj));  	return 0;  } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index afb312ee050..e6b2205ecf6 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -95,6 +95,7 @@ eb_destroy(struct eb_objects *eb)  static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)  {  	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || +		!obj->map_and_fenceable ||  		obj->cache_level != I915_CACHE_NONE);  } @@ -330,7 +331,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,  	return ret;  } -#define  __EXEC_OBJECT_HAS_FENCE (1<<31) +#define  __EXEC_OBJECT_HAS_PIN (1<<31) +#define  __EXEC_OBJECT_HAS_FENCE (1<<30)  static int  need_reloc_mappable(struct drm_i915_gem_object *obj) @@ -340,9 +342,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)  }  static int -pin_and_fence_object(struct drm_i915_gem_object *obj, -		     struct intel_ring_buffer *ring) +i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, +				   struct intel_ring_buffer *ring)  { +	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;  	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;  	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;  	bool need_fence, need_mappable; @@ -354,15 +357,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,  		obj->tiling_mode != I915_TILING_NONE;  	need_mappable = 
need_fence || need_reloc_mappable(obj); -	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); +	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);  	if (ret)  		return ret; +	entry->flags |= __EXEC_OBJECT_HAS_PIN; +  	if (has_fenced_gpu_access) {  		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {  			ret = i915_gem_object_get_fence(obj);  			if (ret) -				goto err_unpin; +				return ret;  			if (i915_gem_object_pin_fence(obj))  				entry->flags |= __EXEC_OBJECT_HAS_FENCE; @@ -371,12 +376,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,  		}  	} +	/* Ensure ppgtt mapping exists if needed */ +	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { +		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, +				       obj, obj->cache_level); + +		obj->has_aliasing_ppgtt_mapping = 1; +	} +  	entry->offset = obj->gtt_offset;  	return 0; +} -err_unpin: -	i915_gem_object_unpin(obj); -	return ret; +static void +i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) +{ +	struct drm_i915_gem_exec_object2 *entry; + +	if (!obj->gtt_space) +		return; + +	entry = obj->exec_entry; + +	if (entry->flags & __EXEC_OBJECT_HAS_FENCE) +		i915_gem_object_unpin_fence(obj); + +	if (entry->flags & __EXEC_OBJECT_HAS_PIN) +		i915_gem_object_unpin(obj); + +	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);  }  static int @@ -384,11 +412,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,  			    struct drm_file *file,  			    struct list_head *objects)  { -	drm_i915_private_t *dev_priv = ring->dev->dev_private;  	struct drm_i915_gem_object *obj; -	int ret, retry; -	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;  	struct list_head ordered_objects; +	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; +	int retry;  	INIT_LIST_HEAD(&ordered_objects);  	while (!list_empty(objects)) { @@ -426,12 +453,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,  	 * 2.  
Bind new objects.  	 * 3.  Decrement pin count.  	 * -	 * This avoid unnecessary unbinding of later objects in order to makr +	 * This avoid unnecessary unbinding of later objects in order to make  	 * room for the earlier objects *unless* we need to defragment.  	 */  	retry = 0;  	do { -		ret = 0; +		int ret = 0;  		/* Unbind any ill-fitting objects or pin. */  		list_for_each_entry(obj, objects, exec_list) { @@ -451,7 +478,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,  			    (need_mappable && !obj->map_and_fenceable))  				ret = i915_gem_object_unbind(obj);  			else -				ret = pin_and_fence_object(obj, ring); +				ret = i915_gem_execbuffer_reserve_object(obj, ring);  			if (ret)  				goto err;  		} @@ -461,77 +488,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,  			if (obj->gtt_space)  				continue; -			ret = pin_and_fence_object(obj, ring); -			if (ret) { -				int ret_ignore; - -				/* This can potentially raise a harmless -				 * -EINVAL if we failed to bind in the above -				 * call. It cannot raise -EINTR since we know -				 * that the bo is freshly bound and so will -				 * not need to be flushed or waited upon. -				 */ -				ret_ignore = i915_gem_object_unbind(obj); -				(void)ret_ignore; -				WARN_ON(obj->gtt_space); -				break; -			} +			ret = i915_gem_execbuffer_reserve_object(obj, ring); +			if (ret) +				goto err;  		} -		/* Decrement pin count for bound objects */ -		list_for_each_entry(obj, objects, exec_list) { -			struct drm_i915_gem_exec_object2 *entry; - -			if (!obj->gtt_space) -				continue; - -			entry = obj->exec_entry; -			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { -				i915_gem_object_unpin_fence(obj); -				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; -			} - -			i915_gem_object_unpin(obj); - -			/* ... and ensure ppgtt mapping exist if needed. 
*/ -			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { -				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, -						       obj, obj->cache_level); +err:		/* Decrement pin count for bound objects */ +		list_for_each_entry(obj, objects, exec_list) +			i915_gem_execbuffer_unreserve_object(obj); -				obj->has_aliasing_ppgtt_mapping = 1; -			} -		} - -		if (ret != -ENOSPC || retry > 1) +		if (ret != -ENOSPC || retry++)  			return ret; -		/* First attempt, just clear anything that is purgeable. -		 * Second attempt, clear the entire GTT. -		 */ -		ret = i915_gem_evict_everything(ring->dev, retry == 0); +		ret = i915_gem_evict_everything(ring->dev);  		if (ret)  			return ret; - -		retry++;  	} while (1); - -err: -	list_for_each_entry_continue_reverse(obj, objects, exec_list) { -		struct drm_i915_gem_exec_object2 *entry; - -		if (!obj->gtt_space) -			continue; - -		entry = obj->exec_entry; -		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { -			i915_gem_object_unpin_fence(obj); -			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; -		} - -		i915_gem_object_unpin(obj); -	} - -	return ret;  }  static int diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3b3b731a17c..18477314d85 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,  		pte_flags |= GEN6_PTE_CACHE_LLC;  		break;  	case I915_CACHE_NONE: -		pte_flags |= GEN6_PTE_UNCACHED; +		if (IS_HASWELL(dev)) +			pte_flags |= HSW_PTE_UNCACHED; +		else +			pte_flags |= GEN6_PTE_UNCACHED;  		break;  	default:  		BUG(); @@ -348,7 +351,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)  	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,  			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); -	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { +	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {  		
i915_gem_clflush_object(obj);  		i915_gem_gtt_bind_object(obj, obj->cache_level);  	} diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a61b41a8c60..d6010135e40 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -310,7 +310,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)  	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); -	new_delay = dev_priv->cur_delay; +	new_delay = dev_priv->ips.cur_delay;  	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);  	busy_up = I915_READ(RCPREVBSYTUPAVG); @@ -320,19 +320,19 @@ static void ironlake_handle_rps_change(struct drm_device *dev)  	/* Handle RCS change request from hw */  	if (busy_up > max_avg) { -		if (dev_priv->cur_delay != dev_priv->max_delay) -			new_delay = dev_priv->cur_delay - 1; -		if (new_delay < dev_priv->max_delay) -			new_delay = dev_priv->max_delay; +		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) +			new_delay = dev_priv->ips.cur_delay - 1; +		if (new_delay < dev_priv->ips.max_delay) +			new_delay = dev_priv->ips.max_delay;  	} else if (busy_down < min_avg) { -		if (dev_priv->cur_delay != dev_priv->min_delay) -			new_delay = dev_priv->cur_delay + 1; -		if (new_delay > dev_priv->min_delay) -			new_delay = dev_priv->min_delay; +		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) +			new_delay = dev_priv->ips.cur_delay + 1; +		if (new_delay > dev_priv->ips.min_delay) +			new_delay = dev_priv->ips.min_delay;  	}  	if (ironlake_set_drps(dev, new_delay)) -		dev_priv->cur_delay = new_delay; +		dev_priv->ips.cur_delay = new_delay;  	spin_unlock_irqrestore(&mchdev_lock, flags); @@ -853,6 +853,35 @@ static void i915_error_work_func(struct work_struct *work)  	}  } +/* NB: please notice the memset */ +static void i915_get_extra_instdone(struct drm_device *dev, +				    uint32_t *instdone) +{ +	struct drm_i915_private *dev_priv = dev->dev_private; +	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); + +	
switch(INTEL_INFO(dev)->gen) { +	case 2: +	case 3: +		instdone[0] = I915_READ(INSTDONE); +		break; +	case 4: +	case 5: +	case 6: +		instdone[0] = I915_READ(INSTDONE_I965); +		instdone[1] = I915_READ(INSTDONE1); +		break; +	default: +		WARN_ONCE(1, "Unsupported platform\n"); +	case 7: +		instdone[0] = I915_READ(GEN7_INSTDONE_1); +		instdone[1] = I915_READ(GEN7_SC_INSTDONE); +		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); +		instdone[3] = I915_READ(GEN7_ROW_INSTDONE); +		break; +	} +} +  #ifdef CONFIG_DEBUG_FS  static struct drm_i915_error_object *  i915_error_object_create(struct drm_i915_private *dev_priv, @@ -1091,10 +1120,8 @@ static void i915_record_ring_state(struct drm_device *dev,  		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));  		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));  		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); -		if (ring->id == RCS) { -			error->instdone1 = I915_READ(INSTDONE1); +		if (ring->id == RCS)  			error->bbaddr = I915_READ64(BB_ADDR); -		}  	} else {  		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);  		error->ipeir[ring->id] = I915_READ(IPEIR); @@ -1210,6 +1237,11 @@ static void i915_capture_error_state(struct drm_device *dev)  		error->done_reg = I915_READ(DONE_REG);  	} +	if (INTEL_INFO(dev)->gen == 7) +		error->err_int = I915_READ(GEN7_ERR_INT); + +	i915_get_extra_instdone(dev, error->extra_instdone); +  	i915_gem_record_fences(dev, error);  	i915_gem_record_rings(dev, error); @@ -1221,7 +1253,7 @@ static void i915_capture_error_state(struct drm_device *dev)  	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)  		i++;  	error->active_bo_count = i; -	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) +	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)  		if (obj->pin_count)  			i++;  	error->pinned_bo_count = i - error->active_bo_count; @@ -1246,7 +1278,7 @@ static void i915_capture_error_state(struct drm_device *dev)  		
error->pinned_bo_count =  			capture_pinned_bo(error->pinned_bo,  					  error->pinned_bo_count, -					  &dev_priv->mm.gtt_list); +					  &dev_priv->mm.bound_list);  	do_gettimeofday(&error->time); @@ -1285,24 +1317,26 @@ void i915_destroy_error_state(struct drm_device *dev)  static void i915_report_and_clear_eir(struct drm_device *dev)  {  	struct drm_i915_private *dev_priv = dev->dev_private; +	uint32_t instdone[I915_NUM_INSTDONE_REG];  	u32 eir = I915_READ(EIR); -	int pipe; +	int pipe, i;  	if (!eir)  		return;  	pr_err("render error detected, EIR: 0x%08x\n", eir); +	i915_get_extra_instdone(dev, instdone); +  	if (IS_G4X(dev)) {  		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {  			u32 ipeir = I915_READ(IPEIR_I965);  			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));  			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); -			pr_err("  INSTDONE: 0x%08x\n", -			       I915_READ(INSTDONE_I965)); +			for (i = 0; i < ARRAY_SIZE(instdone); i++) +				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);  			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS)); -			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));  			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));  			I915_WRITE(IPEIR_I965, ipeir);  			POSTING_READ(IPEIR_I965); @@ -1336,12 +1370,13 @@ static void i915_report_and_clear_eir(struct drm_device *dev)  	if (eir & I915_ERROR_INSTRUCTION) {  		pr_err("instruction error\n");  		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM)); +		for (i = 0; i < ARRAY_SIZE(instdone); i++) +			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);  		if (INTEL_INFO(dev)->gen < 4) {  			u32 ipeir = I915_READ(IPEIR);  			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));  			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR)); -			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));  			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));  			I915_WRITE(IPEIR, ipeir);  			POSTING_READ(IPEIR); @@ -1350,10 +1385,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)  			
pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));  			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); -			pr_err("  INSTDONE: 0x%08x\n", -			       I915_READ(INSTDONE_I965));  			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS)); -			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));  			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));  			I915_WRITE(IPEIR_I965, ipeir);  			POSTING_READ(IPEIR_I965); @@ -1668,7 +1700,7 @@ void i915_hangcheck_elapsed(unsigned long data)  {  	struct drm_device *dev = (struct drm_device *)data;  	drm_i915_private_t *dev_priv = dev->dev_private; -	uint32_t acthd[I915_NUM_RINGS], instdone, instdone1; +	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];  	struct intel_ring_buffer *ring;  	bool err = false, idle;  	int i; @@ -1696,25 +1728,16 @@ void i915_hangcheck_elapsed(unsigned long data)  		return;  	} -	if (INTEL_INFO(dev)->gen < 4) { -		instdone = I915_READ(INSTDONE); -		instdone1 = 0; -	} else { -		instdone = I915_READ(INSTDONE_I965); -		instdone1 = I915_READ(INSTDONE1); -	} - +	i915_get_extra_instdone(dev, instdone);  	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && -	    dev_priv->last_instdone == instdone && -	    dev_priv->last_instdone1 == instdone1) { +	    memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {  		if (i915_hangcheck_hung(dev))  			return;  	} else {  		dev_priv->hangcheck_count = 0;  		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); -		dev_priv->last_instdone = instdone; -		dev_priv->last_instdone1 = instdone1; +		memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));  	}  repeat: diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1e5f77a4a1e..a828e90602b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -115,6 +115,7 @@  #define GEN6_PTE_VALID			(1 << 0)  #define GEN6_PTE_UNCACHED		(1 << 1) +#define HSW_PTE_UNCACHED		(0)  #define GEN6_PTE_CACHE_LLC		(2 << 1)  #define 
GEN6_PTE_CACHE_LLC_MLC		(3 << 1)  #define GEN6_PTE_CACHE_BITS		(3 << 1) @@ -478,6 +479,11 @@  #define IPEIR_I965	0x02064  #define IPEHR_I965	0x02068  #define INSTDONE_I965	0x0206c +#define GEN7_INSTDONE_1		0x0206c +#define GEN7_SC_INSTDONE	0x07100 +#define GEN7_SAMPLER_INSTDONE	0x0e160 +#define GEN7_ROW_INSTDONE	0x0e164 +#define I915_NUM_INSTDONE_REG	4  #define RING_IPEIR(base)	((base)+0x64)  #define RING_IPEHR(base)	((base)+0x68)  #define RING_INSTDONE(base)	((base)+0x6c) @@ -500,6 +506,8 @@  #define DMA_FADD_I8XX	0x020d0  #define ERROR_GEN6	0x040a0 +#define GEN7_ERR_INT	0x44040 +#define   ERR_INT_MMIO_UNCLAIMED (1<<13)  /* GM45+ chicken bits -- debug workaround bits that may be required   * for various sorts of correct behavior.  The top 16 bits of each are diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index c5ee7ee3b17..da733a3fe1e 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -93,6 +93,7 @@ static struct attribute_group rc6_attr_group = {  	.name = power_group_name,  	.attrs =  rc6_attrs  }; +#endif  static int l3_access_valid(struct drm_device *dev, loff_t offset)  { @@ -206,13 +207,14 @@ void i915_setup_sysfs(struct drm_device *dev)  {  	int ret; +#ifdef CONFIG_PM  	if (INTEL_INFO(dev)->gen >= 6) {  		ret = sysfs_merge_group(&dev->primary->kdev.kobj,  					&rc6_attr_group);  		if (ret)  			DRM_ERROR("RC6 residency sysfs setup failed\n");  	} - +#endif  	if (HAS_L3_GPU_CACHE(dev)) {  		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);  		if (ret) @@ -225,14 +227,3 @@ void i915_teardown_sysfs(struct drm_device *dev)  	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);  	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);  } -#else -void i915_setup_sysfs(struct drm_device *dev) -{ -	return; -} - -void i915_teardown_sysfs(struct drm_device *dev) -{ -	return; -} -#endif /* CONFIG_PM */ diff --git a/drivers/gpu/drm/i915/i915_trace.h 
b/drivers/gpu/drm/i915/i915_trace.h index fe90b3a84a6..8134421b89a 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict,  );  TRACE_EVENT(i915_gem_evict_everything, -	    TP_PROTO(struct drm_device *dev, bool purgeable), -	    TP_ARGS(dev, purgeable), +	    TP_PROTO(struct drm_device *dev), +	    TP_ARGS(dev),  	    TP_STRUCT__entry(  			     __field(u32, dev) -			     __field(bool, purgeable)  			    ),  	    TP_fast_assign(  			   __entry->dev = dev->primary->index; -			   __entry->purgeable = purgeable;  			  ), -	    TP_printk("dev=%d%s", -		      __entry->dev, -		      __entry->purgeable ? ", purgeable only" : "") +	    TP_printk("dev=%d", __entry->dev)  );  TRACE_EVENT(i915_gem_ring_dispatch, @@ -434,6 +430,21 @@ TRACE_EVENT(i915_reg_rw,  		(u32)(__entry->val >> 32))  ); +TRACE_EVENT(intel_gpu_freq_change, +	    TP_PROTO(u32 freq), +	    TP_ARGS(freq), + +	    TP_STRUCT__entry( +			     __field(u32, freq) +			     ), + +	    TP_fast_assign( +			   __entry->freq = freq; +			   ), + +	    TP_printk("new_freq=%u", __entry->freq) +); +  #endif /* _I915_TRACE_H_ */  /* This part must be outside protection */ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b9f08f66a4a..c42b9809f86 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -393,6 +393,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)  	return ret;  } +static struct edid *intel_crt_get_edid(struct drm_connector *connector, +				struct i2c_adapter *i2c) +{ +	struct edid *edid; + +	edid = drm_get_edid(connector, i2c); + +	if (!edid && !intel_gmbus_is_forced_bit(i2c)) { +		DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); +		intel_gmbus_force_bit(i2c, true); +		edid = drm_get_edid(connector, i2c); +		intel_gmbus_force_bit(i2c, false); +	} + +	return edid; +} + +/* local version of intel_ddc_get_modes() 
to use intel_crt_get_edid() */ +static int intel_crt_ddc_get_modes(struct drm_connector *connector, +				struct i2c_adapter *adapter) +{ +	struct edid *edid; + +	edid = intel_crt_get_edid(connector, adapter); +	if (!edid) +		return 0; + +	return intel_connector_update_modes(connector, edid); +} +  static bool intel_crt_detect_ddc(struct drm_connector *connector)  {  	struct intel_crt *crt = intel_attached_crt(connector); @@ -403,7 +433,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)  	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);  	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); -	edid = drm_get_edid(connector, i2c); +	edid = intel_crt_get_edid(connector, i2c);  	if (edid) {  		bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; @@ -609,13 +639,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)  	struct i2c_adapter *i2c;  	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); -	ret = intel_ddc_get_modes(connector, i2c); +	ret = intel_crt_ddc_get_modes(connector, i2c);  	if (ret || !IS_G4X(dev))  		return ret;  	/* Try to probe digital port for output in DVI-I -> VGA mode. */  	i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); -	return intel_ddc_get_modes(connector, i2c); +	return intel_crt_ddc_get_modes(connector, i2c);  }  static int intel_crt_set_property(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 44318bf8e7b..e061acdde45 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3671,6 +3671,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,  	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))  		drm_mode_set_crtcinfo(adjusted_mode, 0); +	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes +	 * with a hsync front porch of 0. 
+	 */ +	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && +		adjusted_mode->hsync_start == adjusted_mode->hdisplay) +		return false; +  	return true;  } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d391e67231b..c59710db653 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -850,10 +850,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,  	 * supposed to be read-only.  	 */  	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; -	intel_dp->DP |=  DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;  	/* Handle DP bits in common between all three register formats */ -  	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;  	switch (intel_dp->lane_count) { @@ -2190,7 +2188,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada  		ret = drm_add_edid_modes(connector, intel_dp->edid);  		drm_edid_to_eld(connector,  				intel_dp->edid); -		connector->display_info.raw_edid = NULL;  		return intel_dp->edid_mode_count;  	} @@ -2236,7 +2233,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)  		edid = intel_dp_get_edid(connector, &intel_dp->adapter);  		if (edid) {  			intel_dp->has_audio = drm_detect_monitor_audio(edid); -			connector->display_info.raw_edid = NULL;  			kfree(edid);  		}  	} @@ -2301,8 +2297,6 @@ intel_dp_detect_audio(struct drm_connector *connector)  	edid = intel_dp_get_edid(connector, &intel_dp->adapter);  	if (edid) {  		has_audio = drm_detect_monitor_audio(edid); - -		connector->display_info.raw_edid = NULL;  		kfree(edid);  	} diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 19e69285ba3..4f2b2d6a248 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -41,7 +41,11 @@  			ret__ = -ETIMEDOUT;				\  			break;						\  		}							\ -		if (W && drm_can_sleep()) msleep(W);	\ +		if (W && drm_can_sleep())  {				\ +			msleep(W);					\ +		} else {						\ 
+			cpu_relax();					\ +		}							\  	}								\  	ret__;								\  }) @@ -387,6 +391,8 @@ struct intel_fbc_work {  	int interval;  }; +int intel_connector_update_modes(struct drm_connector *connector, +				struct edid *edid);  int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);  extern void intel_attach_force_audio_property(struct drm_connector *connector); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 7acf2d91af3..5d02aad0de8 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -794,7 +794,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)  						drm_detect_hdmi_monitor(edid);  			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);  		} -		connector->display_info.raw_edid = NULL;  		kfree(edid);  	} @@ -835,8 +834,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector)  	if (edid) {  		if (edid->input & DRM_EDID_INPUT_DIGITAL)  			has_audio = drm_detect_monitor_audio(edid); - -		connector->display_info.raw_edid = NULL;  		kfree(edid);  	} diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 45848b9b670..4bc1c0fc342 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -33,6 +33,24 @@  #include "i915_drv.h"  /** + * intel_connector_update_modes - update connector from edid + * @connector: DRM connector device to use + * @edid: previously read EDID information + */ +int intel_connector_update_modes(struct drm_connector *connector, +				struct edid *edid) +{ +	int ret; + +	drm_mode_connector_update_edid_property(connector, edid); +	ret = drm_add_edid_modes(connector, edid); +	drm_edid_to_eld(connector, edid); +	kfree(edid); + +	return ret; +} + +/**   * intel_ddc_get_modes - get modelist from monitor   * @connector: DRM connector device to use   * @adapter: i2c adapter @@ -43,18 +61,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,  			struct 
i2c_adapter *adapter)  {  	struct edid *edid; -	int ret = 0;  	edid = drm_get_edid(connector, adapter); -	if (edid) { -		drm_mode_connector_update_edid_property(connector, edid); -		ret = drm_add_edid_modes(connector, edid); -		drm_edid_to_eld(connector, edid); -		connector->display_info.raw_edid = NULL; -		kfree(edid); -	} +	if (!edid) +		return 0; -	return ret; +	return intel_connector_update_modes(connector, edid);  }  static const struct drm_prop_enum_list force_audio_names[] = { diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index c0f48580405..afd0f30ab88 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1383,7 +1383,7 @@ void intel_setup_overlay(struct drm_device *dev)  		}  		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;  	} else { -		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); +		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);  		if (ret) {  			DRM_ERROR("failed to pin overlay register bo\n");  			goto out_free_bo; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c0407aa5baa..36c64091bc9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -31,6 +31,8 @@  #include "../../../platform/x86/intel_ips.h"  #include <linux/module.h> +#define FORCEWAKE_ACK_TIMEOUT_MS 2 +  /* FBC, or Frame Buffer Compression, is a technique employed to compress the   * framebuffer contents in-memory, aiming at reducing the required bandwidth   * during in-memory transfers and, therefore, reduce the power packet. 
@@ -593,7 +595,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)  		break;  	} -	dev_priv->r_t = dev_priv->mem_freq; +	dev_priv->ips.r_t = dev_priv->mem_freq;  	switch (csipll & 0x3ff) {  	case 0x00c: @@ -625,11 +627,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)  	}  	if (dev_priv->fsb_freq == 3200) { -		dev_priv->c_m = 0; +		dev_priv->ips.c_m = 0;  	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { -		dev_priv->c_m = 1; +		dev_priv->ips.c_m = 1;  	} else { -		dev_priv->c_m = 2; +		dev_priv->ips.c_m = 2;  	}  } @@ -2138,7 +2140,7 @@ intel_alloc_context_page(struct drm_device *dev)  		return NULL;  	} -	ret = i915_gem_object_pin(ctx, 4096, true); +	ret = i915_gem_object_pin(ctx, 4096, true, false);  	if (ret) {  		DRM_ERROR("failed to pin power context: %d\n", ret);  		goto err_unref; @@ -2162,12 +2164,6 @@ err_unref:  /**   * Lock protecting IPS related data structures - *   - i915_mch_dev - *   - dev_priv->max_delay - *   - dev_priv->min_delay - *   - dev_priv->fmax - *   - dev_priv->gpu_busy - *   - dev_priv->gfx_power   */  DEFINE_SPINLOCK(mchdev_lock); @@ -2230,12 +2226,12 @@ static void ironlake_enable_drps(struct drm_device *dev)  	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>  		PXVFREQ_PX_SHIFT; -	dev_priv->fmax = fmax; /* IPS callback will increase this */ -	dev_priv->fstart = fstart; +	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ +	dev_priv->ips.fstart = fstart; -	dev_priv->max_delay = fstart; -	dev_priv->min_delay = fmin; -	dev_priv->cur_delay = fstart; +	dev_priv->ips.max_delay = fstart; +	dev_priv->ips.min_delay = fmin; +	dev_priv->ips.cur_delay = fstart;  	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",  			 fmax, fmin, fstart); @@ -2258,11 +2254,11 @@ static void ironlake_enable_drps(struct drm_device *dev)  	ironlake_set_drps(dev, fstart); -	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + +	dev_priv->ips.last_count1 = 
I915_READ(0x112e4) + I915_READ(0x112e8) +  		I915_READ(0x112e0); -	dev_priv->last_time1 = jiffies_to_msecs(jiffies); -	dev_priv->last_count2 = I915_READ(0x112f4); -	getrawmonotonic(&dev_priv->last_time2); +	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); +	dev_priv->ips.last_count2 = I915_READ(0x112f4); +	getrawmonotonic(&dev_priv->ips.last_time2);  	spin_unlock_irq(&mchdev_lock);  } @@ -2284,7 +2280,7 @@ static void ironlake_disable_drps(struct drm_device *dev)  	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);  	/* Go back to the starting frequency */ -	ironlake_set_drps(dev, dev_priv->fstart); +	ironlake_set_drps(dev, dev_priv->ips.fstart);  	mdelay(1);  	rgvswctl |= MEMCTL_CMD_STS;  	I915_WRITE(MEMSWCTL, rgvswctl); @@ -2343,6 +2339,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)  	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);  	dev_priv->rps.cur_delay = val; + +	trace_intel_gpu_freq_change(val * 50);  }  static void gen6_disable_rps(struct drm_device *dev) @@ -2372,6 +2370,11 @@ int intel_enable_rc6(const struct drm_device *dev)  		return i915_enable_rc6;  	if (INTEL_INFO(dev)->gen == 5) { +#ifdef CONFIG_INTEL_IOMMU +		/* Disable rc6 on ilk if VT-d is on. 
*/ +		if (intel_iommu_gfx_mapped) +			return false; +#endif  		DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");  		return INTEL_RC6_ENABLE;  	} @@ -2482,17 +2485,10 @@ static void gen6_enable_rps(struct drm_device *dev)  		   dev_priv->rps.max_delay << 24 |  		   dev_priv->rps.min_delay << 16); -	if (IS_HASWELL(dev)) { -		I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); -		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); -		I915_WRITE(GEN6_RP_UP_EI, 66000); -		I915_WRITE(GEN6_RP_DOWN_EI, 350000); -	} else { -		I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); -		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); -		I915_WRITE(GEN6_RP_UP_EI, 100000); -		I915_WRITE(GEN6_RP_DOWN_EI, 5000000); -	} +	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); +	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); +	I915_WRITE(GEN6_RP_UP_EI, 66000); +	I915_WRITE(GEN6_RP_DOWN_EI, 350000);  	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);  	I915_WRITE(GEN6_RP_CONTROL, @@ -2743,7 +2739,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)  	assert_spin_locked(&mchdev_lock); -	diff1 = now - dev_priv->last_time1; +	diff1 = now - dev_priv->ips.last_time1;  	/* Prevent division-by-zero if we are asking too fast.  	 * Also, we don't get interesting results if we are polling @@ -2751,7 +2747,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)  	 * in such cases.  	 
*/  	if (diff1 <= 10) -		return dev_priv->chipset_power; +		return dev_priv->ips.chipset_power;  	count1 = I915_READ(DMIEC);  	count2 = I915_READ(DDREC); @@ -2760,16 +2756,16 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)  	total_count = count1 + count2 + count3;  	/* FIXME: handle per-counter overflow */ -	if (total_count < dev_priv->last_count1) { -		diff = ~0UL - dev_priv->last_count1; +	if (total_count < dev_priv->ips.last_count1) { +		diff = ~0UL - dev_priv->ips.last_count1;  		diff += total_count;  	} else { -		diff = total_count - dev_priv->last_count1; +		diff = total_count - dev_priv->ips.last_count1;  	}  	for (i = 0; i < ARRAY_SIZE(cparams); i++) { -		if (cparams[i].i == dev_priv->c_m && -		    cparams[i].t == dev_priv->r_t) { +		if (cparams[i].i == dev_priv->ips.c_m && +		    cparams[i].t == dev_priv->ips.r_t) {  			m = cparams[i].m;  			c = cparams[i].c;  			break; @@ -2780,10 +2776,10 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)  	ret = ((m * diff) + c);  	ret = div_u64(ret, 10); -	dev_priv->last_count1 = total_count; -	dev_priv->last_time1 = now; +	dev_priv->ips.last_count1 = total_count; +	dev_priv->ips.last_time1 = now; -	dev_priv->chipset_power = ret; +	dev_priv->ips.chipset_power = ret;  	return ret;  } @@ -2954,7 +2950,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)  	assert_spin_locked(&mchdev_lock);  	getrawmonotonic(&now); -	diff1 = timespec_sub(now, dev_priv->last_time2); +	diff1 = timespec_sub(now, dev_priv->ips.last_time2);  	/* Don't divide by 0 */  	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; @@ -2963,20 +2959,20 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)  	count = I915_READ(GFXEC); -	if (count < dev_priv->last_count2) { -		diff = ~0UL - dev_priv->last_count2; +	if (count < dev_priv->ips.last_count2) { +		diff = ~0UL - dev_priv->ips.last_count2;  		diff += count;  	} else { -		diff = count - dev_priv->last_count2; +		diff = 
count - dev_priv->ips.last_count2;  	} -	dev_priv->last_count2 = count; -	dev_priv->last_time2 = now; +	dev_priv->ips.last_count2 = count; +	dev_priv->ips.last_time2 = now;  	/* More magic constants... */  	diff = diff * 1181;  	diff = div_u64(diff, diffms * 10); -	dev_priv->gfx_power = diff; +	dev_priv->ips.gfx_power = diff;  }  void i915_update_gfx_val(struct drm_i915_private *dev_priv) @@ -3018,14 +3014,14 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)  	corr = corr * ((150142 * state1) / 10000 - 78642);  	corr /= 100000; -	corr2 = (corr * dev_priv->corr); +	corr2 = (corr * dev_priv->ips.corr);  	state2 = (corr2 * state1) / 10000;  	state2 /= 100; /* convert to mW */  	__i915_update_gfx_val(dev_priv); -	return dev_priv->gfx_power + state2; +	return dev_priv->ips.gfx_power + state2;  }  /** @@ -3073,8 +3069,8 @@ bool i915_gpu_raise(void)  	}  	dev_priv = i915_mch_dev; -	if (dev_priv->max_delay > dev_priv->fmax) -		dev_priv->max_delay--; +	if (dev_priv->ips.max_delay > dev_priv->ips.fmax) +		dev_priv->ips.max_delay--;  out_unlock:  	spin_unlock_irq(&mchdev_lock); @@ -3101,8 +3097,8 @@ bool i915_gpu_lower(void)  	}  	dev_priv = i915_mch_dev; -	if (dev_priv->max_delay < dev_priv->min_delay) -		dev_priv->max_delay++; +	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) +		dev_priv->ips.max_delay++;  out_unlock:  	spin_unlock_irq(&mchdev_lock); @@ -3156,9 +3152,9 @@ bool i915_gpu_turbo_disable(void)  	}  	dev_priv = i915_mch_dev; -	dev_priv->max_delay = dev_priv->fstart; +	dev_priv->ips.max_delay = dev_priv->ips.fstart; -	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) +	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))  		ret = false;  out_unlock: @@ -3273,7 +3269,7 @@ static void intel_init_emon(struct drm_device *dev)  	lcfuse = I915_READ(LCFUSE02); -	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); +	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);  }  void intel_disable_gt_powersave(struct drm_device *dev) @@ -3968,14 
+3964,16 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)  	else  		forcewake_ack = FORCEWAKE_ACK; -	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) -		DRM_ERROR("Force wake wait timed out\n"); +	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, +			    FORCEWAKE_ACK_TIMEOUT_MS)) +		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");  	I915_WRITE_NOTRACE(FORCEWAKE, 1); -	POSTING_READ(FORCEWAKE); +	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ -	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) -		DRM_ERROR("Force wake wait timed out\n"); +	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), +			    FORCEWAKE_ACK_TIMEOUT_MS)) +		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");  	__gen6_gt_wait_for_thread_c0(dev_priv);  } @@ -3989,14 +3987,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)  	else  		forcewake_ack = FORCEWAKE_MT_ACK; -	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) -		DRM_ERROR("Force wake wait timed out\n"); +	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, +			    FORCEWAKE_ACK_TIMEOUT_MS)) +		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");  	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); -	POSTING_READ(FORCEWAKE_MT); +	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ -	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) -		DRM_ERROR("Force wake wait timed out\n"); +	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), +			    FORCEWAKE_ACK_TIMEOUT_MS)) +		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");  	__gen6_gt_wait_for_thread_c0(dev_priv);  } @@ -4029,14 +4029,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)  static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)  {  	I915_WRITE_NOTRACE(FORCEWAKE, 
0); -	POSTING_READ(FORCEWAKE); +	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */  	gen6_gt_check_fifodbg(dev_priv);  }  static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)  {  	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); -	POSTING_READ(FORCEWAKE_MT); +	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */  	gen6_gt_check_fifodbg(dev_priv);  } @@ -4075,24 +4075,24 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)  static void vlv_force_wake_get(struct drm_i915_private *dev_priv)  { -	/* Already awake? */ -	if ((I915_READ(0x130094) & 0xa1) == 0xa1) -		return; +	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, +			    FORCEWAKE_ACK_TIMEOUT_MS)) +		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); -	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); -	POSTING_READ(FORCEWAKE_VLV); +	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); -	if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) -		DRM_ERROR("Force wake wait timed out\n"); +	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), +			    FORCEWAKE_ACK_TIMEOUT_MS)) +		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");  	__gen6_gt_wait_for_thread_c0(dev_priv);  }  static void vlv_force_wake_put(struct drm_i915_private *dev_priv)  { -	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); -	/* FIXME: confirm VLV behavior with Punit folks */ -	POSTING_READ(FORCEWAKE_VLV); +	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); +	/* The below doubles as a POSTING_READ */ +	gen6_gt_check_fifodbg(dev_priv);  }  void intel_gt_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c828169c73a..55cdb4d30a1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -218,6 +218,11 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,  	u32 scratch_addr = 
pc->gtt_offset + 128;  	int ret; +	/* Force SNB workarounds for PIPE_CONTROL flushes */ +	ret = intel_emit_post_sync_nonzero_flush(ring); +	if (ret) +		return ret; +  	/* Just flush everything.  Experiments have shown that reducing the  	 * number of bits based on the write domains has little performance  	 * impact. @@ -258,17 +263,80 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,  }  static int -gen6_render_ring_flush__wa(struct intel_ring_buffer *ring, -			   u32 invalidate_domains, u32 flush_domains) +gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)  {  	int ret; -	/* Force SNB workarounds for PIPE_CONTROL flushes */ -	ret = intel_emit_post_sync_nonzero_flush(ring); +	ret = intel_ring_begin(ring, 4);  	if (ret)  		return ret; -	return gen6_render_ring_flush(ring, invalidate_domains, flush_domains); +	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); +	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | +			      PIPE_CONTROL_STALL_AT_SCOREBOARD); +	intel_ring_emit(ring, 0); +	intel_ring_emit(ring, 0); +	intel_ring_advance(ring); + +	return 0; +} + +static int +gen7_render_ring_flush(struct intel_ring_buffer *ring, +		       u32 invalidate_domains, u32 flush_domains) +{ +	u32 flags = 0; +	struct pipe_control *pc = ring->private; +	u32 scratch_addr = pc->gtt_offset + 128; +	int ret; + +	/* +	 * Ensure that any following seqno writes only happen when the render +	 * cache is indeed flushed. +	 * +	 * Workaround: 4th PIPE_CONTROL command (except the ones with only +	 * read-cache invalidate bits set) must have the CS_STALL bit set. We +	 * don't try to be clever and just set it unconditionally. +	 */ +	flags |= PIPE_CONTROL_CS_STALL; + +	/* Just flush everything.  Experiments have shown that reducing the +	 * number of bits based on the write domains has little performance +	 * impact. 
+	 */ +	if (flush_domains) { +		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; +		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; +	} +	if (invalidate_domains) { +		flags |= PIPE_CONTROL_TLB_INVALIDATE; +		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; +		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; +		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; +		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; +		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; +		/* +		 * TLB invalidate requires a post-sync write. +		 */ +		flags |= PIPE_CONTROL_QW_WRITE; + +		/* Workaround: we must issue a pipe_control with CS-stall bit +		 * set before a pipe_control command that has the state cache +		 * invalidate bit set. */ +		gen7_render_ring_cs_stall_wa(ring); +	} + +	ret = intel_ring_begin(ring, 4); +	if (ret) +		return ret; + +	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); +	intel_ring_emit(ring, flags); +	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); +	intel_ring_emit(ring, 0); +	intel_ring_advance(ring); + +	return 0;  }  static void ring_write_tail(struct intel_ring_buffer *ring, @@ -391,7 +459,7 @@ init_pipe_control(struct intel_ring_buffer *ring)  	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); -	ret = i915_gem_object_pin(obj, 4096, true); +	ret = i915_gem_object_pin(obj, 4096, true, false);  	if (ret)  		goto err_unref; @@ -979,7 +1047,7 @@ static int init_status_page(struct intel_ring_buffer *ring)  	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); -	ret = i915_gem_object_pin(obj, 4096, true); +	ret = i915_gem_object_pin(obj, 4096, true, false);  	if (ret != 0) {  		goto err_unref;  	} @@ -1036,7 +1104,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,  	ring->obj = obj; -	ret = i915_gem_object_pin(obj, PAGE_SIZE, true); +	ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);  	if (ret)  		goto err_unref; @@ -1385,9 +1453,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)  	if (INTEL_INFO(dev)->gen >= 6) {  		ring->add_request 
= gen6_add_request; -		ring->flush = gen6_render_ring_flush; +		ring->flush = gen7_render_ring_flush;  		if (INTEL_INFO(dev)->gen == 6) -			ring->flush = gen6_render_ring_flush__wa; +			ring->flush = gen6_render_ring_flush;  		ring->irq_get = gen6_ring_get_irq;  		ring->irq_put = gen6_ring_put_irq;  		ring->irq_enable_mask = GT_USER_INTERRUPT; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 07d39212dca..39c319827f9 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -97,7 +97,7 @@ struct intel_sdvo {  	/*  	 * Hotplug activation bits for this device  	 */ -	uint8_t hotplug_active[2]; +	uint16_t hotplug_active;  	/**  	 * This is used to select the color range of RBG outputs in HDMI mode. @@ -1340,25 +1340,29 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in  	return true;  } -static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) +static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)  {  	struct drm_device *dev = intel_sdvo->base.base.dev; -	u8 response[2]; +	uint16_t hotplug;  	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise  	 * on the line. 
*/  	if (IS_I945G(dev) || IS_I945GM(dev)) -		return false; +		return 0; + +	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, +					&hotplug, sizeof(hotplug))) +		return 0; -	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, -				    &response, 2) && response[0]; +	return hotplug;  }  static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)  {  	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); -	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); +	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, +			&intel_sdvo->hotplug_active, 2);  }  static bool @@ -1434,7 +1438,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)  			}  		} else  			status = connector_status_disconnected; -		connector->display_info.raw_edid = NULL;  		kfree(edid);  	} @@ -1508,7 +1511,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)  			else  				ret = connector_status_disconnected; -			connector->display_info.raw_edid = NULL;  			kfree(edid);  		} else  			ret = connector_status_connected; @@ -1554,7 +1556,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)  			drm_add_edid_modes(connector, edid);  		} -		connector->display_info.raw_edid = NULL;  		kfree(edid);  	}  } @@ -1781,6 +1782,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)  	edid = intel_sdvo_get_edid(connector);  	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)  		has_audio = drm_detect_monitor_audio(edid); +	kfree(edid);  	return has_audio;  } @@ -2151,17 +2153,18 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)  	intel_connector = &intel_sdvo_connector->base;  	connector = &intel_connector->base; -	if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { +	if (intel_sdvo_get_hotplug_support(intel_sdvo) & +		intel_sdvo_connector->output_flag) {  		connector->polled = DRM_CONNECTOR_POLL_HPD; -		
intel_sdvo->hotplug_active[0] |= 1 << device; +		intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;  		/* Some SDVO devices have one-shot hotplug interrupts.  		 * Ensure that they get re-enabled when an interrupt happens.  		 */  		intel_encoder->hot_plug = intel_sdvo_enable_hotplug;  		intel_sdvo_enable_hotplug(intel_encoder); -	} -	else +	} else {  		connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; +	}  	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;  	connector->connector_type = DRM_MODE_CONNECTOR_DVID; @@ -2660,7 +2663,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)  		hotplug_mask = intel_sdvo->is_sdvob ?  			SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;  	} -	dev_priv->hotplug_supported_mask |= hotplug_mask;  	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); @@ -2672,14 +2674,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)  	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))  		goto err; -	/* Set up hotplug command - note paranoia about contents of reply. -	 * We assume that the hardware is in a sane state, and only touch -	 * the bits we think we understand. -	 */ -	intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, -			     &intel_sdvo->hotplug_active, 2); -	intel_sdvo->hotplug_active[0] &= ~0x3; -  	if (intel_sdvo_output_setup(intel_sdvo,  				    intel_sdvo->caps.output_flags) != true) {  		DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", @@ -2687,6 +2681,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)  		goto err;  	} +	/* Only enable the hotplug irq if we need it, to work around noisy +	 * hotplug lines. +	 */ +	if (intel_sdvo->hotplug_active) +		dev_priv->hotplug_supported_mask |= hotplug_mask; +  	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);  	/* Set the input timing to the screen. Assume always input 0. 
*/ diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 6f13b356323..d22cbbf3a20 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -195,7 +195,6 @@ struct mga_device {  		struct drm_global_reference mem_global_ref;  		struct ttm_bo_global_ref bo_global_ref;  		struct ttm_bo_device bdev; -		atomic_t validate_sequence;  	} ttm;  	u32 reg_1e24; /* SE model number */ diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index b69642d5d85..c7420e83c0b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1399,7 +1399,6 @@ static int mga_vga_get_modes(struct drm_connector *connector)  	if (edid) {  		drm_mode_connector_update_edid_property(connector, edid);  		ret = drm_add_edid_modes(connector, edid); -		connector->display_info.raw_edid = NULL;  		kfree(edid);  	}  	return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index fc841e87b34..26ebffebe71 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -211,11 +211,6 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,  	return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);  } -static int nouveau_dsm_init(void) -{ -	return 0; -} -  static int nouveau_dsm_get_client_id(struct pci_dev *pdev)  {  	/* easy option one - intel vendor ID means Integrated */ @@ -232,7 +227,6 @@ static int nouveau_dsm_get_client_id(struct pci_dev *pdev)  static struct vga_switcheroo_handler nouveau_dsm_handler = {  	.switchto = nouveau_dsm_switchto,  	.power_state = nouveau_dsm_power_state, -	.init = nouveau_dsm_init,  	.get_client_id = nouveau_dsm_get_client_id,  }; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c6fcb5b86a4..f4d4505fe83 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c 
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -444,11 +444,28 @@ union atom_enable_ss {  static void atombios_crtc_program_ss(struct radeon_device *rdev,  				     int enable,  				     int pll_id, +				     int crtc_id,  				     struct radeon_atom_ss *ss)  { +	unsigned i;  	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);  	union atom_enable_ss args; +	if (!enable) { +		for (i = 0; i < rdev->num_crtc; i++) { +			if (rdev->mode_info.crtcs[i] && +			    rdev->mode_info.crtcs[i]->enabled && +			    i != crtc_id && +			    pll_id == rdev->mode_info.crtcs[i]->pll_id) { +				/* one other crtc is using this pll don't turn +				 * off spread spectrum as it might turn off +				 * display on active crtc +				 */ +				return; +			} +		} +	} +  	memset(&args, 0, sizeof(args));  	if (ASIC_IS_DCE5(rdev)) { @@ -1028,7 +1045,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode  		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,  					  &ref_div, &post_div); -	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss); +	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);  	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,  				  encoder_mode, radeon_encoder->encoder_id, mode->clock, @@ -1051,7 +1068,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode  			ss.step = step_size;  		} -		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss); +		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);  	}  } @@ -1572,11 +1589,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)  								   ASIC_INTERNAL_SS_ON_DCPLL,  								   rdev->clock.default_dispclk);  		if (ss_enabled) -			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss); +			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);  		
/* XXX: DCE5, make sure voltage, dispclk is high enough */  		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);  		if (ss_enabled) -			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss); +			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);  	}  } diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 3dab49cb1d4..ab74e6b149e 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -47,13 +47,17 @@ struct r600_cs_track {  	u32			npipes;  	/* value we track */  	u32			sq_config; +	u32			log_nsamples;  	u32			nsamples;  	u32			cb_color_base_last[8];  	struct radeon_bo	*cb_color_bo[8];  	u64			cb_color_bo_mc[8]; -	u32			cb_color_bo_offset[8]; -	struct radeon_bo	*cb_color_frag_bo[8]; /* unused */ -	struct radeon_bo	*cb_color_tile_bo[8]; /* unused */ +	u64			cb_color_bo_offset[8]; +	struct radeon_bo	*cb_color_frag_bo[8]; +	u64			cb_color_frag_offset[8]; +	struct radeon_bo	*cb_color_tile_bo[8]; +	u64			cb_color_tile_offset[8]; +	u32			cb_color_mask[8];  	u32			cb_color_info[8];  	u32			cb_color_view[8];  	u32			cb_color_size_idx[8]; /* unused */ @@ -349,10 +353,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)  	unsigned array_mode;  	u32 format; -	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { -		dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); -		return -EINVAL; -	}  	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];  	format = G_0280A0_FORMAT(track->cb_color_info[i]);  	if (!r600_fmt_is_valid_color(format)) { @@ -420,7 +420,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)  	}  	/* check offset */ -	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format); +	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * +	      r600_fmt_get_blocksize(format) * 
track->nsamples;  	switch (array_mode) {  	default:  	case V_0280A0_ARRAY_LINEAR_GENERAL: @@ -441,7 +442,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)  			 * broken userspace.  			 */  		} else { -			dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n", +			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",  				 __func__, i, array_mode,  				 track->cb_color_bo_offset[i], tmp,  				 radeon_bo_size(track->cb_color_bo[i]), @@ -458,6 +459,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)  	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |  		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);  	ib[track->cb_color_size_idx[i]] = tmp; + +	/* FMASK/CMASK */ +	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) { +	case V_0280A0_TILE_DISABLE: +		break; +	case V_0280A0_FRAG_ENABLE: +		if (track->nsamples > 1) { +			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]); +			/* the tile size is 8x8, but the size is in units of bits. +			 * for bytes, do just * 8. */ +			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1); + +			if (bytes + track->cb_color_frag_offset[i] > +			    radeon_bo_size(track->cb_color_frag_bo[i])) { +				dev_warn(p->dev, "%s FMASK_TILE_MAX too large " +					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", +					 __func__, tile_max, bytes, +					 track->cb_color_frag_offset[i], +					 radeon_bo_size(track->cb_color_frag_bo[i])); +				return -EINVAL; +			} +		} +		/* fall through */ +	case V_0280A0_CLEAR_ENABLE: +	{ +		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); +		/* One block = 128x128 pixels, one 8x8 tile has 4 bits.. +		 * (128*128) / (8*8) / 2 = 128 bytes per block. 
*/ +		uint32_t bytes = (block_max + 1) * 128; + +		if (bytes + track->cb_color_tile_offset[i] > +		    radeon_bo_size(track->cb_color_tile_bo[i])) { +			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " +				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", +				 __func__, block_max, bytes, +				 track->cb_color_tile_offset[i], +				 radeon_bo_size(track->cb_color_tile_bo[i])); +			return -EINVAL; +		} +		break; +	} +	default: +		dev_warn(p->dev, "%s invalid tile mode\n", __func__); +		return -EINVAL; +	}  	return 0;  } @@ -566,7 +612,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)  		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;  		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; -		tmp = ntiles * bpe * 64 * nviews; +		tmp = ntiles * bpe * 64 * nviews * track->nsamples;  		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {  			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",  					array_mode, @@ -1231,6 +1277,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)  		break;  	case R_028C04_PA_SC_AA_CONFIG:  		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); +		track->log_nsamples = tmp;  		track->nsamples = 1 << tmp;  		track->cb_dirty = true;  		break; @@ -1312,16 +1359,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)  				dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg);  				return -EINVAL;  			} -			ib[idx] = track->cb_color_base_last[tmp];  			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; +			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; +			ib[idx] = track->cb_color_base_last[tmp];  		} else {  			r = r600_cs_packet_next_reloc(p, &reloc);  			if (r) {  				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);  				return -EINVAL;  			} -			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);  			track->cb_color_frag_bo[tmp] = reloc->robj; +			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; +			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); +		} +		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { +			track->cb_dirty = true;  		}  		break;  	case R_0280C0_CB_COLOR0_TILE: @@ -1338,16 +1390,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)  				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);  				return -EINVAL;  			} -			ib[idx] = track->cb_color_base_last[tmp];  			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; +			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; +			ib[idx] = track->cb_color_base_last[tmp];  		} else {  			r = r600_cs_packet_next_reloc(p, &reloc);  			if (r) {  				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);  				return -EINVAL;  			} -			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);  			track->cb_color_tile_bo[tmp] = reloc->robj; +			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; +			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); +		} +		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { +			track->cb_dirty = true; +		} +		break; +	case R_028100_CB_COLOR0_MASK: +	case R_028104_CB_COLOR1_MASK: +	case R_028108_CB_COLOR2_MASK: +	case R_02810C_CB_COLOR3_MASK: +	case R_028110_CB_COLOR4_MASK: +	case R_028114_CB_COLOR5_MASK: 
+	case R_028118_CB_COLOR6_MASK: +	case R_02811C_CB_COLOR7_MASK: +		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; +		track->cb_color_mask[tmp] = ib[idx]; +		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { +			track->cb_dirty = true;  		}  		break;  	case CB_COLOR0_BASE: @@ -1492,7 +1563,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level)  }  static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, -			      unsigned w0, unsigned h0, unsigned d0, unsigned format, +			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,  			      unsigned block_align, unsigned height_align, unsigned base_align,  			      unsigned *l0_size, unsigned *mipmap_size)  { @@ -1520,7 +1591,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,  		depth = r600_mip_minify(d0, i); -		size = nbx * nby * blocksize; +		size = nbx * nby * blocksize * nsamples;  		if (nfaces)  			size *= nfaces;  		else @@ -1672,7 +1743,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,  		nfaces = larray - barray + 1;  	} -	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format, +	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,  			  pitch_align, height_align, base_align,  			  &l0_size, &mipmap_size);  	/* using get ib will give us the offset into the texture bo */ diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index fd328f4c3ea..bdb69a63062 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -92,6 +92,20 @@  #define R_028094_CB_COLOR5_VIEW                      0x028094  #define R_028098_CB_COLOR6_VIEW                      0x028098  #define R_02809C_CB_COLOR7_VIEW                      0x02809C +#define R_028100_CB_COLOR0_MASK                      0x028100 +#define   S_028100_CMASK_BLOCK_MAX(x)                  (((x) & 0xFFF) << 0) +#define   G_028100_CMASK_BLOCK_MAX(x)                  
(((x) >> 0) & 0xFFF) +#define   C_028100_CMASK_BLOCK_MAX                     0xFFFFF000 +#define   S_028100_FMASK_TILE_MAX(x)                   (((x) & 0xFFFFF) << 12) +#define   G_028100_FMASK_TILE_MAX(x)                   (((x) >> 12) & 0xFFFFF) +#define   C_028100_FMASK_TILE_MAX                      0x00000FFF +#define R_028104_CB_COLOR1_MASK                      0x028104 +#define R_028108_CB_COLOR2_MASK                      0x028108 +#define R_02810C_CB_COLOR3_MASK                      0x02810C +#define R_028110_CB_COLOR4_MASK                      0x028110 +#define R_028114_CB_COLOR5_MASK                      0x028114 +#define R_028118_CB_COLOR6_MASK                      0x028118 +#define R_02811C_CB_COLOR7_MASK                      0x02811C  #define CB_COLOR0_INFO                                  0x280a0  #	define CB_FORMAT(x)				((x) << 2)  #       define CB_ARRAY_MODE(x)                         ((x) << 8) @@ -1400,6 +1414,9 @@  #define   S_0280A0_TILE_MODE(x)                        (((x) & 0x3) << 18)  #define   G_0280A0_TILE_MODE(x)                        (((x) >> 18) & 0x3)  #define   C_0280A0_TILE_MODE                           0xFFF3FFFF +#define     V_0280A0_TILE_DISABLE			0 +#define     V_0280A0_CLEAR_ENABLE			1 +#define     V_0280A0_FRAG_ENABLE			2  #define   S_0280A0_BLEND_CLAMP(x)                      (((x) & 0x1) << 20)  #define   G_0280A0_BLEND_CLAMP(x)                      (((x) >> 20) & 0x1)  #define   C_0280A0_BLEND_CLAMP                         0xFFEFFFFF diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 99304194a65..59a15315ae9 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -142,21 +142,6 @@ struct radeon_device;  /*   * BIOS.   
*/ -#define ATRM_BIOS_PAGE 4096 - -#if defined(CONFIG_VGA_SWITCHEROO) -bool radeon_atrm_supported(struct pci_dev *pdev); -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len); -#else -static inline bool radeon_atrm_supported(struct pci_dev *pdev) -{ -	return false; -} - -static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){ -	return -EINVAL; -} -#endif  bool radeon_get_bios(struct radeon_device *rdev);  /* diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index f9c21f9d16b..d67d4f3eb6f 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,  	}  	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ -	if ((dev->pdev->device == 0x9802) && +	if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&  	    (dev->pdev->subsystem_vendor == 0x1734) &&  	    (dev->pdev->subsystem_device == 0x11bd)) {  		if (*connector_type == DRM_MODE_CONNECTOR_VGA) { diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 98724fcb008..2a2cf0b88a2 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -30,57 +30,8 @@ static struct radeon_atpx_priv {  	/* handle for device - and atpx */  	acpi_handle dhandle;  	acpi_handle atpx_handle; -	acpi_handle atrm_handle;  } radeon_atpx_priv; -/* retrieve the ROM in 4k blocks */ -static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, -			    int offset, int len) -{ -	acpi_status status; -	union acpi_object atrm_arg_elements[2], *obj; -	struct acpi_object_list atrm_arg; -	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; - -	atrm_arg.count = 2; -	atrm_arg.pointer = &atrm_arg_elements[0]; - -	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; -	atrm_arg_elements[0].integer.value = offset; - -	
atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; -	atrm_arg_elements[1].integer.value = len; - -	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); -	if (ACPI_FAILURE(status)) { -		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); -		return -ENODEV; -	} - -	obj = (union acpi_object *)buffer.pointer; -	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); -	len = obj->buffer.length; -	kfree(buffer.pointer); -	return len; -} - -bool radeon_atrm_supported(struct pci_dev *pdev) -{ -	/* get the discrete ROM only via ATRM */ -	if (!radeon_atpx_priv.atpx_detected) -		return false; - -	if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) -		return false; -	return true; -} - - -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len) -{ -	return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len); -} -  static int radeon_atpx_get_version(acpi_handle handle)  {  	acpi_status status; @@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,  static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)  { -	acpi_handle dhandle, atpx_handle, atrm_handle; +	acpi_handle dhandle, atpx_handle;  	acpi_status status;  	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); @@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)  	if (ACPI_FAILURE(status))  		return false; -	status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); -	if (ACPI_FAILURE(status)) -		return false; -  	radeon_atpx_priv.dhandle = dhandle;  	radeon_atpx_priv.atpx_handle = atpx_handle; -	radeon_atpx_priv.atrm_handle = atrm_handle;  	return true;  } diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 501f4881e5a..d306cc8fdea 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -32,6 +32,7 @@  #include <linux/vga_switcheroo.h>  #include <linux/slab.h> +#include <linux/acpi.h>  /*   * BIOS.   
*/ @@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)  	return true;  } +#ifdef CONFIG_ACPI  /* ATRM is used to get the BIOS on the discrete cards in   * dual-gpu systems.   */ +/* retrieve the ROM in 4k blocks */ +#define ATRM_BIOS_PAGE 4096 +/** + * radeon_atrm_call - fetch a chunk of the vbios + * + * @atrm_handle: acpi ATRM handle + * @bios: vbios image pointer + * @offset: offset of vbios image data to fetch + * @len: length of vbios image data to fetch + * + * Executes ATRM to fetch a chunk of the discrete + * vbios image on PX systems (all asics). + * Returns the length of the buffer fetched. + */ +static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, +			    int offset, int len) +{ +	acpi_status status; +	union acpi_object atrm_arg_elements[2], *obj; +	struct acpi_object_list atrm_arg; +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + +	atrm_arg.count = 2; +	atrm_arg.pointer = &atrm_arg_elements[0]; + +	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; +	atrm_arg_elements[0].integer.value = offset; + +	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; +	atrm_arg_elements[1].integer.value = len; + +	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); +	if (ACPI_FAILURE(status)) { +		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); +		return -ENODEV; +	} + +	obj = (union acpi_object *)buffer.pointer; +	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); +	len = obj->buffer.length; +	kfree(buffer.pointer); +	return len; +} +  static bool radeon_atrm_get_bios(struct radeon_device *rdev)  {  	int ret;  	int size = 256 * 1024;  	int i; +	struct pci_dev *pdev = NULL; +	acpi_handle dhandle, atrm_handle; +	acpi_status status; +	bool found = false; + +	/* ATRM is for the discrete card only */ +	if (rdev->flags & RADEON_IS_IGP) +		return false; -	if (!radeon_atrm_supported(rdev->pdev)) +	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { +		
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); +		if (!dhandle) +			continue; + +		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); +		if (!ACPI_FAILURE(status)) { +			found = true; +			break; +		} +	} + +	if (!found)  		return false;  	rdev->bios = kmalloc(size, GFP_KERNEL); @@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)  	}  	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { -		ret = radeon_atrm_get_bios_chunk(rdev->bios, -						 (i * ATRM_BIOS_PAGE), -						 ATRM_BIOS_PAGE); +		ret = radeon_atrm_call(atrm_handle, +				       rdev->bios, +				       (i * ATRM_BIOS_PAGE), +				       ATRM_BIOS_PAGE);  		if (ret < ATRM_BIOS_PAGE)  			break;  	} @@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)  	}  	return true;  } +#else +static inline bool radeon_atrm_get_bios(struct radeon_device *rdev) +{ +	return false; +} +#endif  static bool ni_read_disabled_bios(struct radeon_device *rdev)  { @@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)  		return legacy_read_disabled_bios(rdev);  } +#ifdef CONFIG_ACPI +static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) +{ +	bool ret = false; +	struct acpi_table_header *hdr; +	acpi_size tbl_size; +	UEFI_ACPI_VFCT *vfct; +	GOP_VBIOS_CONTENT *vbios; +	VFCT_IMAGE_HEADER *vhdr; + +	if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size))) +		return false; +	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { +		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); +		goto out_unmap; +	} + +	vfct = (UEFI_ACPI_VFCT *)hdr; +	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) { +		DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); +		goto out_unmap; +	} + +	vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset); +	vhdr = &vbios->VbiosHeader; +	DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n", +			vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction, +		
	vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength); + +	if (vhdr->PCIBus != rdev->pdev->bus->number || +	    vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) || +	    vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) || +	    vhdr->VendorID != rdev->pdev->vendor || +	    vhdr->DeviceID != rdev->pdev->device) { +		DRM_INFO("ACPI VFCT table is not for this card\n"); +		goto out_unmap; +	}; + +	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { +		DRM_ERROR("ACPI VFCT image truncated\n"); +		goto out_unmap; +	} + +	rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); +	ret = !!rdev->bios; + +out_unmap: +	return ret; +} +#else +static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) +{ +	return false; +} +#endif  bool radeon_get_bios(struct radeon_device *rdev)  { @@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev)  	r = radeon_atrm_get_bios(rdev);  	if (r == false) +		r = radeon_acpi_vfct_bios(rdev); +	if (r == false)  		r = igp_read_bios_from_vram(rdev);  	if (r == false)  		r = radeon_read_bios(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index d7269f48d37..27d22d709c9 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -62,9 +62,10 @@   *   2.18.0 - r600-eg: allow "invalid" DB formats   *   2.19.0 - r600-eg: MSAA textures   *   2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query + *   2.21.0 - r600-r700: FMASK and CMASK   */  #define KMS_DRIVER_MAJOR	2 -#define KMS_DRIVER_MINOR	20 +#define KMS_DRIVER_MINOR	21  #define KMS_DRIVER_PATCHLEVEL	0  int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);  int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1cb014b571a..9024e722283 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -132,6 
+132,7 @@ int radeon_bo_create(struct radeon_device *rdev,  	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,  				       sizeof(struct radeon_bo)); +retry:  	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);  	if (bo == NULL)  		return -ENOMEM; @@ -145,8 +146,6 @@ int radeon_bo_create(struct radeon_device *rdev,  	bo->surface_reg = -1;  	INIT_LIST_HEAD(&bo->list);  	INIT_LIST_HEAD(&bo->va); - -retry:  	radeon_ttm_placement_from_domain(bo, domain);  	/* Kernel allocation are uninterruptible */  	down_read(&rdev->pm.mclk_lock); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index ec79b375043..43c431a2686 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -706,6 +706,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig  	if (radeon_debugfs_ring_init(rdev, ring)) {  		DRM_ERROR("Failed to register debugfs file for rings !\n");  	} +	radeon_ring_lockup_update(ring);  	return 0;  } diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index 5e659b034d9..f93e45d869f 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -744,14 +744,6 @@ r600 0x9400  0x00028C38 CB_CLRCMP_DST  0x00028C3C CB_CLRCMP_MSK  0x00028C34 CB_CLRCMP_SRC -0x00028100 CB_COLOR0_MASK -0x00028104 CB_COLOR1_MASK -0x00028108 CB_COLOR2_MASK -0x0002810C CB_COLOR3_MASK -0x00028110 CB_COLOR4_MASK -0x00028114 CB_COLOR5_MASK -0x00028118 CB_COLOR6_MASK -0x0002811C CB_COLOR7_MASK  0x00028808 CB_COLOR_CONTROL  0x0002842C CB_FOG_BLUE  0x00028428 CB_FOG_GREEN diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index f8187ead7b3..0df71eacd58 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -472,7 +472,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)  	else  		tmp = pgprot_noncached(tmp);  #endif -#if defined(__sparc__) +#if 
defined(__sparc__) || defined(__mips__)  	if (!(caching_flags & TTM_PL_FLAG_CACHED))  		tmp = pgprot_noncached(tmp);  #endif diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig index 0b5e096d39a..56e0bf31d42 100644 --- a/drivers/gpu/drm/udl/Kconfig +++ b/drivers/gpu/drm/udl/Kconfig @@ -1,6 +1,7 @@  config DRM_UDL  	tristate "DisplayLink"  	depends on DRM && EXPERIMENTAL +	depends on USB_ARCH_HAS_HCD  	select DRM_USB  	select FB_SYS_FILLRECT  	select FB_SYS_COPYAREA diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index ba055e9ca00..2d98ff92f3b 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -57,11 +57,8 @@ static int udl_get_modes(struct drm_connector *connector)  	edid = (struct edid *)udl_get_edid(udl); -	connector->display_info.raw_edid = (char *)edid; -  	drm_mode_connector_update_edid_property(connector, edid);  	ret = drm_add_edid_modes(connector, edid); -	connector->display_info.raw_edid = NULL;  	kfree(edid);  	return ret;  } diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index f5dd89e891d..9159d48d1df 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -354,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,  static void udl_crtc_disable(struct drm_crtc *crtc)  { - - +	udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);  }  static void udl_crtc_destroy(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 6b0078ffa76..c50724bd30f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc,  	struct vmw_private *dev_priv = vmw_priv(crtc->dev);  	struct drm_framebuffer *old_fb = crtc->fb;  	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); -	struct drm_file *file_priv = event->base.file_priv; +	struct drm_file 
*file_priv ;  	struct vmw_fence_obj *fence = NULL;  	struct drm_clip_rect clips;  	int ret; +	if (event == NULL) +		return -EINVAL; +  	/* require ScreenObject support for page flipping */  	if (!dev_priv->sou_priv)  		return -ENOSYS; +	file_priv = event->base.file_priv;  	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))  		return -EINVAL; diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 5b3c7d135dc..e25cf31faab 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -70,27 +70,12 @@ static struct vgasr_priv vgasr_priv = {  	.clients = LIST_HEAD_INIT(vgasr_priv.clients),  }; -int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) -{ -	mutex_lock(&vgasr_mutex); -	if (vgasr_priv.handler) { -		mutex_unlock(&vgasr_mutex); -		return -EINVAL; -	} - -	vgasr_priv.handler = handler; -	mutex_unlock(&vgasr_mutex); -	return 0; -} -EXPORT_SYMBOL(vga_switcheroo_register_handler); - -void vga_switcheroo_unregister_handler(void) +static bool vga_switcheroo_ready(void)  { -	mutex_lock(&vgasr_mutex); -	vgasr_priv.handler = NULL; -	mutex_unlock(&vgasr_mutex); +	/* we're ready if we get two clients + handler */ +	return !vgasr_priv.active && +	       vgasr_priv.registered_clients == 2 && vgasr_priv.handler;  } -EXPORT_SYMBOL(vga_switcheroo_unregister_handler);  static void vga_switcheroo_enable(void)  { @@ -98,7 +83,8 @@ static void vga_switcheroo_enable(void)  	struct vga_switcheroo_client *client;  	/* call the handler to init */ -	vgasr_priv.handler->init(); +	if (vgasr_priv.handler->init) +		vgasr_priv.handler->init();  	list_for_each_entry(client, &vgasr_priv.clients, list) {  		if (client->id != -1) @@ -113,6 +99,37 @@ static void vga_switcheroo_enable(void)  	vgasr_priv.active = true;  } +int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) +{ +	mutex_lock(&vgasr_mutex); +	if (vgasr_priv.handler) { +		mutex_unlock(&vgasr_mutex); +		return -EINVAL; +	} + +	
vgasr_priv.handler = handler; +	if (vga_switcheroo_ready()) { +		printk(KERN_INFO "vga_switcheroo: enabled\n"); +		vga_switcheroo_enable(); +	} +	mutex_unlock(&vgasr_mutex); +	return 0; +} +EXPORT_SYMBOL(vga_switcheroo_register_handler); + +void vga_switcheroo_unregister_handler(void) +{ +	mutex_lock(&vgasr_mutex); +	vgasr_priv.handler = NULL; +	if (vgasr_priv.active) { +		pr_info("vga_switcheroo: disabled\n"); +		vga_switcheroo_debugfs_fini(&vgasr_priv); +		vgasr_priv.active = false; +	} +	mutex_unlock(&vgasr_mutex); +} +EXPORT_SYMBOL(vga_switcheroo_unregister_handler); +  static int register_client(struct pci_dev *pdev,  			   const struct vga_switcheroo_client_ops *ops,  			   int id, bool active) @@ -134,9 +151,7 @@ static int register_client(struct pci_dev *pdev,  	if (client_is_vga(client))  		vgasr_priv.registered_clients++; -	/* if we get two clients + handler */ -	if (!vgasr_priv.active && -	    vgasr_priv.registered_clients == 2 && vgasr_priv.handler) { +	if (vga_switcheroo_ready()) {  		printk(KERN_INFO "vga_switcheroo: enabled\n");  		vga_switcheroo_enable();  	} diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 60ea284407c..8bf8a64e511 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1624,7 +1624,6 @@ static const struct hid_device_id hid_have_special_driver[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, -	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 351d1f4593e..4ee57894872 100644 --- 
a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = {  		.matches = {  			DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")  		} +	}, { +		/* Old interface reads the same sensor for fan0 and fan1 */ +		.ident = "Asus M5A78L", +		.matches = { +			DMI_MATCH(DMI_BOARD_NAME, "M5A78L") +		}  	},  	{ }  }; diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index faa16f80db9..0fa356fe82c 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -196,7 +196,7 @@ struct tjmax {  	int tjmax;  }; -static struct tjmax __cpuinitconst tjmax_table[] = { +static const struct tjmax __cpuinitconst tjmax_table[] = {  	{ "CPU D410", 100000 },  	{ "CPU D425", 100000 },  	{ "CPU D510", 100000 }, diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c index ab4825205a9..5b1a6a66644 100644 --- a/drivers/hwmon/w83627hf.c +++ b/drivers/hwmon/w83627hf.c @@ -1206,7 +1206,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,  	int err = -ENODEV;  	u16 val; -	static const __initdata char *names[] = { +	static __initconst char *const names[] = {  		"W83627HF",  		"W83627THF",  		"W83697HF", diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c index aedb94f34bf..dae3ddfe761 100644 --- a/drivers/i2c/busses/i2c-diolan-u2c.c +++ b/drivers/i2c/busses/i2c-diolan-u2c.c @@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,  			}  		}  	} +	ret = num;  abort:  	sret = diolan_i2c_stop(dev);  	if (sret < 0 && ret >= 0) diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 5e6f1eed4f8..61b00edacb0 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -350,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)  	i2c_clk = clk_get_rate(dev->clk); -	/* fallback to std. 
mode if machine has not provided it */ -	if (dev->cfg.clk_freq == 0) -		dev->cfg.clk_freq = 100000; -  	/*  	 * The spec says, in case of std. mode the divider is  	 * 2 whereas it is 3 for fast and fastplus mode of @@ -911,20 +907,32 @@ static const struct i2c_algorithm nmk_i2c_algo = {  	.functionality	= nmk_i2c_functionality  }; +static struct nmk_i2c_controller u8500_i2c = { +	/* +	 * Slave data setup time; 250ns, 100ns, and 10ns, which +	 * is 14, 6 and 2 respectively for a 48Mhz i2c clock. +	 */ +	.slsu           = 0xe, +	.tft            = 1,      /* Tx FIFO threshold */ +	.rft            = 8,      /* Rx FIFO threshold */ +	.clk_freq       = 400000, /* fast mode operation */ +	.timeout        = 200,    /* Slave response timeout(ms) */ +	.sm             = I2C_FREQ_MODE_FAST, +}; +  static atomic_t adapter_id = ATOMIC_INIT(0);  static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)  {  	int ret = 0; -	struct nmk_i2c_controller *pdata = -			adev->dev.platform_data; +	struct nmk_i2c_controller *pdata = adev->dev.platform_data;  	struct nmk_i2c_dev	*dev;  	struct i2c_adapter *adap; -	if (!pdata) { -		dev_warn(&adev->dev, "no platform data\n"); -		return -ENODEV; -	} +	if (!pdata) +		/* No i2c configuration found, using the default. 
*/ +		pdata = &u8500_i2c; +  	dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);  	if (!dev) {  		dev_err(&adev->dev, "cannot allocate memory\n"); diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 6849635b268..5d19a49803c 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -584,7 +584,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)  	r = pm_runtime_get_sync(dev->dev);  	if (IS_ERR_VALUE(r)) -		return r; +		goto out;  	r = omap_i2c_wait_for_bb(dev);  	if (r < 0) diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 66eb53fac20..9a08c57bc93 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -712,7 +712,7 @@ static int __devexit tegra_i2c_remove(struct platform_device *pdev)  	return 0;  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int tegra_i2c_suspend(struct device *dev)  {  	struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c index 92406097efe..8d1e32d7cd9 100644 --- a/drivers/ide/ide-pm.c +++ b/drivers/ide/ide-pm.c @@ -4,7 +4,7 @@  int generic_ide_suspend(struct device *dev, pm_message_t mesg)  { -	ide_drive_t *drive = dev_get_drvdata(dev); +	ide_drive_t *drive = to_ide_device(dev);  	ide_drive_t *pair = ide_get_pair_dev(drive);  	ide_hwif_t *hwif = drive->hwif;  	struct request *rq; @@ -40,7 +40,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)  int generic_ide_resume(struct device *dev)  { -	ide_drive_t *drive = dev_get_drvdata(dev); +	ide_drive_t *drive = to_ide_device(dev);  	ide_drive_t *pair = ide_get_pair_dev(drive);  	ide_hwif_t *hwif = drive->hwif;  	struct request *rq; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index f559088869f..e8726177d10 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -606,8 +606,9 @@ static int __init intel_idle_init(void)  	
intel_idle_cpuidle_driver_init();  	retval = cpuidle_register_driver(&intel_idle_driver);  	if (retval) { +		struct cpuidle_driver *drv = cpuidle_get_driver();  		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", -			cpuidle_get_driver()->name); +			drv ? drv->name : "none");  		return retval;  	} diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c index 59fbb3ae40e..e35bb8f6fe7 100644 --- a/drivers/iio/frequency/adf4350.c +++ b/drivers/iio/frequency/adf4350.c @@ -129,7 +129,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)  {  	struct adf4350_platform_data *pdata = st->pdata;  	u64 tmp; -	u32 div_gcd, prescaler; +	u32 div_gcd, prescaler, chspc;  	u16 mdiv, r_cnt = 0;  	u8 band_sel_div; @@ -158,14 +158,20 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)  	if (pdata->ref_div_factor)  		r_cnt = pdata->ref_div_factor - 1; -	do  { -		r_cnt = adf4350_tune_r_cnt(st, r_cnt); +	chspc = st->chspc; -		st->r1_mod = st->fpfd / st->chspc; -		while (st->r1_mod > ADF4350_MAX_MODULUS) { -			r_cnt = adf4350_tune_r_cnt(st, r_cnt); -			st->r1_mod = st->fpfd / st->chspc; -		} +	do  { +		do { +			do { +				r_cnt = adf4350_tune_r_cnt(st, r_cnt); +				st->r1_mod = st->fpfd / chspc; +				if (r_cnt > ADF4350_MAX_R_CNT) { +					/* try higher spacing values */ +					chspc++; +					r_cnt = 0; +				} +			} while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt); +		} while (r_cnt == 0);  		tmp = freq * (u64)st->r1_mod + (st->fpfd > 1);  		do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */ @@ -194,7 +200,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)  	st->regs[ADF4350_REG0] = ADF4350_REG0_INT(st->r0_int) |  				 ADF4350_REG0_FRACT(st->r0_fract); -	st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(0) | +	st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(1) |  				 ADF4350_REG1_MOD(st->r1_mod) |  				 prescaler; diff --git a/drivers/iio/light/adjd_s311.c 
b/drivers/iio/light/adjd_s311.c index 1cbb449b319..9a99f43094f 100644 --- a/drivers/iio/light/adjd_s311.c +++ b/drivers/iio/light/adjd_s311.c @@ -271,9 +271,10 @@ static int adjd_s311_update_scan_mode(struct iio_dev *indio_dev,  	const unsigned long *scan_mask)  {  	struct adjd_s311_data *data = iio_priv(indio_dev); -	data->buffer = krealloc(data->buffer, indio_dev->scan_bytes, -				GFP_KERNEL); -	if (!data->buffer) + +	kfree(data->buffer); +	data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); +	if (data->buffer == NULL)  		return -ENOMEM;  	return 0; diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c index c3e7bac1312..e45712a921c 100644 --- a/drivers/iio/light/lm3533-als.c +++ b/drivers/iio/light/lm3533-als.c @@ -404,7 +404,7 @@ out:  	return ret;  } -static int show_thresh_either_en(struct device *dev, +static ssize_t show_thresh_either_en(struct device *dev,  					struct device_attribute *attr,  					char *buf)  { @@ -424,7 +424,7 @@ static int show_thresh_either_en(struct device *dev,  	return scnprintf(buf, PAGE_SIZE, "%u\n", enable);  } -static int store_thresh_either_en(struct device *dev, +static ssize_t store_thresh_either_en(struct device *dev,  					struct device_attribute *attr,  					const char *buf, size_t len)  { diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 6bf85042289..055ed59838d 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -267,6 +267,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,  	if (!uevent)  		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; +	mutex_lock(&ctx->file->mut);  	uevent->cm_id = cm_id;  	ucma_set_event_context(ctx, event, uevent);  	uevent->resp.event = event->event; @@ -277,7 +278,6 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,  		ucma_copy_conn_event(&uevent->resp.param.conn,  				     &event->param.conn); -	mutex_lock(&ctx->file->mut);  	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) 
{  		if (!ctx->backlog) {  			ret = -ENOMEM; diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c index 8c81992fa6d..e4a73158fc7 100644 --- a/drivers/infiniband/hw/amso1100/c2_rnic.c +++ b/drivers/infiniband/hw/amso1100/c2_rnic.c @@ -439,7 +439,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)  /*   * Called by c2_probe to initialize the RNIC. This principally - * involves initalizing the various limits and resouce pools that + * involves initializing the various limits and resource pools that   * comprise the RNIC instance.   */  int __devinit c2_rnic_init(struct c2_dev *c2dev) diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 77b6b182778..aaf88ef9409 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1680,7 +1680,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)   * T3A does 3 things when a TERM is received:   * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet   * 2) generate an async event on the QP with the TERMINATE opcode - * 3) post a TERMINATE opcde cqe into the associated CQ. + * 3) post a TERMINATE opcode cqe into the associated CQ.   *   * For (1), we save the message in the qp for later consumer consumption.   * For (2), we move the QP into TERMINATE, post a QP event and disconnect. 
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index c27141fef1a..9c2ae7efd00 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)  {  	struct ib_ah *new_ah;  	struct ib_ah_attr ah_attr; +	unsigned long flags;  	if (!dev->send_agent[port_num - 1][0])  		return; @@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)  	if (IS_ERR(new_ah))  		return; -	spin_lock(&dev->sm_lock); +	spin_lock_irqsave(&dev->sm_lock, flags);  	if (dev->sm_ah[port_num - 1])  		ib_destroy_ah(dev->sm_ah[port_num - 1]);  	dev->sm_ah[port_num - 1] = new_ah; -	spin_unlock(&dev->sm_lock); +	spin_unlock_irqrestore(&dev->sm_lock, flags);  }  /* @@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,  static void node_desc_override(struct ib_device *dev,  			       struct ib_mad *mad)  { +	unsigned long flags; +  	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||  	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&  	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&  	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { -		spin_lock(&to_mdev(dev)->sm_lock); +		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);  		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64); -		spin_unlock(&to_mdev(dev)->sm_lock); +		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);  	}  } @@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma  	struct ib_mad_send_buf *send_buf;  	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];  	int ret; +	unsigned long flags;  	if (agent) {  		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, @@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma  		 * wrong following 
the IB spec strictly, but we know  		 * it's OK for our devices).  		 */ -		spin_lock(&dev->sm_lock); +		spin_lock_irqsave(&dev->sm_lock, flags);  		memcpy(send_buf->mad, mad, sizeof *mad);  		if ((send_buf->ah = dev->sm_ah[port_num - 1]))  			ret = ib_post_send_mad(send_buf, NULL);  		else  			ret = -EINVAL; -		spin_unlock(&dev->sm_lock); +		spin_unlock_irqrestore(&dev->sm_lock, flags);  		if (ret)  			ib_free_send_mad(send_buf); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index fe2088cfa6e..cc05579ebce 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,  				 struct ib_device_modify *props)  {  	struct mlx4_cmd_mailbox *mailbox; +	unsigned long flags;  	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)  		return -EOPNOTSUPP; @@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,  	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))  		return 0; -	spin_lock(&to_mdev(ibdev)->sm_lock); +	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);  	memcpy(ibdev->node_desc, props->node_desc, 64); -	spin_unlock(&to_mdev(ibdev)->sm_lock); +	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);  	/*  	 * If possible, pass node desc to FW, so it can generate diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index a6d8ea060ea..f585eddef4b 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1407,6 +1407,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,  	struct mlx4_wqe_mlx_seg *mlx = wqe;  	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;  	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); +	struct net_device *ndev;  	union ib_gid sgid;  	u16 pkey;  	int send_size; @@ -1483,7 +1484,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,  		memcpy(sqp->ud_header.eth.dmac_h, 
ah->av.eth.mac, 6);  		/* FIXME: cache smac value? */ -		smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr; +		ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]; +		if (!ndev) +			return -ENODEV; +		smac = ndev->dev_addr;  		memcpy(sqp->ud_header.eth.smac_h, smac, 6);  		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))  			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 5a044526e4f..c4e0131f1b5 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -161,7 +161,7 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)  	ocrdma_get_guid(dev, &sgid->raw[8]);  } -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if IS_ENABLED(CONFIG_VLAN_8021Q)  static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)  {  	struct net_device *netdev, *tmp; @@ -202,14 +202,13 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)  	return 0;  } -#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q) +#if IS_ENABLED(CONFIG_IPV6)  static int ocrdma_inet6addr_event(struct notifier_block *notifier,  				  unsigned long event, void *ptr)  {  	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; -	struct net_device *event_netdev = ifa->idev->dev; -	struct net_device *netdev = NULL; +	struct net_device *netdev = ifa->idev->dev;  	struct ib_event gid_event;  	struct ocrdma_dev *dev;  	bool found = false; @@ -217,11 +216,12 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,  	bool is_vlan = false;  	u16 vid = 0; -	netdev = vlan_dev_real_dev(event_netdev); -	if (netdev != event_netdev) { -		is_vlan = true; -		vid = vlan_dev_vlan_id(event_netdev); +	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; +	if (is_vlan) { +		vid = vlan_dev_vlan_id(netdev); +		netdev = vlan_dev_real_dev(netdev);  	} +  	
rcu_read_lock();  	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {  		if (dev->nic_info.netdev == netdev) { diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 0d7280af99b..3f6b21e9dc1 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6346,8 +6346,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)  			dd->piobcnt4k * dd->align4k;  		dd->piovl15base	= ioremap_nocache(vl15off,  						  NUM_VL15_BUFS * dd->align4k); -		if (!dd->piovl15base) +		if (!dd->piovl15base) { +			ret = -ENOMEM;  			goto bail; +		}  	}  	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index a322d5171a2..50a8a0d4fe6 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -372,7 +372,7 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd,  		/* Read CTRL reg for each channel to check TRIMDONE */  		if (baduns & (1 << chn)) {  			qib_dev_err(dd, -				"Reseting TRIMDONE on chn %d (%s)\n", +				"Resetting TRIMDONE on chn %d (%s)\n",  				chn, where);  			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,  				IB_CTRL2(chn), 0x10, 0x10); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 95ecf4eadf5..24683fda8e2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1271,12 +1271,15 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path  void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)  {  	struct ipoib_dev_priv *priv = netdev_priv(tx->dev); +	unsigned long flags;  	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { +		spin_lock_irqsave(&priv->lock, flags);  		list_move(&tx->list, &priv->cm.reap_list);  		queue_work(ipoib_workqueue, &priv->cm.reap_task);  		ipoib_dbg(priv, "Reap connection 
for gid %pI6\n",  			  tx->neigh->daddr + 4);  		tx->neigh = NULL; +		spin_unlock_irqrestore(&priv->lock, flags);  	}  } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 97920b77a5d..3e2085a3ee4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1052,7 +1052,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)  	for (n = rcu_dereference_protected(*np,  					    lockdep_is_held(&ntbl->rwlock));  	     n != NULL; -	     n = rcu_dereference_protected(neigh->hnext, +	     n = rcu_dereference_protected(*np,  					lockdep_is_held(&ntbl->rwlock))) {  		if (n == neigh) {  			/* found */ diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index bcbf22ee0aa..1b5b0c73005 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -586,24 +586,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,  			scmnd->sc_data_direction);  } -static void srp_remove_req(struct srp_target_port *target, -			   struct srp_request *req, s32 req_lim_delta) +/** + * srp_claim_req - Take ownership of the scmnd associated with a request. + * @target: SRP target port. + * @req: SRP request. + * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take + *         ownership of @req->scmnd if it equals @scmnd. + * + * Return value: + * Either NULL or a pointer to the SCSI command the caller became owner of. 
+ */ +static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, +				       struct srp_request *req, +				       struct scsi_cmnd *scmnd) +{ +	unsigned long flags; + +	spin_lock_irqsave(&target->lock, flags); +	if (!scmnd) { +		scmnd = req->scmnd; +		req->scmnd = NULL; +	} else if (req->scmnd == scmnd) { +		req->scmnd = NULL; +	} else { +		scmnd = NULL; +	} +	spin_unlock_irqrestore(&target->lock, flags); + +	return scmnd; +} + +/** + * srp_free_req() - Unmap data and add request to the free request list. + */ +static void srp_free_req(struct srp_target_port *target, +			 struct srp_request *req, struct scsi_cmnd *scmnd, +			 s32 req_lim_delta)  {  	unsigned long flags; -	srp_unmap_data(req->scmnd, target, req); +	srp_unmap_data(scmnd, target, req); +  	spin_lock_irqsave(&target->lock, flags);  	target->req_lim += req_lim_delta; -	req->scmnd = NULL;  	list_add_tail(&req->list, &target->free_reqs);  	spin_unlock_irqrestore(&target->lock, flags);  }  static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)  { -	req->scmnd->result = DID_RESET << 16; -	req->scmnd->scsi_done(req->scmnd); -	srp_remove_req(target, req, 0); +	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); + +	if (scmnd) { +		scmnd->result = DID_RESET << 16; +		scmnd->scsi_done(scmnd); +		srp_free_req(target, req, scmnd, 0); +	}  }  static int srp_reconnect_target(struct srp_target_port *target) @@ -1073,11 +1111,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)  		complete(&target->tsk_mgmt_done);  	} else {  		req = &target->req_ring[rsp->tag]; -		scmnd = req->scmnd; -		if (!scmnd) +		scmnd = srp_claim_req(target, req, NULL); +		if (!scmnd) {  			shost_printk(KERN_ERR, target->scsi_host,  				     "Null scmnd for RSP w/tag %016llx\n",  				     (unsigned long long) rsp->tag); + +			spin_lock_irqsave(&target->lock, flags); +			target->req_lim += be32_to_cpu(rsp->req_lim_delta); +			spin_unlock_irqrestore(&target->lock, 
flags); + +			return; +		}  		scmnd->result = rsp->status;  		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { @@ -1092,7 +1137,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)  		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))  			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); -		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta)); +		srp_free_req(target, req, scmnd, +			     be32_to_cpu(rsp->req_lim_delta)); +  		scmnd->host_scribble = NULL;  		scmnd->scsi_done(scmnd);  	} @@ -1631,25 +1678,17 @@ static int srp_abort(struct scsi_cmnd *scmnd)  {  	struct srp_target_port *target = host_to_target(scmnd->device->host);  	struct srp_request *req = (struct srp_request *) scmnd->host_scribble; -	int ret = SUCCESS;  	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); -	if (!req || target->qp_in_error) +	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))  		return FAILED; -	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, -			      SRP_TSK_ABORT_TASK)) -		return FAILED; - -	if (req->scmnd) { -		if (!target->tsk_mgmt_status) { -			srp_remove_req(target, req, 0); -			scmnd->result = DID_ABORT << 16; -		} else -			ret = FAILED; -	} +	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, +			  SRP_TSK_ABORT_TASK); +	srp_free_req(target, req, scmnd, 0); +	scmnd->result = DID_ABORT << 16; -	return ret; +	return SUCCESS;  }  static int srp_reset_device(struct scsi_cmnd *scmnd) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 7a0ce8d4288..9e1449f8c6a 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1469,7 +1469,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,   *   * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping   * the data that has been transferred via IB RDMA had to be postponed until the - * check_stop_free() 
callback.  None of this is nessecary anymore and needs to + * check_stop_free() callback.  None of this is necessary anymore and needs to   * be cleaned up.   */  static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 0a2ea317120..18a89b760aa 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1111,7 +1111,7 @@ static void print_iommu_info(void)  		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {  			pr_info("AMD-Vi:  Extended features: "); -			for (i = 0; ARRAY_SIZE(feat_str); ++i) { +			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {  				if (iommu_feature(iommu, (1ULL << i)))  					pr_cont(" %s", feat_str[i]);  			} diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index e0b18f3ae9a..af8904de1d4 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -736,6 +736,7 @@ int __init parse_ioapics_under_ir(void)  {  	struct dmar_drhd_unit *drhd;  	int ir_supported = 0; +	int ioapic_idx;  	for_each_drhd_unit(drhd) {  		struct intel_iommu *iommu = drhd->iommu; @@ -748,13 +749,20 @@ int __init parse_ioapics_under_ir(void)  		}  	} -	if (ir_supported && ir_ioapic_num != nr_ioapics) { -		printk(KERN_WARNING -		       "Not all IO-APIC's listed under remapping hardware\n"); -		return -1; +	if (!ir_supported) +		return 0; + +	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { +		int ioapic_id = mpc_ioapic_id(ioapic_idx); +		if (!map_ioapic_to_ir(ioapic_id)) { +			pr_err(FW_BUG "ioapic %d has no mapping iommu, " +			       "interrupt remapping will be disabled\n", +			       ioapic_id); +			return -1; +		}  	} -	return ir_supported; +	return 1;  }  int __init ir_dev_scope_init(void) diff --git a/drivers/md/md.c b/drivers/md/md.c index fcd098794d3..3f6203a4c7e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1108,8 +1108,11 @@ static int super_90_load(struct md_rdev *rdev, struct 
md_rdev *refdev, int minor  			ret = 0;  	}  	rdev->sectors = rdev->sb_start; -	/* Limit to 4TB as metadata cannot record more than that */ -	if (rdev->sectors >= (2ULL << 32)) +	/* Limit to 4TB as metadata cannot record more than that. +	 * (not needed for Linear and RAID0 as metadata doesn't +	 * record this size) +	 */ +	if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)  		rdev->sectors = (2ULL << 32) - 2;  	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) @@ -1400,7 +1403,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)  	/* Limit to 4TB as metadata cannot record more than that.  	 * 4TB == 2^32 KB, or 2*2^32 sectors.  	 */ -	if (num_sectors >= (2ULL << 32)) +	if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)  		num_sectors = (2ULL << 32) - 2;  	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,  		       rdev->sb_page); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index de5ed6fd880..1c2eb38f3c5 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -659,7 +659,11 @@ static int raid10_mergeable_bvec(struct request_queue *q,  		max = biovec->bv_len;  	if (mddev->merge_check_needed) { -		struct r10bio r10_bio; +		struct { +			struct r10bio r10_bio; +			struct r10dev devs[conf->copies]; +		} on_stack; +		struct r10bio *r10_bio = &on_stack.r10_bio;  		int s;  		if (conf->reshape_progress != MaxSector) {  			/* Cannot give any guidance during reshape */ @@ -667,18 +671,18 @@ static int raid10_mergeable_bvec(struct request_queue *q,  				return biovec->bv_len;  			return 0;  		} -		r10_bio.sector = sector; -		raid10_find_phys(conf, &r10_bio); +		r10_bio->sector = sector; +		raid10_find_phys(conf, r10_bio);  		rcu_read_lock();  		for (s = 0; s < conf->copies; s++) { -			int disk = r10_bio.devs[s].devnum; +			int disk = r10_bio->devs[s].devnum;  			struct md_rdev *rdev = rcu_dereference(  				conf->mirrors[disk].rdev);  			if (rdev && !test_bit(Faulty, &rdev->flags)) {  				struct 
request_queue *q =  					bdev_get_queue(rdev->bdev);  				if (q->merge_bvec_fn) { -					bvm->bi_sector = r10_bio.devs[s].addr +					bvm->bi_sector = r10_bio->devs[s].addr  						+ rdev->data_offset;  					bvm->bi_bdev = rdev->bdev;  					max = min(max, q->merge_bvec_fn( @@ -690,7 +694,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,  				struct request_queue *q =  					bdev_get_queue(rdev->bdev);  				if (q->merge_bvec_fn) { -					bvm->bi_sector = r10_bio.devs[s].addr +					bvm->bi_sector = r10_bio->devs[s].addr  						+ rdev->data_offset;  					bvm->bi_bdev = rdev->bdev;  					max = min(max, q->merge_bvec_fn( @@ -4414,14 +4418,18 @@ static int handle_reshape_read_error(struct mddev *mddev,  {  	/* Use sync reads to get the blocks from somewhere else */  	int sectors = r10_bio->sectors; -	struct r10bio r10b;  	struct r10conf *conf = mddev->private; +	struct { +		struct r10bio r10_bio; +		struct r10dev devs[conf->copies]; +	} on_stack; +	struct r10bio *r10b = &on_stack.r10_bio;  	int slot = 0;  	int idx = 0;  	struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; -	r10b.sector = r10_bio->sector; -	__raid10_find_phys(&conf->prev, &r10b); +	r10b->sector = r10_bio->sector; +	__raid10_find_phys(&conf->prev, r10b);  	while (sectors) {  		int s = sectors; @@ -4432,7 +4440,7 @@ static int handle_reshape_read_error(struct mddev *mddev,  			s = PAGE_SIZE >> 9;  		while (!success) { -			int d = r10b.devs[slot].devnum; +			int d = r10b->devs[slot].devnum;  			struct md_rdev *rdev = conf->mirrors[d].rdev;  			sector_t addr;  			if (rdev == NULL || @@ -4440,7 +4448,7 @@ static int handle_reshape_read_error(struct mddev *mddev,  			    !test_bit(In_sync, &rdev->flags))  				goto failed; -			addr = r10b.devs[slot].addr + idx * PAGE_SIZE; +			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;  			success = sync_page_io(rdev,  					       addr,  					       s << 9, diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 007c2c68dd8..1054cf60234 100644 --- 
a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -110,7 +110,7 @@ struct r10bio {  	 * We choose the number when they are allocated.  	 * We sometimes need an extra bio to write to the replacement.  	 */ -	struct { +	struct r10dev {  		struct bio	*bio;  		union {  			struct bio	*repl_bio; /* used for resync and diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c index 664e460f247..aac622200e9 100644 --- a/drivers/media/dvb/siano/smsusb.c +++ b/drivers/media/dvb/siano/smsusb.c @@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf)  	return 0;  } -static const struct usb_device_id smsusb_id_table[] __devinitconst = { +static const struct usb_device_id smsusb_id_table[] = {  	{ USB_DEVICE(0x187f, 0x0010),  		.driver_info = SMS1XXX_BOARD_SIANO_STELLAR },  	{ USB_DEVICE(0x187f, 0x0100), diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index d0b6bb50763..72ded29728b 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c @@ -35,6 +35,11 @@  #include <media/v4l2-device.h>  #include <sound/tea575x-tuner.h> +#if defined(CONFIG_LEDS_CLASS) || \ +    (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE)) +#define SHARK_USE_LEDS 1 +#endif +  /*   * Version Information   */ @@ -56,44 +61,18 @@ MODULE_LICENSE("GPL");  enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS }; -static void shark_led_set_blue(struct led_classdev *led_cdev, -			       enum led_brightness value); -static void shark_led_set_blue_pulse(struct led_classdev *led_cdev, -				     enum led_brightness value); -static void shark_led_set_red(struct led_classdev *led_cdev, -			      enum led_brightness value); - -static const struct led_classdev shark_led_templates[NO_LEDS] = { -	[BLUE_LED] = { -		.name		= "%s:blue:", -		.brightness	= LED_OFF, -		.max_brightness = 127, -		.brightness_set = shark_led_set_blue, -	}, -	[BLUE_PULSE_LED] = { -		.name		= "%s:blue-pulse:", -		.brightness	= 
LED_OFF, -		.max_brightness = 255, -		.brightness_set = shark_led_set_blue_pulse, -	}, -	[RED_LED] = { -		.name		= "%s:red:", -		.brightness	= LED_OFF, -		.max_brightness = 1, -		.brightness_set = shark_led_set_red, -	}, -}; -  struct shark_device {  	struct usb_device *usbdev;  	struct v4l2_device v4l2_dev;  	struct snd_tea575x tea; +#ifdef SHARK_USE_LEDS  	struct work_struct led_work;  	struct led_classdev leds[NO_LEDS];  	char led_names[NO_LEDS][32];  	atomic_t brightness[NO_LEDS];  	unsigned long brightness_new; +#endif  	u8 *transfer_buffer;  	u32 last_val; @@ -175,20 +154,13 @@ static struct snd_tea575x_ops shark_tea_ops = {  	.read_val  = shark_read_val,  }; +#ifdef SHARK_USE_LEDS  static void shark_led_work(struct work_struct *work)  {  	struct shark_device *shark =  		container_of(work, struct shark_device, led_work);  	int i, res, brightness, actual_len; -	/* -	 * We use the v4l2_dev lock and registered bit to ensure the device -	 * does not get unplugged and unreffed while we're running. 
-	 */ -	mutex_lock(&shark->tea.mutex); -	if (!video_is_registered(&shark->tea.vd)) -		goto leave; -  	for (i = 0; i < 3; i++) {  		if (!test_and_clear_bit(i, &shark->brightness_new))  			continue; @@ -208,8 +180,6 @@ static void shark_led_work(struct work_struct *work)  			v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n",  				 shark->led_names[i], res);  	} -leave: -	mutex_unlock(&shark->tea.mutex);  }  static void shark_led_set_blue(struct led_classdev *led_cdev, @@ -245,19 +215,78 @@ static void shark_led_set_red(struct led_classdev *led_cdev,  	schedule_work(&shark->led_work);  } +static const struct led_classdev shark_led_templates[NO_LEDS] = { +	[BLUE_LED] = { +		.name		= "%s:blue:", +		.brightness	= LED_OFF, +		.max_brightness = 127, +		.brightness_set = shark_led_set_blue, +	}, +	[BLUE_PULSE_LED] = { +		.name		= "%s:blue-pulse:", +		.brightness	= LED_OFF, +		.max_brightness = 255, +		.brightness_set = shark_led_set_blue_pulse, +	}, +	[RED_LED] = { +		.name		= "%s:red:", +		.brightness	= LED_OFF, +		.max_brightness = 1, +		.brightness_set = shark_led_set_red, +	}, +}; + +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ +	int i, retval; + +	INIT_WORK(&shark->led_work, shark_led_work); +	for (i = 0; i < NO_LEDS; i++) { +		shark->leds[i] = shark_led_templates[i]; +		snprintf(shark->led_names[i], sizeof(shark->led_names[0]), +			 shark->leds[i].name, shark->v4l2_dev.name); +		shark->leds[i].name = shark->led_names[i]; +		retval = led_classdev_register(dev, &shark->leds[i]); +		if (retval) { +			v4l2_err(&shark->v4l2_dev, +				 "couldn't register led: %s\n", +				 shark->led_names[i]); +			return retval; +		} +	} +	return 0; +} + +static void shark_unregister_leds(struct shark_device *shark) +{ +	int i; + +	for (i = 0; i < NO_LEDS; i++) +		led_classdev_unregister(&shark->leds[i]); + +	cancel_work_sync(&shark->led_work); +} +#else +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ +	
v4l2_warn(&shark->v4l2_dev, +		  "CONFIG_LED_CLASS not enabled, LED support disabled\n"); +	return 0; +} +static inline void shark_unregister_leds(struct shark_device *shark) { } +#endif +  static void usb_shark_disconnect(struct usb_interface *intf)  {  	struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);  	struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); -	int i;  	mutex_lock(&shark->tea.mutex);  	v4l2_device_disconnect(&shark->v4l2_dev);  	snd_tea575x_exit(&shark->tea);  	mutex_unlock(&shark->tea.mutex); -	for (i = 0; i < NO_LEDS; i++) -		led_classdev_unregister(&shark->leds[i]); +	shark_unregister_leds(shark);  	v4l2_device_put(&shark->v4l2_dev);  } @@ -266,7 +295,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev)  {  	struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); -	cancel_work_sync(&shark->led_work);  	v4l2_device_unregister(&shark->v4l2_dev);  	kfree(shark->transfer_buffer);  	kfree(shark); @@ -276,7 +304,7 @@ static int usb_shark_probe(struct usb_interface *intf,  			   const struct usb_device_id *id)  {  	struct shark_device *shark; -	int i, retval = -ENOMEM; +	int retval = -ENOMEM;  	shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);  	if (!shark) @@ -286,17 +314,13 @@ static int usb_shark_probe(struct usb_interface *intf,  	if (!shark->transfer_buffer)  		goto err_alloc_buffer; -	/* -	 * Work around a bug in usbhid/hid-core.c, where it leaves a dangling -	 * pointer in intfdata causing v4l2-device.c to not set it. 
Which -	 * results in usb_shark_disconnect() referencing the dangling pointer -	 * -	 * REMOVE (as soon as the above bug is fixed, patch submitted) -	 */ -	usb_set_intfdata(intf, NULL); +	v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); + +	retval = shark_register_leds(shark, &intf->dev); +	if (retval) +		goto err_reg_leds;  	shark->v4l2_dev.release = usb_shark_release; -	v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);  	retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev);  	if (retval) {  		v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); @@ -320,32 +344,13 @@ static int usb_shark_probe(struct usb_interface *intf,  		goto err_init_tea;  	} -	INIT_WORK(&shark->led_work, shark_led_work); -	for (i = 0; i < NO_LEDS; i++) { -		shark->leds[i] = shark_led_templates[i]; -		snprintf(shark->led_names[i], sizeof(shark->led_names[0]), -			 shark->leds[i].name, shark->v4l2_dev.name); -		shark->leds[i].name = shark->led_names[i]; -		/* -		 * We don't fail the probe if we fail to register the leds, -		 * because once we've called snd_tea575x_init, the /dev/radio0 -		 * node may be opened from userspace holding a reference to us! -		 * -		 * Note we cannot register the leds first instead as -		 * shark_led_work depends on the v4l2 mutex and registered bit. 
-		 */ -		retval = led_classdev_register(&intf->dev, &shark->leds[i]); -		if (retval) -			v4l2_err(&shark->v4l2_dev, -				 "couldn't register led: %s\n", -				 shark->led_names[i]); -	} -  	return 0;  err_init_tea:  	v4l2_device_unregister(&shark->v4l2_dev);  err_reg_dev: +	shark_unregister_leds(shark); +err_reg_leds:  	kfree(shark->transfer_buffer);  err_alloc_buffer:  	kfree(shark); diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index b9575de3e7e..7b4efdfaae2 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -35,6 +35,11 @@  #include <media/v4l2-device.h>  #include "radio-tea5777.h" +#if defined(CONFIG_LEDS_CLASS) || \ +    (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK2_MODULE)) +#define SHARK_USE_LEDS 1 +#endif +  MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");  MODULE_DESCRIPTION("Griffin radioSHARK2, USB radio receiver driver");  MODULE_LICENSE("GPL"); @@ -43,7 +48,6 @@ static int debug;  module_param(debug, int, 0);  MODULE_PARM_DESC(debug, "Debug level (0-1)"); -  #define SHARK_IN_EP		0x83  #define SHARK_OUT_EP		0x05 @@ -54,36 +58,18 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");  enum { BLUE_LED, RED_LED, NO_LEDS }; -static void shark_led_set_blue(struct led_classdev *led_cdev, -			       enum led_brightness value); -static void shark_led_set_red(struct led_classdev *led_cdev, -			      enum led_brightness value); - -static const struct led_classdev shark_led_templates[NO_LEDS] = { -	[BLUE_LED] = { -		.name		= "%s:blue:", -		.brightness	= LED_OFF, -		.max_brightness = 127, -		.brightness_set = shark_led_set_blue, -	}, -	[RED_LED] = { -		.name		= "%s:red:", -		.brightness	= LED_OFF, -		.max_brightness = 1, -		.brightness_set = shark_led_set_red, -	}, -}; -  struct shark_device {  	struct usb_device *usbdev;  	struct v4l2_device v4l2_dev;  	struct radio_tea5777 tea; +#ifdef SHARK_USE_LEDS  	struct work_struct led_work;  	struct led_classdev 
leds[NO_LEDS];  	char led_names[NO_LEDS][32];  	atomic_t brightness[NO_LEDS];  	unsigned long brightness_new; +#endif  	u8 *transfer_buffer;  }; @@ -161,18 +147,12 @@ static struct radio_tea5777_ops shark_tea_ops = {  	.read_reg  = shark_read_reg,  }; +#ifdef SHARK_USE_LEDS  static void shark_led_work(struct work_struct *work)  {  	struct shark_device *shark =  		container_of(work, struct shark_device, led_work);  	int i, res, brightness, actual_len; -	/* -	 * We use the v4l2_dev lock and registered bit to ensure the device -	 * does not get unplugged and unreffed while we're running. -	 */ -	mutex_lock(&shark->tea.mutex); -	if (!video_is_registered(&shark->tea.vd)) -		goto leave;  	for (i = 0; i < 2; i++) {  		if (!test_and_clear_bit(i, &shark->brightness_new)) @@ -191,8 +171,6 @@ static void shark_led_work(struct work_struct *work)  			v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n",  				 shark->led_names[i], res);  	} -leave: -	mutex_unlock(&shark->tea.mutex);  }  static void shark_led_set_blue(struct led_classdev *led_cdev, @@ -217,19 +195,72 @@ static void shark_led_set_red(struct led_classdev *led_cdev,  	schedule_work(&shark->led_work);  } +static const struct led_classdev shark_led_templates[NO_LEDS] = { +	[BLUE_LED] = { +		.name		= "%s:blue:", +		.brightness	= LED_OFF, +		.max_brightness = 127, +		.brightness_set = shark_led_set_blue, +	}, +	[RED_LED] = { +		.name		= "%s:red:", +		.brightness	= LED_OFF, +		.max_brightness = 1, +		.brightness_set = shark_led_set_red, +	}, +}; + +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ +	int i, retval; + +	INIT_WORK(&shark->led_work, shark_led_work); +	for (i = 0; i < NO_LEDS; i++) { +		shark->leds[i] = shark_led_templates[i]; +		snprintf(shark->led_names[i], sizeof(shark->led_names[0]), +			 shark->leds[i].name, shark->v4l2_dev.name); +		shark->leds[i].name = shark->led_names[i]; +		retval = led_classdev_register(dev, &shark->leds[i]); +		if (retval) { +			
v4l2_err(&shark->v4l2_dev, +				 "couldn't register led: %s\n", +				 shark->led_names[i]); +			return retval; +		} +	} +	return 0; +} + +static void shark_unregister_leds(struct shark_device *shark) +{ +	int i; + +	for (i = 0; i < NO_LEDS; i++) +		led_classdev_unregister(&shark->leds[i]); + +	cancel_work_sync(&shark->led_work); +} +#else +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ +	v4l2_warn(&shark->v4l2_dev, +		  "CONFIG_LED_CLASS not enabled, LED support disabled\n"); +	return 0; +} +static inline void shark_unregister_leds(struct shark_device *shark) { } +#endif +  static void usb_shark_disconnect(struct usb_interface *intf)  {  	struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);  	struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); -	int i;  	mutex_lock(&shark->tea.mutex);  	v4l2_device_disconnect(&shark->v4l2_dev);  	radio_tea5777_exit(&shark->tea);  	mutex_unlock(&shark->tea.mutex); -	for (i = 0; i < NO_LEDS; i++) -		led_classdev_unregister(&shark->leds[i]); +	shark_unregister_leds(shark);  	v4l2_device_put(&shark->v4l2_dev);  } @@ -238,7 +269,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev)  {  	struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); -	cancel_work_sync(&shark->led_work);  	v4l2_device_unregister(&shark->v4l2_dev);  	kfree(shark->transfer_buffer);  	kfree(shark); @@ -248,7 +278,7 @@ static int usb_shark_probe(struct usb_interface *intf,  			   const struct usb_device_id *id)  {  	struct shark_device *shark; -	int i, retval = -ENOMEM; +	int retval = -ENOMEM;  	shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);  	if (!shark) @@ -258,17 +288,13 @@ static int usb_shark_probe(struct usb_interface *intf,  	if (!shark->transfer_buffer)  		goto err_alloc_buffer; -	/* -	 * Work around a bug in usbhid/hid-core.c, where it leaves a dangling -	 * pointer in intfdata causing v4l2-device.c to not set it. 
Which -	 * results in usb_shark_disconnect() referencing the dangling pointer -	 * -	 * REMOVE (as soon as the above bug is fixed, patch submitted) -	 */ -	usb_set_intfdata(intf, NULL); +	v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); + +	retval = shark_register_leds(shark, &intf->dev); +	if (retval) +		goto err_reg_leds;  	shark->v4l2_dev.release = usb_shark_release; -	v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);  	retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev);  	if (retval) {  		v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); @@ -292,32 +318,13 @@ static int usb_shark_probe(struct usb_interface *intf,  		goto err_init_tea;  	} -	INIT_WORK(&shark->led_work, shark_led_work); -	for (i = 0; i < NO_LEDS; i++) { -		shark->leds[i] = shark_led_templates[i]; -		snprintf(shark->led_names[i], sizeof(shark->led_names[0]), -			 shark->leds[i].name, shark->v4l2_dev.name); -		shark->leds[i].name = shark->led_names[i]; -		/* -		 * We don't fail the probe if we fail to register the leds, -		 * because once we've called radio_tea5777_init, the /dev/radio0 -		 * node may be opened from userspace holding a reference to us! -		 * -		 * Note we cannot register the leds first instead as -		 * shark_led_work depends on the v4l2 mutex and registered bit. 
-		 */ -		retval = led_classdev_register(&intf->dev, &shark->leds[i]); -		if (retval) -			v4l2_err(&shark->v4l2_dev, -				 "couldn't register led: %s\n", -				 shark->led_names[i]); -	} -  	return 0;  err_init_tea:  	v4l2_device_unregister(&shark->v4l2_dev);  err_reg_dev: +	shark_unregister_leds(shark); +err_reg_leds:  	kfree(shark->transfer_buffer);  err_alloc_buffer:  	kfree(shark); diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c index 9e38132afec..9bb65e170d9 100644 --- a/drivers/media/radio/si470x/radio-si470x-common.c +++ b/drivers/media/radio/si470x/radio-si470x-common.c @@ -151,6 +151,7 @@ static const struct v4l2_frequency_band bands[] = {  		.index = 0,  		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |  			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | +			    V4L2_TUNER_CAP_FREQ_BANDS |  			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |  			    V4L2_TUNER_CAP_HWSEEK_WRAP,  		.rangelow   =  87500 * 16, @@ -162,6 +163,7 @@ static const struct v4l2_frequency_band bands[] = {  		.index = 1,  		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |  			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | +			    V4L2_TUNER_CAP_FREQ_BANDS |  			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |  			    V4L2_TUNER_CAP_HWSEEK_WRAP,  		.rangelow   =  76000 * 16, @@ -173,6 +175,7 @@ static const struct v4l2_frequency_band bands[] = {  		.index = 2,  		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |  			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | +			    V4L2_TUNER_CAP_FREQ_BANDS |  			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |  			    V4L2_TUNER_CAP_HWSEEK_WRAP,  		.rangelow   =  76000 * 16, diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index 643a6ff7c5d..f867f04cccc 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -225,8 +225,9 @@ int 
si470x_vidioc_querycap(struct file *file, void *priv,  {  	strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));  	strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); -	capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | -		V4L2_CAP_TUNER | V4L2_CAP_RADIO; +	capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | +		V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE; +	capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;  	return 0;  } diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c index 146be4263ea..be076f7181e 100644 --- a/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/drivers/media/radio/si470x/radio-si470x-usb.c @@ -531,7 +531,7 @@ int si470x_vidioc_querycap(struct file *file, void *priv,  	strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));  	usb_make_path(radio->usbdev, capability->bus_info,  			sizeof(capability->bus_info)); -	capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | +	capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |  		V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;  	capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;  	return 0; diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 5180390be7a..8be57634ba6 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -261,6 +261,7 @@ config IR_WINBOND_CIR  config IR_IGUANA  	tristate "IguanaWorks USB IR Transceiver" +	depends on USB_ARCH_HAS_HCD  	depends on RC_CORE  	select USB  	---help--- diff --git a/drivers/media/video/gspca/jl2005bcd.c b/drivers/media/video/gspca/jl2005bcd.c index cf9d9fca5b8..234777116e5 100644 --- a/drivers/media/video/gspca/jl2005bcd.c +++ b/drivers/media/video/gspca/jl2005bcd.c @@ -512,7 +512,7 @@ static const struct sd_desc sd_desc = {  };  /* -- module initialisation -- */ -static const __devinitdata struct usb_device_id device_table[] = { 
+static const struct usb_device_id device_table[] = {  	{USB_DEVICE(0x0979, 0x0227)},  	{}  }; diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c index 969bb5a4cd9..bab01c86c31 100644 --- a/drivers/media/video/gspca/spca506.c +++ b/drivers/media/video/gspca/spca506.c @@ -579,7 +579,7 @@ static const struct sd_desc sd_desc = {  };  /* -- module initialisation -- */ -static const struct usb_device_id device_table[] __devinitconst = { +static const struct usb_device_id device_table[] = {  	{USB_DEVICE(0x06e1, 0xa190)},  /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505  	{USB_DEVICE(0x0733, 0x0430)}, */ diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c index 7efe9ad7acc..0b91a5cd38e 100644 --- a/drivers/media/video/mem2mem_testdev.c +++ b/drivers/media/video/mem2mem_testdev.c @@ -431,7 +431,7 @@ static int vidioc_querycap(struct file *file, void *priv,  	strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);  	strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);  	strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info)); -	cap->capabilities = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; +	cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;  	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;  	return 0;  } diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index d2e6f82ecfa..560a65aa703 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -403,7 +403,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)  	dev_dbg(pcdev->icd->parent, "Activate device\n"); -	clk_enable(pcdev->clk); +	clk_prepare_enable(pcdev->clk);  	/* enable CSI before doing anything else */  	__raw_writel(csicr1, pcdev->base + CSICR1); @@ -422,7 +422,7 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)  	/* Disable all CSI interface */  	__raw_writel(0x00, pcdev->base + CSICR1); -	clk_disable(pcdev->clk); +	
clk_disable_unprepare(pcdev->clk);  }  /* diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c index 637bde8aca2..ac175406e58 100644 --- a/drivers/media/video/mx2_camera.c +++ b/drivers/media/video/mx2_camera.c @@ -272,7 +272,7 @@ struct mx2_camera_dev {  	struct device		*dev;  	struct soc_camera_host	soc_host;  	struct soc_camera_device *icd; -	struct clk		*clk_csi, *clk_emma; +	struct clk		*clk_csi, *clk_emma_ahb, *clk_emma_ipg;  	unsigned int		irq_csi, irq_emma;  	void __iomem		*base_csi, *base_emma; @@ -407,7 +407,7 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)  {  	unsigned long flags; -	clk_disable(pcdev->clk_csi); +	clk_disable_unprepare(pcdev->clk_csi);  	writel(0, pcdev->base_csi + CSICR1);  	if (cpu_is_mx27()) {  		writel(0, pcdev->base_emma + PRP_CNTL); @@ -435,7 +435,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)  	if (pcdev->icd)  		return -EBUSY; -	ret = clk_enable(pcdev->clk_csi); +	ret = clk_prepare_enable(pcdev->clk_csi);  	if (ret < 0)  		return ret; @@ -1633,23 +1633,34 @@ static int __devinit mx27_camera_emma_init(struct mx2_camera_dev *pcdev)  		goto exit_iounmap;  	} -	pcdev->clk_emma = clk_get(NULL, "emma"); -	if (IS_ERR(pcdev->clk_emma)) { -		err = PTR_ERR(pcdev->clk_emma); +	pcdev->clk_emma_ipg = clk_get(pcdev->dev, "emma-ipg"); +	if (IS_ERR(pcdev->clk_emma_ipg)) { +		err = PTR_ERR(pcdev->clk_emma_ipg);  		goto exit_free_irq;  	} -	clk_enable(pcdev->clk_emma); +	clk_prepare_enable(pcdev->clk_emma_ipg); + +	pcdev->clk_emma_ahb = clk_get(pcdev->dev, "emma-ahb"); +	if (IS_ERR(pcdev->clk_emma_ahb)) { +		err = PTR_ERR(pcdev->clk_emma_ahb); +		goto exit_clk_emma_ipg_put; +	} + +	clk_prepare_enable(pcdev->clk_emma_ahb);  	err = mx27_camera_emma_prp_reset(pcdev);  	if (err) -		goto exit_clk_emma_put; +		goto exit_clk_emma_ahb_put;  	return err; -exit_clk_emma_put: -	clk_disable(pcdev->clk_emma); -	clk_put(pcdev->clk_emma); +exit_clk_emma_ahb_put: +	
clk_disable_unprepare(pcdev->clk_emma_ahb); +	clk_put(pcdev->clk_emma_ahb); +exit_clk_emma_ipg_put: +	clk_disable_unprepare(pcdev->clk_emma_ipg); +	clk_put(pcdev->clk_emma_ipg);  exit_free_irq:  	free_irq(pcdev->irq_emma, pcdev);  exit_iounmap: @@ -1685,7 +1696,7 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)  		goto exit;  	} -	pcdev->clk_csi = clk_get(&pdev->dev, NULL); +	pcdev->clk_csi = clk_get(&pdev->dev, "ahb");  	if (IS_ERR(pcdev->clk_csi)) {  		dev_err(&pdev->dev, "Could not get csi clock\n");  		err = PTR_ERR(pcdev->clk_csi); @@ -1785,8 +1796,10 @@ exit_free_emma:  eallocctx:  	if (cpu_is_mx27()) {  		free_irq(pcdev->irq_emma, pcdev); -		clk_disable(pcdev->clk_emma); -		clk_put(pcdev->clk_emma); +		clk_disable_unprepare(pcdev->clk_emma_ipg); +		clk_put(pcdev->clk_emma_ipg); +		clk_disable_unprepare(pcdev->clk_emma_ahb); +		clk_put(pcdev->clk_emma_ahb);  		iounmap(pcdev->base_emma);  		release_mem_region(pcdev->res_emma->start, resource_size(pcdev->res_emma));  	} @@ -1825,8 +1838,10 @@ static int __devexit mx2_camera_remove(struct platform_device *pdev)  	iounmap(pcdev->base_csi);  	if (cpu_is_mx27()) { -		clk_disable(pcdev->clk_emma); -		clk_put(pcdev->clk_emma); +		clk_disable_unprepare(pcdev->clk_emma_ipg); +		clk_put(pcdev->clk_emma_ipg); +		clk_disable_unprepare(pcdev->clk_emma_ahb); +		clk_put(pcdev->clk_emma_ahb);  		iounmap(pcdev->base_emma);  		res = pcdev->res_emma;  		release_mem_region(res->start, resource_size(res)); diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index f13643d3135..af2297dd49c 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -61,15 +61,9 @@  #define MAX_VIDEO_MEM 16 -enum csi_buffer_state { -	CSI_BUF_NEEDS_INIT, -	CSI_BUF_PREPARED, -}; -  struct mx3_camera_buffer {  	/* common v4l buffer stuff -- must be first */  	struct vb2_buffer			vb; -	enum csi_buffer_state			state;  	struct list_head			queue;  	/* One descriptot per 
scatterlist (per frame) */ @@ -285,7 +279,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)  		goto error;  	} -	if (buf->state == CSI_BUF_NEEDS_INIT) { +	if (!buf->txd) {  		sg_dma_address(sg)	= vb2_dma_contig_plane_dma_addr(vb, 0);  		sg_dma_len(sg)		= new_size; @@ -298,7 +292,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)  		txd->callback_param	= txd;  		txd->callback		= mx3_cam_dma_done; -		buf->state		= CSI_BUF_PREPARED;  		buf->txd		= txd;  	} else {  		txd = buf->txd; @@ -385,7 +378,6 @@ static void mx3_videobuf_release(struct vb2_buffer *vb)  	/* Doesn't hurt also if the list is empty */  	list_del_init(&buf->queue); -	buf->state = CSI_BUF_NEEDS_INIT;  	if (txd) {  		buf->txd = NULL; @@ -405,13 +397,13 @@ static int mx3_videobuf_init(struct vb2_buffer *vb)  	struct mx3_camera_dev *mx3_cam = ici->priv;  	struct mx3_camera_buffer *buf = to_mx3_vb(vb); -	/* This is for locking debugging only */ -	INIT_LIST_HEAD(&buf->queue); -	sg_init_table(&buf->sg, 1); +	if (!buf->txd) { +		/* This is for locking debugging only */ +		INIT_LIST_HEAD(&buf->queue); +		sg_init_table(&buf->sg, 1); -	buf->state = CSI_BUF_NEEDS_INIT; - -	mx3_cam->buf_total += vb2_plane_size(vb, 0); +		mx3_cam->buf_total += vb2_plane_size(vb, 0); +	}  	return 0;  } diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index b03ffecb743..1bde255e45d 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -171,7 +171,8 @@ static int soc_camera_try_fmt(struct soc_camera_device *icd,  	dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",  		pixfmtstr(pix->pixelformat), pix->width, pix->height); -	if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { +	if (pix->pixelformat != V4L2_PIX_FMT_JPEG && +	    !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {  		pix->bytesperline = 0;  		pix->sizeimage = 0;  	} diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c index 89dce097a82..a397812635d 100644 --- 
a/drivers/media/video/soc_mediabus.c +++ b/drivers/media/video/soc_mediabus.c @@ -378,6 +378,9 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel);  s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)  { +	if (mf->fourcc == V4L2_PIX_FMT_JPEG) +		return 0; +  	if (mf->layout != SOC_MBUS_LAYOUT_PACKED)  		return width * mf->bits_per_sample / 8; @@ -400,6 +403,9 @@ EXPORT_SYMBOL(soc_mbus_bytes_per_line);  s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,  			u32 bytes_per_line, u32 height)  { +	if (mf->fourcc == V4L2_PIX_FMT_JPEG) +		return 0; +  	if (mf->layout == SOC_MBUS_LAYOUT_PACKED)  		return bytes_per_line * height; diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c index 9288fbd5001..5577381b5bf 100644 --- a/drivers/media/video/uvc/uvc_queue.c +++ b/drivers/media/video/uvc/uvc_queue.c @@ -338,6 +338,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,  	if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {  		buf->error = 0;  		buf->state = UVC_BUF_STATE_QUEUED; +		buf->bytesused = 0;  		vb2_set_plane_payload(&buf->buf, 0, 0);  		return buf;  	} diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c index c3b7b5f59b3..6bc47fc82fe 100644 --- a/drivers/media/video/v4l2-ioctl.c +++ b/drivers/media/video/v4l2-ioctl.c @@ -402,8 +402,10 @@ static void v4l_print_hw_freq_seek(const void *arg, bool write_only)  {  	const struct v4l2_hw_freq_seek *p = arg; -	pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n", -		p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing); +	pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, " +		"rangelow=%u, rangehigh=%u\n", +		p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing, +		p->rangelow, p->rangehigh);  }  static void v4l_print_requestbuffers(const void *arg, bool write_only) @@ -1853,6 +1855,8 @@ static int v4l_enum_freq_bands(const struct 
v4l2_ioctl_ops *ops,  			.type = type,  		}; +		if (p->index) +			return -EINVAL;  		err = ops->vidioc_g_tuner(file, fh, &t);  		if (err)  			return err; @@ -1870,6 +1874,8 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,  		if (type != V4L2_TUNER_RADIO)  			return -EINVAL; +		if (p->index) +			return -EINVAL;  		err = ops->vidioc_g_modulator(file, fh, &m);  		if (err)  			return err; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index d1facef28a6..b1a146205c0 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -395,7 +395,8 @@ config MFD_TC6387XB  config MFD_TC6393XB  	bool "Support Toshiba TC6393XB" -	depends on GPIOLIB && ARM && HAVE_CLK +	depends on ARM && HAVE_CLK +	select GPIOLIB  	select MFD_CORE  	select MFD_TMIO  	help diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index c6ffbbe5a6c..d78c05e693f 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -1253,7 +1253,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,  			if (dev->wd_timeout)  				*slots -= mei_data2slots(MEI_START_WD_DATA_SIZE);  			else -				*slots -= mei_data2slots(MEI_START_WD_DATA_SIZE); +				*slots -= mei_data2slots(MEI_WD_PARAMS_SIZE);  		}  	}  	if (dev->stop) diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 09233020886..7422c765284 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -925,6 +925,27 @@ static struct miscdevice  mei_misc_device = {  };  /** + * mei_quirk_probe - probe for devices that doesn't valid ME interface + * @pdev: PCI device structure + * @ent: entry into pci_device_table + * + * returns true if ME Interface is valid, false otherwise + */ +static bool __devinit mei_quirk_probe(struct pci_dev *pdev, +				const struct pci_device_id *ent) +{ +	u32 reg; +	if (ent->device == MEI_DEV_ID_PBG_1) { +		pci_read_config_dword(pdev, 0x48, ®); +		/* make sure that bit 9 is up and bit 10 is down */ +		if ((reg & 0x600) == 0x200) 
{ +			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); +			return false; +		} +	} +	return true; +} +/**   * mei_probe - Device Initialization Routine   *   * @pdev: PCI device structure @@ -939,6 +960,12 @@ static int __devinit mei_probe(struct pci_dev *pdev,  	int err;  	mutex_lock(&mei_mutex); + +	if (!mei_quirk_probe(pdev, ent)) { +		err = -ENODEV; +		goto end; +	} +  	if (mei_device) {  		err = -EEXIST;  		goto end; diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 87b251ab6ec..b9e2000969f 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -18,6 +18,8 @@  #include <linux/interrupt.h>  #include <linux/delay.h>  #include <linux/device.h> +#include <linux/cpu.h> +#include <linux/module.h>  #include <linux/err.h>  #include <linux/slab.h>  #include <asm/uv/uv_hub.h> @@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;  					 XPC_NOTIFY_MSG_SIZE_UV)  #define XPC_NOTIFY_IRQ_NAME		"xpc_notify" +static int xpc_mq_node = -1; +  static struct xpc_gru_mq_uv *xpc_activate_mq_uv;  static struct xpc_gru_mq_uv *xpc_notify_mq_uv; @@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)  #if defined CONFIG_X86_64  	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,  			UV_AFFINITY_CPU); -	if (mq->irq < 0) { -		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", -			-mq->irq); +	if (mq->irq < 0)  		return mq->irq; -	}  	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); @@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,  	mq->mmr_blade = uv_cpu_to_blade_id(cpu);  	nid = cpu_to_node(cpu); -	page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, -				pg_order); +	page = alloc_pages_exact_node(nid, +				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, +				      pg_order);  	if (page == NULL) {  		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "  			
"bytes of memory on nid=%d for GRU mq\n", mq_size, nid); @@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {  	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,  }; +static int +xpc_init_mq_node(int nid) +{ +	int cpu; + +	get_online_cpus(); + +	for_each_cpu(cpu, cpumask_of_node(nid)) { +		xpc_activate_mq_uv = +			xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid, +					     XPC_ACTIVATE_IRQ_NAME, +					     xpc_handle_activate_IRQ_uv); +		if (!IS_ERR(xpc_activate_mq_uv)) +			break; +	} +	if (IS_ERR(xpc_activate_mq_uv)) { +		put_online_cpus(); +		return PTR_ERR(xpc_activate_mq_uv); +	} + +	for_each_cpu(cpu, cpumask_of_node(nid)) { +		xpc_notify_mq_uv = +			xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid, +					     XPC_NOTIFY_IRQ_NAME, +					     xpc_handle_notify_IRQ_uv); +		if (!IS_ERR(xpc_notify_mq_uv)) +			break; +	} +	if (IS_ERR(xpc_notify_mq_uv)) { +		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); +		put_online_cpus(); +		return PTR_ERR(xpc_notify_mq_uv); +	} + +	put_online_cpus(); +	return 0; +} +  int  xpc_init_uv(void)  { +	int nid; +	int ret = 0; +  	xpc_arch_ops = xpc_arch_ops_uv;  	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { @@ -1742,21 +1785,21 @@ xpc_init_uv(void)  		return -E2BIG;  	} -	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, -						  XPC_ACTIVATE_IRQ_NAME, -						  xpc_handle_activate_IRQ_uv); -	if (IS_ERR(xpc_activate_mq_uv)) -		return PTR_ERR(xpc_activate_mq_uv); +	if (xpc_mq_node < 0) +		for_each_online_node(nid) { +			ret = xpc_init_mq_node(nid); -	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, -						XPC_NOTIFY_IRQ_NAME, -						xpc_handle_notify_IRQ_uv); -	if (IS_ERR(xpc_notify_mq_uv)) { -		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); -		return PTR_ERR(xpc_notify_mq_uv); -	} +			if (!ret) +				break; +		} +	else +		ret = xpc_init_mq_node(xpc_mq_node); -	return 0; +	if (ret < 0) +		dev_err(xpc_part, "xpc_init_mq_node() returned 
error=%d\n", +			-ret); + +	return ret;  }  void @@ -1765,3 +1808,6 @@ xpc_exit_uv(void)  	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);  	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);  } + +module_param(xpc_mq_node, int, 0); +MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues."); diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c index 1ff460a8e9c..93b4d67cc4a 100644 --- a/drivers/misc/ti-st/st_ll.c +++ b/drivers/misc/ti-st/st_ll.c @@ -87,7 +87,7 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)  	/* communicate to platform about chip wakeup */  	kim_data = st_data->kim_data;  	pdata = kim_data->kim_pdev->dev.platform_data; -	if (pdata->chip_asleep) +	if (pdata->chip_awake)  		pdata->chip_awake(NULL);  } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6fae5f3ec7f..d688a8af432 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -398,7 +398,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,  		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));  	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; -	if (unlikely(netpoll_tx_running(slave_dev))) +	if (unlikely(netpoll_tx_running(bond->dev)))  		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);  	else  		dev_queue_xmit(skb); @@ -1235,12 +1235,12 @@ static inline int slave_enable_netpoll(struct slave *slave)  	struct netpoll *np;  	int err = 0; -	np = kzalloc(sizeof(*np), GFP_KERNEL); +	np = kzalloc(sizeof(*np), GFP_ATOMIC);  	err = -ENOMEM;  	if (!np)  		goto out; -	err = __netpoll_setup(np, slave->dev); +	err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);  	if (err) {  		kfree(np);  		goto out; @@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave)  		return;  	slave->np = NULL; -	synchronize_rcu_bh(); -	__netpoll_cleanup(np); -	kfree(np); +	__netpoll_free_rcu(np);  }  static inline bool 
slave_dev_support_netpoll(struct net_device *slave_dev)  { @@ -1292,7 +1290,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)  	read_unlock(&bond->lock);  } -static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)  {  	struct bonding *bond = netdev_priv(dev);  	struct slave *slave; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 0f2d1a71090..15145330940 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -174,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)  	new_bus->phy_mask = ~0;  	new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); -	if (!new_bus->irq) +	if (!new_bus->irq) { +		ret = -ENOMEM;  		goto out_unmap_regs; +	}  	new_bus->parent = &ofdev->dev;  	dev_set_drvdata(&ofdev->dev, new_bus); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 55bb867258e..cdf702a5948 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -137,8 +137,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)  	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);  	fec->fecp = ioremap(res.start, resource_size(&res)); -	if (!fec->fecp) +	if (!fec->fecp) { +		ret = -ENOMEM;  		goto out_fec; +	}  	if (get_bus_freq) {  		clock = get_bus_freq(ofdev->dev.of_node); @@ -172,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)  	new_bus->phy_mask = ~0;  	new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); -	if (!new_bus->irq) +	if (!new_bus->irq) { +		ret = -ENOMEM;  		goto out_unmap_regs; +	}  	new_bus->parent = &ofdev->dev;  	dev_set_drvdata(&ofdev->dev, new_bus); 
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 88b7b3e75ab..daf41792366 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -358,13 +358,14 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,  }  int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, -			u64 virt, int obj_size,	int nobj, int reserved, +			u64 virt, int obj_size,	u32 nobj, int reserved,  			int use_lowmem, int use_coherent)  {  	int obj_per_chunk;  	int num_icm;  	unsigned chunk_size;  	int i; +	u64 size;  	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;  	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; @@ -380,10 +381,12 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,  	table->coherent = use_coherent;  	mutex_init(&table->mutex); +	size = (u64) nobj * obj_size;  	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {  		chunk_size = MLX4_TABLE_CHUNK_SIZE; -		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size) -			chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE); +		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size) +			chunk_size = PAGE_ALIGN(size - +					i * MLX4_TABLE_CHUNK_SIZE);  		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,  					       (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index 19e4efc0b34..a67744f5350 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -78,7 +78,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,  void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,  			  int start, int end);  int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, -			u64 virt, int obj_size,	int nobj, int reserved, +			u64 virt, int obj_size,	u32 nobj, int reserved,  			int use_lowmem, int use_coherent);  void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);  void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 4ec3835e1bc..a018ea2a43d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -432,8 +432,10 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,  			if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {  				/* Entry already exists, add to duplicates */  				dqp = kmalloc(sizeof *dqp, GFP_KERNEL); -				if (!dqp) +				if (!dqp) { +					err = -ENOMEM;  					goto out_mailbox; +				}  				dqp->qpn = qpn;  				list_add_tail(&dqp->list, &entry->duplicates);  				found = true; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 59ebc033963..4d9df8f2a12 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -249,7 +249,7 @@ struct mlx4_bitmap {  struct mlx4_buddy {  	unsigned long	      **bits;  	unsigned int	       *num_free; -	int			max_order; +	u32			max_order;  	spinlock_t		lock;  }; @@ -258,7 +258,7 @@ struct mlx4_icm;  struct mlx4_icm_table {  	u64			virt;  	int			num_icm; 
-	int			num_obj; +	u32			num_obj;  	int			obj_size;  	int			lowmem;  	int			coherent; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index af55b7ce534..c202d3ad2a0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -37,6 +37,7 @@  #include <linux/export.h>  #include <linux/slab.h>  #include <linux/kernel.h> +#include <linux/vmalloc.h>  #include <linux/mlx4/cmd.h> @@ -120,7 +121,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)  	buddy->max_order = max_order;  	spin_lock_init(&buddy->lock); -	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), +	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),  			      GFP_KERNEL);  	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,  				  GFP_KERNEL); @@ -129,10 +130,12 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)  	for (i = 0; i <= buddy->max_order; ++i) {  		s = BITS_TO_LONGS(1 << (buddy->max_order - i)); -		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); -		if (!buddy->bits[i]) -			goto err_out_free; -		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); +		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); +		if (!buddy->bits[i]) { +			buddy->bits[i] = vzalloc(s * sizeof(long)); +			if (!buddy->bits[i]) +				goto err_out_free; +		}  	}  	set_bit(0, buddy->bits[buddy->max_order]); @@ -142,7 +145,10 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)  err_out_free:  	for (i = 0; i <= buddy->max_order; ++i) -		kfree(buddy->bits[i]); +		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i])) +			vfree(buddy->bits[i]); +		else +			kfree(buddy->bits[i]);  err_out:  	kfree(buddy->bits); @@ -156,7 +162,10 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)  	int i;  	for (i = 0; i <= buddy->max_order; ++i) -		kfree(buddy->bits[i]); +		if (is_vmalloc_addr(buddy->bits[i])) +			
vfree(buddy->bits[i]); +		else +			kfree(buddy->bits[i]);  	kfree(buddy->bits);  	kfree(buddy->num_free); @@ -668,7 +677,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)  		return err;  	err = mlx4_buddy_init(&mr_table->mtt_buddy, -			      ilog2(dev->caps.num_mtts / +			      ilog2((u32)dev->caps.num_mtts /  			      (1 << log_mtts_per_seg)));  	if (err)  		goto err_buddy; @@ -678,7 +687,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)  			mlx4_alloc_mtt_range(dev,  					     fls(dev->caps.reserved_mtts - 1));  		if (priv->reserved_mtts < 0) { -			mlx4_warn(dev, "MTT table of order %d is too small.\n", +			mlx4_warn(dev, "MTT table of order %u is too small.\n",  				  mr_table->mtt_buddy.max_order);  			err = -ENOMEM;  			goto err_reserve_mtts; diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 9ee4725363d..8e0c3cc2a1e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -76,7 +76,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,  		u64 size;  		u64 start;  		int type; -		int num; +		u32 num;  		int log_num;  	}; @@ -105,7 +105,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,  	si_meminfo(&si);  	request->num_mtt =  		roundup_pow_of_two(max_t(unsigned, request->num_mtt, -					 min(1UL << 31, +					 min(1UL << (31 - log_mtts_per_seg),  					     si.totalram >> (log_mtts_per_seg - 1))));  	profile[MLX4_RES_QP].size     = dev_cap->qpc_entry_sz; diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 46df3a04030..24c2305d794 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -8,7 +8,7 @@ config SH_ETH  		(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \  		 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \  		 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ -		 CPU_SUBTYPE_SH7757 || ARCH_R8A7740) +		 CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779)  	select CRC32  	select 
NET_CORE  	select MII @@ -18,4 +18,4 @@ config SH_ETH  	  Renesas SuperH Ethernet device driver.  	  This driver supporting CPUs are:  		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, -		  and R8A7740. +		  R8A7740 and R8A7779. diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index af0b867a6cf..bad8f2eec9b 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -78,7 +78,7 @@ static void sh_eth_select_mii(struct net_device *ndev)  #endif  /* There is CPU dependent code */ -#if defined(CONFIG_CPU_SUBTYPE_SH7724) +#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779)  #define SH_ETH_RESET_DEFAULT	1  static void sh_eth_set_duplex(struct net_device *ndev)  { @@ -93,13 +93,18 @@ static void sh_eth_set_duplex(struct net_device *ndev)  static void sh_eth_set_rate(struct net_device *ndev)  {  	struct sh_eth_private *mdp = netdev_priv(ndev); +	unsigned int bits = ECMR_RTM; + +#if defined(CONFIG_ARCH_R8A7779) +	bits |= ECMR_ELB; +#endif  	switch (mdp->speed) {  	case 10: /* 10BASE */ -		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); +		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR);  		break;  	case 100:/* 100BASE */ -		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); +		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR);  		break;  	default:  		break; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fd8882f9602..c136162e647 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2077,7 +2077,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,  		goto error_netdev_register;  	} -	priv->stmmac_clk = clk_get(priv->device, NULL); +	priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);  	if (IS_ERR(priv->stmmac_clk)) {  		pr_warning("%s: warning: cannot get CSR 
clock\n", __func__);  		goto error_clk_get; diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 3b5c4571b55..d15c888e9df 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -538,11 +538,12 @@ EXPORT_SYMBOL_GPL(cpdma_chan_create);  int cpdma_chan_destroy(struct cpdma_chan *chan)  { -	struct cpdma_ctlr *ctlr = chan->ctlr; +	struct cpdma_ctlr *ctlr;  	unsigned long flags;  	if (!chan)  		return -EINVAL; +	ctlr = chan->ctlr;  	spin_lock_irqsave(&ctlr->lock, flags);  	if (chan->state != CPDMA_STATE_IDLE) diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c index 824e2a93fe8..5f3aeac3f86 100644 --- a/drivers/net/irda/ks959-sir.c +++ b/drivers/net/irda/ks959-sir.c @@ -542,6 +542,7 @@ static int ks959_net_open(struct net_device *netdev)  	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);  	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);  	if (!kingsun->irlap) { +		err = -ENOMEM;  		dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");  		goto free_mem;  	} diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c index 5a278ab83c2..2d4b6a1ab20 100644 --- a/drivers/net/irda/ksdazzle-sir.c +++ b/drivers/net/irda/ksdazzle-sir.c @@ -436,6 +436,7 @@ static int ksdazzle_net_open(struct net_device *netdev)  	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);  	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);  	if (!kingsun->irlap) { +		err = -ENOMEM;  		dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");  		goto free_mem;  	} diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index f9347ea3d38..b3321129a83 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -640,15 +640,9 @@ static int netconsole_netdev_event(struct notifier_block *this,  				 * rtnl_lock already held  				 */  				if (nt->np.dev) { -					spin_unlock_irqrestore( -							      &target_list_lock, -							      
flags);  					__netpoll_cleanup(&nt->np); -					spin_lock_irqsave(&target_list_lock, -							  flags);  					dev_put(nt->np.dev);  					nt->np.dev = NULL; -					netconsole_target_put(nt);  				}  				nt->enabled = 0;  				stopped = true; diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 5c120189ec8..4d4d25efc1e 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -132,7 +132,7 @@ int mdio_mux_init(struct device *dev,  	pb->mii_bus = parent_bus;  	ret_val = -ENODEV; -	for_each_child_of_node(dev->of_node, child_bus_node) { +	for_each_available_child_of_node(dev->of_node, child_bus_node) {  		u32 v;  		r = of_property_read_u32(child_bus_node, "reg", &v); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 87707ab3943..341b65dbbcd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -795,16 +795,17 @@ static void team_port_leave(struct team *team, struct team_port *port)  }  #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, +				    gfp_t gfp)  {  	struct netpoll *np;  	int err; -	np = kzalloc(sizeof(*np), GFP_KERNEL); +	np = kzalloc(sizeof(*np), gfp);  	if (!np)  		return -ENOMEM; -	err = __netpoll_setup(np, port->dev); +	err = __netpoll_setup(np, port->dev, gfp);  	if (err) {  		kfree(np);  		return err; @@ -833,7 +834,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)  }  #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, +				    gfp_t gfp)  {  	return 0;  } @@ -913,7 +915,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)  	}  	if (team_netpoll_info(team)) { -		err = team_port_enable_netpoll(team, port); +		err = team_port_enable_netpoll(team, port, GFP_KERNEL);  		if (err) {  			
netdev_err(dev, "Failed to enable netpoll on device %s\n",  				   portname); @@ -1443,7 +1445,7 @@ static void team_netpoll_cleanup(struct net_device *dev)  }  static int team_netpoll_setup(struct net_device *dev, -			      struct netpoll_info *npifo) +			      struct netpoll_info *npifo, gfp_t gfp)  {  	struct team *team = netdev_priv(dev);  	struct team_port *port; @@ -1451,7 +1453,7 @@ static int team_netpoll_setup(struct net_device *dev,  	mutex_lock(&team->lock);  	list_for_each_entry(port, &team->port_list, list) { -		err = team_port_enable_netpoll(team, port); +		err = team_port_enable_netpoll(team, port, gfp);  		if (err) {  			__team_netpoll_cleanup(team);  			break; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 2ea126a16d7..328397c6673 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -247,30 +247,12 @@ err:   */  static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)  { -	int rv;  	struct qmi_wwan_state *info = (void *)&dev->data; -	/* ZTE makes devices where the interface descriptors and endpoint -	 * configurations of two or more interfaces are identical, even -	 * though the functions are completely different.  
If set, then -	 * driver_info->data is a bitmap of acceptable interface numbers -	 * allowing us to bind to one such interface without binding to -	 * all of them -	 */ -	if (dev->driver_info->data && -	    !test_bit(intf->cur_altsetting->desc.bInterfaceNumber, &dev->driver_info->data)) { -		dev_info(&intf->dev, "not on our whitelist - ignored"); -		rv = -ENODEV; -		goto err; -	} -  	/*  control and data is shared */  	info->control = intf;  	info->data = intf; -	rv = qmi_wwan_register_subdriver(dev); - -err: -	return rv; +	return qmi_wwan_register_subdriver(dev);  }  static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -356,214 +338,64 @@ static const struct driver_info	qmi_wwan_shared = {  	.manage_power	= qmi_wwan_manage_power,  }; -static const struct driver_info	qmi_wwan_force_int0 = { -	.description	= "Qualcomm WWAN/QMI device", -	.flags		= FLAG_WWAN, -	.bind		= qmi_wwan_bind_shared, -	.unbind		= qmi_wwan_unbind, -	.manage_power	= qmi_wwan_manage_power, -	.data		= BIT(0), /* interface whitelist bitmap */ -}; - -static const struct driver_info	qmi_wwan_force_int1 = { -	.description	= "Qualcomm WWAN/QMI device", -	.flags		= FLAG_WWAN, -	.bind		= qmi_wwan_bind_shared, -	.unbind		= qmi_wwan_unbind, -	.manage_power	= qmi_wwan_manage_power, -	.data		= BIT(1), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int2 = { -	.description	= "Qualcomm WWAN/QMI device", -	.flags		= FLAG_WWAN, -	.bind		= qmi_wwan_bind_shared, -	.unbind		= qmi_wwan_unbind, -	.manage_power	= qmi_wwan_manage_power, -	.data		= BIT(2), /* interface whitelist bitmap */ -}; - -static const struct driver_info	qmi_wwan_force_int3 = { -	.description	= "Qualcomm WWAN/QMI device", -	.flags		= FLAG_WWAN, -	.bind		= qmi_wwan_bind_shared, -	.unbind		= qmi_wwan_unbind, -	.manage_power	= qmi_wwan_manage_power, -	.data		= BIT(3), /* interface whitelist bitmap */ -}; - -static const struct driver_info	qmi_wwan_force_int4 = { -	.description	= 
"Qualcomm WWAN/QMI device", -	.flags		= FLAG_WWAN, -	.bind		= qmi_wwan_bind_shared, -	.unbind		= qmi_wwan_unbind, -	.manage_power	= qmi_wwan_manage_power, -	.data		= BIT(4), /* interface whitelist bitmap */ -}; - -/* Sierra Wireless provide equally useless interface descriptors - * Devices in QMI mode can be switched between two different - * configurations: - *   a) USB interface #8 is QMI/wwan - *   b) USB interfaces #8, #19 and #20 are QMI/wwan - * - * Both configurations provide a number of other interfaces (serial++), - * some of which have the same endpoint configuration as we expect, so - * a whitelist or blacklist is necessary. - * - * FIXME: The below whitelist should include BIT(20).  It does not - * because I cannot get it to work... - */ -static const struct driver_info	qmi_wwan_sierra = { -	.description	= "Sierra Wireless wwan/QMI device", -	.flags		= FLAG_WWAN, -	.bind		= qmi_wwan_bind_shared, -	.unbind		= qmi_wwan_unbind, -	.manage_power	= qmi_wwan_manage_power, -	.data		= BIT(8) | BIT(19), /* interface whitelist bitmap */ -}; -  #define HUAWEI_VENDOR_ID	0x12D1 +/* map QMI/wwan function by a fixed interface number */ +#define QMI_FIXED_INTF(vend, prod, num) \ +	USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ +	.driver_info = (unsigned long)&qmi_wwan_shared +  /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */  #define QMI_GOBI1K_DEVICE(vend, prod) \ -	USB_DEVICE(vend, prod), \ -	.driver_info = (unsigned long)&qmi_wwan_force_int3 +	QMI_FIXED_INTF(vend, prod, 3) -/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */ +/* Gobi 2000/3000 QMI/wwan interface number is 0 according to qcserial */  #define QMI_GOBI_DEVICE(vend, prod) \ -	USB_DEVICE(vend, prod), \ -	.driver_info = (unsigned long)&qmi_wwan_force_int0 +	QMI_FIXED_INTF(vend, prod, 0)  static const struct usb_device_id products[] = { +	/* 1. 
CDC ECM like devices match on the control interface */  	{	/* Huawei E392, E398 and possibly others sharing both device id and more... */ -		.match_flags        = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = HUAWEI_VENDOR_ID, -		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC, -		.bInterfaceSubClass = 1, -		.bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */ +		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 9),  		.driver_info        = (unsigned long)&qmi_wwan_info,  	},  	{	/* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ -		.match_flags        = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = HUAWEI_VENDOR_ID, -		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC, -		.bInterfaceSubClass = 1, -		.bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */ +		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),  		.driver_info        = (unsigned long)&qmi_wwan_info,  	}, -	{	/* Huawei E392, E398 and possibly others in "Windows mode" -		 * using a combined control and data interface without any CDC -		 * functional descriptors -		 */ -		.match_flags        = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = HUAWEI_VENDOR_ID, -		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC, -		.bInterfaceSubClass = 1, -		.bInterfaceProtocol = 17, + +	/* 2. 
Combined interface devices matching on class+protocol */ +	{	/* Huawei E392, E398 and possibly others in "Windows mode" */ +		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),  		.driver_info        = (unsigned long)&qmi_wwan_shared,  	},  	{	/* Pantech UML290 */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x106c, -		.idProduct          = 0x3718, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xf0, -		.bInterfaceProtocol = 0xff, +		USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),  		.driver_info        = (unsigned long)&qmi_wwan_shared,  	}, -	{	/* ZTE MF820D */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x19d2, -		.idProduct          = 0x0167, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 0xff, -		.driver_info        = (unsigned long)&qmi_wwan_force_int4, -	}, -	{	/* ZTE MF821D */ -		.match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x19d2, -		.idProduct          = 0x0326, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 0xff, -		.driver_info        = (unsigned long)&qmi_wwan_force_int4, -	}, -	{	/* ZTE (Vodafone) K3520-Z */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x19d2, -		.idProduct          = 0x0055, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 0xff, -		.driver_info        = (unsigned long)&qmi_wwan_force_int1, -	}, -	{	/* ZTE (Vodafone) K3565-Z */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x19d2, -		.idProduct          = 0x0063, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 0xff, -		.driver_info        = (unsigned 
long)&qmi_wwan_force_int4, -	}, -	{	/* ZTE (Vodafone) K3570-Z */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x19d2, -		.idProduct          = 0x1008, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 0xff, -		.driver_info        = (unsigned long)&qmi_wwan_force_int4, -	}, -	{	/* ZTE (Vodafone) K3571-Z */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x19d2, -		.idProduct          = 0x1010, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 0xff, -		.driver_info        = (unsigned long)&qmi_wwan_force_int4, -	}, -	{	/* ZTE (Vodafone) K3765-Z */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x19d2, -		.idProduct          = 0x2002, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 0xff, -		.driver_info        = (unsigned long)&qmi_wwan_force_int4, -	}, -	{	/* ZTE (Vodafone) K4505-Z */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x19d2, -		.idProduct          = 0x0104, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 0xff, -		.driver_info        = (unsigned long)&qmi_wwan_force_int4, -	}, -	{	/* ZTE MF60 */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x19d2, -		.idProduct          = 0x1402, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 0xff, -		.driver_info        = (unsigned long)&qmi_wwan_force_int2, -	}, -	{	/* Sierra Wireless MC77xx in QMI mode */ -		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, -		.idVendor           = 0x1199, -		.idProduct          = 0x68a2, -		.bInterfaceClass    = 0xff, -		.bInterfaceSubClass = 0xff, -		.bInterfaceProtocol = 
0xff, -		.driver_info        = (unsigned long)&qmi_wwan_sierra, +	{	/* Pantech UML290 - newer firmware */ +		USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), +		.driver_info        = (unsigned long)&qmi_wwan_shared,  	}, -	/* Gobi 1000 devices */ +	/* 3. Combined interface devices matching on interface number */ +	{QMI_FIXED_INTF(0x19d2, 0x0055, 1)},	/* ZTE (Vodafone) K3520-Z */ +	{QMI_FIXED_INTF(0x19d2, 0x0063, 4)},	/* ZTE (Vodafone) K3565-Z */ +	{QMI_FIXED_INTF(0x19d2, 0x0104, 4)},	/* ZTE (Vodafone) K4505-Z */ +	{QMI_FIXED_INTF(0x19d2, 0x0167, 4)},	/* ZTE MF820D */ +	{QMI_FIXED_INTF(0x19d2, 0x0326, 4)},	/* ZTE MF821D */ +	{QMI_FIXED_INTF(0x19d2, 0x1008, 4)},	/* ZTE (Vodafone) K3570-Z */ +	{QMI_FIXED_INTF(0x19d2, 0x1010, 4)},	/* ZTE (Vodafone) K3571-Z */ +	{QMI_FIXED_INTF(0x19d2, 0x1018, 3)},	/* ZTE (Vodafone) K5006-Z */ +	{QMI_FIXED_INTF(0x19d2, 0x1402, 2)},	/* ZTE MF60 */ +	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */ +	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */ +	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */ +	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */ +	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */ +	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */ + +	/* 4. Gobi 1000 devices */  	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */  	{QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},	/* HP un2400 Gobi Modem Device */  	{QMI_GOBI1K_DEVICE(0x03f0, 0x371d)},	/* HP un2430 Mobile Broadband Module */ @@ -579,7 +411,7 @@ static const struct usb_device_id products[] = {  	{QMI_GOBI1K_DEVICE(0x05c6, 0x9222)},	/* Generic Gobi Modem device */  	{QMI_GOBI1K_DEVICE(0x05c6, 0x9009)},	/* Generic Gobi Modem device */ -	/* Gobi 2000 and 3000 devices */ +	/* 5. 
Gobi 2000 and 3000 devices */  	{QMI_GOBI_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */  	{QMI_GOBI_DEVICE(0x05c6, 0x920b)},	/* Generic Gobi 2000 Modem device */  	{QMI_GOBI_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */ @@ -589,6 +421,8 @@ static const struct usb_device_id products[] = {  	{QMI_GOBI_DEVICE(0x05c6, 0x9265)},	/* Asus Gobi 2000 Modem device (VR305) */  	{QMI_GOBI_DEVICE(0x05c6, 0x9235)},	/* Top Global Gobi 2000 Modem device (VR306) */  	{QMI_GOBI_DEVICE(0x05c6, 0x9275)},	/* iRex Technologies Gobi 2000 Modem device (VR307) */ +	{QMI_GOBI_DEVICE(0x1199, 0x68a5)},	/* Sierra Wireless Modem */ +	{QMI_GOBI_DEVICE(0x1199, 0x68a9)},	/* Sierra Wireless Modem */  	{QMI_GOBI_DEVICE(0x1199, 0x9001)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */  	{QMI_GOBI_DEVICE(0x1199, 0x9002)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */  	{QMI_GOBI_DEVICE(0x1199, 0x9003)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */ @@ -600,11 +434,14 @@ static const struct usb_device_id products[] = {  	{QMI_GOBI_DEVICE(0x1199, 0x9009)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */  	{QMI_GOBI_DEVICE(0x1199, 0x900a)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */  	{QMI_GOBI_DEVICE(0x1199, 0x9011)},	/* Sierra Wireless Gobi 2000 Modem device (MC8305) */ +	{QMI_FIXED_INTF(0x1199, 0x9011, 5)},	/* alternate interface number!? 
*/  	{QMI_GOBI_DEVICE(0x16d8, 0x8002)},	/* CMDTech Gobi 2000 Modem device (VU922) */  	{QMI_GOBI_DEVICE(0x05c6, 0x9205)},	/* Gobi 2000 Modem device */  	{QMI_GOBI_DEVICE(0x1199, 0x9013)},	/* Sierra Wireless Gobi 3000 Modem device (MC8355) */  	{QMI_GOBI_DEVICE(0x1199, 0x9015)},	/* Sierra Wireless Gobi 3000 Modem device */  	{QMI_GOBI_DEVICE(0x1199, 0x9019)},	/* Sierra Wireless Gobi 3000 Modem device */ +	{QMI_GOBI_DEVICE(0x1199, 0x901b)},	/* Sierra Wireless MC7770 */ +  	{ }					/* END */  };  MODULE_DEVICE_TABLE(usb, products); diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index d75d1f56bec..7be49ea60b6 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -68,15 +68,8 @@ static	atomic_t iface_counter = ATOMIC_INIT(0);   */  #define SIERRA_NET_USBCTL_BUF_LEN	1024 -/* list of interface numbers - used for constructing interface lists */ -struct sierra_net_iface_info { -	const u32 infolen;	/* number of interface numbers on list */ -	const u8  *ifaceinfo;	/* pointer to the array holding the numbers */ -}; -  struct sierra_net_info_data {  	u16 rx_urb_size; -	struct sierra_net_iface_info whitelist;  };  /* Private data structure */ @@ -637,21 +630,6 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu)  	return usbnet_change_mtu(net, new_mtu);  } -static int is_whitelisted(const u8 ifnum, -			const struct sierra_net_iface_info *whitelist) -{ -	if (whitelist) { -		const u8 *list = whitelist->ifaceinfo; -		int i; - -		for (i = 0; i < whitelist->infolen; i++) { -			if (list[i] == ifnum) -				return 1; -		} -	} -	return 0; -} -  static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)  {  	int result = 0; @@ -706,11 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)  	dev_dbg(&dev->udev->dev, "%s", __func__);  	ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; -	/* We only accept certain interfaces */ -	if (!is_whitelisted(ifacenum, &data->whitelist)) { 
-		dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum); -		return -ENODEV; -	}  	numendpoints = intf->cur_altsetting->desc.bNumEndpoints;  	/* We have three endpoints, bulk in and out, and a status */  	if (numendpoints != 3) { @@ -945,13 +918,8 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,  	return NULL;  } -static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };  static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {  	.rx_urb_size = 8 * 1024, -	.whitelist = { -		.infolen = ARRAY_SIZE(sierra_net_ifnum_list), -		.ifaceinfo = sierra_net_ifnum_list -	}  };  static const struct driver_info sierra_net_info_direct_ip = { @@ -965,15 +933,19 @@ static const struct driver_info sierra_net_info_direct_ip = {  	.data = (unsigned long)&sierra_net_info_data_direct_ip,  }; +#define DIRECT_IP_DEVICE(vend, prod) \ +	{USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \ +	.driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ +	{USB_DEVICE_INTERFACE_NUMBER(vend, prod, 10), \ +	.driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ +	{USB_DEVICE_INTERFACE_NUMBER(vend, prod, 11), \ +	.driver_info = (unsigned long)&sierra_net_info_direct_ip} +  static const struct usb_device_id products[] = { -	{USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ -	.driver_info = (unsigned long) &sierra_net_info_direct_ip}, -	{USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ -	.driver_info = (unsigned long) &sierra_net_info_direct_ip}, -	{USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ -	.driver_info = (unsigned long) &sierra_net_info_direct_ip}, -	{USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ -	.driver_info = (unsigned long) &sierra_net_info_direct_ip}, +	DIRECT_IP_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ +	DIRECT_IP_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ +	DIRECT_IP_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ +	
DIRECT_IP_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */  	{}, /* last item */  }; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 93e0cfb739b..ce9d4f2c977 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3019,6 +3019,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,  	netdev->watchdog_timeo = 5 * HZ;  	INIT_WORK(&adapter->work, vmxnet3_reset_work); +	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);  	if (adapter->intr.type == VMXNET3_IT_MSIX) {  		int i; @@ -3043,7 +3044,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,  		goto err_register;  	} -	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);  	vmxnet3_check_link(adapter, false);  	atomic_inc(&devices_found);  	return 0; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 9eb6479306d..ef36cafd44b 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -774,14 +774,15 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,  	}  	/* Global interrupt queue */  	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1); + +	rc = -ENOMEM; +  	priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,  		IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);  	if (!priv->iqcfg)  		goto err_free_irq_5;  	writel(priv->iqcfg_dma, ioaddr + IQCFG); -	rc = -ENOMEM; -  	/*  	 * SCC 0-3 private rx/tx irq structures  	 * IQRX/TXi needs to be set soon. Learned it the hard way... 
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index 283237f6f07..def12b38cbf 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -326,8 +326,10 @@ int i2400m_barker_db_init(const char *_options)  		unsigned barker;  		options_orig = kstrdup(_options, GFP_KERNEL); -		if (options_orig == NULL) +		if (options_orig == NULL) { +			result = -ENOMEM;  			goto error_parse; +		}  		options = options_orig;  		while ((token = strsep(&options, ",")) != NULL) { diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index efc162e0b51..88b8d64c90f 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -342,7 +342,7 @@ static int at76_dfu_get_status(struct usb_device *udev,  	return ret;  } -static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state) +static int at76_dfu_get_state(struct usb_device *udev, u8 *state)  {  	int ret; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 8c4c040a47b..2aab20ee9f3 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -2056,9 +2056,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)  void  ath5k_beacon_config(struct ath5k_hw *ah)  { -	unsigned long flags; - -	spin_lock_irqsave(&ah->block, flags); +	spin_lock_bh(&ah->block);  	ah->bmisscount = 0;  	ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); @@ -2085,7 +2083,7 @@ ath5k_beacon_config(struct ath5k_hw *ah)  	ath5k_hw_set_imr(ah, ah->imask);  	mmiowb(); -	spin_unlock_irqrestore(&ah->block, flags); +	spin_unlock_bh(&ah->block);  }  static void ath5k_tasklet_beacon(unsigned long data) diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 260e7dc7f75..d56453e43d7 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -254,7 +254,6 @@ 
ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,  	struct ath5k_vif *avf = (void *)vif->drv_priv;  	struct ath5k_hw *ah = hw->priv;  	struct ath_common *common = ath5k_hw_common(ah); -	unsigned long flags;  	mutex_lock(&ah->lock); @@ -300,9 +299,9 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,  	}  	if (changes & BSS_CHANGED_BEACON) { -		spin_lock_irqsave(&ah->block, flags); +		spin_lock_bh(&ah->block);  		ath5k_beacon_update(hw, vif); -		spin_unlock_irqrestore(&ah->block, flags); +		spin_unlock_bh(&ah->block);  	}  	if (changes & BSS_CHANGED_BEACON_ENABLED) diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 7990cd55599..b42be910a83 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -773,15 +773,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)  }  EXPORT_SYMBOL(ath9k_hw_intrpend); -void ath9k_hw_disable_interrupts(struct ath_hw *ah) +void ath9k_hw_kill_interrupts(struct ath_hw *ah)  {  	struct ath_common *common = ath9k_hw_common(ah); -	if (!(ah->imask & ATH9K_INT_GLOBAL)) -		atomic_set(&ah->intr_ref_cnt, -1); -	else -		atomic_dec(&ah->intr_ref_cnt); -  	ath_dbg(common, INTERRUPT, "disable IER\n");  	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);  	(void) REG_READ(ah, AR_IER); @@ -793,6 +788,17 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)  		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);  	}  } +EXPORT_SYMBOL(ath9k_hw_kill_interrupts); + +void ath9k_hw_disable_interrupts(struct ath_hw *ah) +{ +	if (!(ah->imask & ATH9K_INT_GLOBAL)) +		atomic_set(&ah->intr_ref_cnt, -1); +	else +		atomic_dec(&ah->intr_ref_cnt); + +	ath9k_hw_kill_interrupts(ah); +}  EXPORT_SYMBOL(ath9k_hw_disable_interrupts);  void ath9k_hw_enable_interrupts(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 0eba36dca6f..4a745e68dd9 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ 
b/drivers/net/wireless/ath/ath9k/mac.h @@ -738,6 +738,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah);  void ath9k_hw_set_interrupts(struct ath_hw *ah);  void ath9k_hw_enable_interrupts(struct ath_hw *ah);  void ath9k_hw_disable_interrupts(struct ath_hw *ah); +void ath9k_hw_kill_interrupts(struct ath_hw *ah);  void ar9002_hw_attach_mac_ops(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6049d8b8285..a22df749b8d 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -462,8 +462,10 @@ irqreturn_t ath_isr(int irq, void *dev)  	if (!ath9k_hw_intrpend(ah))  		return IRQ_NONE; -	if(test_bit(SC_OP_HW_RESET, &sc->sc_flags)) +	if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) { +		ath9k_hw_kill_interrupts(ah);  		return IRQ_HANDLED; +	}  	/*  	 * Figure out the reason(s) for the interrupt.  Note diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index d455de9162e..a978984d78a 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -321,6 +321,7 @@ static int ath_pci_suspend(struct device *device)  	 * Otherwise the chip never moved to full sleep,  	 * when no interface is up.  	 
*/ +	ath9k_stop_btcoex(sc);  	ath9k_hw_disable(sc->sc_ah);  	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 12aca02228c..4480c0cc655 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1044,7 +1044,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)  	struct ieee80211_hw *hw = sc->hw;  	struct ieee80211_hdr *hdr;  	int retval; -	bool decrypt_error = false;  	struct ath_rx_status rs;  	enum ath9k_rx_qtype qtype;  	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); @@ -1066,6 +1065,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)  	tsf_lower = tsf & 0xffffffff;  	do { +		bool decrypt_error = false;  		/* If handling rx interrupt and flush is in progress => exit */  		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))  			break; diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 7f207b6e955..effb044a8a9 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb");   * whenever you add a new device.   
*/ -static struct usb_device_id p54u_table[] __devinitdata = { +static struct usb_device_id p54u_table[] = {  	/* Version 1 devices (pci chip + net2280) */  	{USB_DEVICE(0x0411, 0x0050)},	/* Buffalo WLI2-USB2-G54 */  	{USB_DEVICE(0x045e, 0x00c2)},	/* Microsoft MN-710 */ diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 241162e8111..7a4ae9ee1c6 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -1803,6 +1803,7 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,  						struct cfg80211_pmksa *pmksa,  						int max_pmkids)  { +	struct ndis_80211_pmkid *new_pmkids;  	int i, err, newlen;  	unsigned int count; @@ -1833,11 +1834,12 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,  	/* add new pmkid */  	newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]); -	pmkids = krealloc(pmkids, newlen, GFP_KERNEL); -	if (!pmkids) { +	new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL); +	if (!new_pmkids) {  		err = -ENOMEM;  		goto error;  	} +	pmkids = new_pmkids;  	pmkids->length = cpu_to_le32(newlen);  	pmkids->bssid_info_count = cpu_to_le32(count + 1); diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c index 71a30b02608..533024095c4 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");  MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");  MODULE_LICENSE("GPL"); -static struct usb_device_id rtl8187_table[] __devinitdata = { +static struct usb_device_id rtl8187_table[] = {  	/* Asus */  	{USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},  	/* Belkin */ diff --git a/drivers/of/base.c b/drivers/of/base.c index c181b94abc3..d4a1c9a043e 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -364,6 +364,33 @@ struct device_node *of_get_next_child(const struct 
device_node *node,  EXPORT_SYMBOL(of_get_next_child);  /** + *	of_get_next_available_child - Find the next available child node + *	@node:	parent node + *	@prev:	previous child of the parent node, or NULL to get first + * + *      This function is like of_get_next_child(), except that it + *      automatically skips any disabled nodes (i.e. status = "disabled"). + */ +struct device_node *of_get_next_available_child(const struct device_node *node, +	struct device_node *prev) +{ +	struct device_node *next; + +	read_lock(&devtree_lock); +	next = prev ? prev->sibling : node->child; +	for (; next; next = next->sibling) { +		if (!of_device_is_available(next)) +			continue; +		if (of_node_get(next)) +			break; +	} +	of_node_put(prev); +	read_unlock(&devtree_lock); +	return next; +} +EXPORT_SYMBOL(of_get_next_available_child); + +/**   *	of_find_node_by_path - Find a node matching a full OF path   *	@path:	The full path to match   * diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index fbf7b26c7c8..c5792d622dc 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -266,8 +266,8 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)  	}  	if (!error) -		dev_printk(KERN_INFO, &dev->dev, -				"power state changed by ACPI to D%d\n", state); +		dev_info(&dev->dev, "power state changed by ACPI to %s\n", +			 pci_power_name(state));  	return error;  } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 185be370334..5270f1a9932 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -959,6 +959,13 @@ static int pci_pm_poweroff_noirq(struct device *dev)  	if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))  		pci_prepare_to_sleep(pci_dev); +	/* +	 * The reason for doing this here is the same as for the analogous code +	 * in pci_pm_suspend_noirq(). 
+	 */ +	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) +		pci_write_config_word(pci_dev, PCI_COMMAND, 0); +  	return 0;  } diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index fb7f3bebdc6..dc5c126e398 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -657,11 +657,7 @@ static struct pinctrl *pinctrl_get_locked(struct device *dev)  	if (p != NULL)  		return ERR_PTR(-EBUSY); -	p = create_pinctrl(dev); -	if (IS_ERR(p)) -		return p; - -	return p; +	return create_pinctrl(dev);  }  /** @@ -738,11 +734,8 @@ static struct pinctrl_state *pinctrl_lookup_state_locked(struct pinctrl *p,  			dev_dbg(p->dev, "using pinctrl dummy state (%s)\n",  				name);  			state = create_state(p, name); -			if (IS_ERR(state)) -				return state; -		} else { -			return ERR_PTR(-ENODEV); -		} +		} else +			state = ERR_PTR(-ENODEV);  	}  	return state; diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/pinctrl-imx51.c index 689b3c88dd2..9fd02162a3c 100644 --- a/drivers/pinctrl/pinctrl-imx51.c +++ b/drivers/pinctrl/pinctrl-imx51.c @@ -974,7 +974,7 @@ static struct imx_pin_reg imx51_pin_regs[] = {  	IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */  	IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */  	IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */ -	IMX_PIN_REG(MX51_PAD_SD2_CMD, NO_PAD, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */ +	IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */  	IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */  	IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */  	IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */ diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c index 
5f3e9d0221e..a39fb7a6fc5 100644 --- a/drivers/pinctrl/pinctrl-nomadik-db8500.c +++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c @@ -505,6 +505,8 @@ static const unsigned kp_b_1_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1,  	DB8500_PIN_J3, DB8500_PIN_H2, DB8500_PIN_J2, DB8500_PIN_H1,  	DB8500_PIN_F4, DB8500_PIN_E3, DB8500_PIN_E4, DB8500_PIN_D2,  	DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5 }; +static const unsigned kp_b_2_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1, +	DB8500_PIN_G3, DB8500_PIN_G2, DB8500_PIN_F4, DB8500_PIN_E3};  static const unsigned sm_b_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3,  	DB8500_PIN_C4, DB8500_PIN_E6, DB8500_PIN_A3, DB8500_PIN_B6,  	DB8500_PIN_D6, DB8500_PIN_B7, DB8500_PIN_D7, DB8500_PIN_D8, @@ -662,6 +664,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {  	DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B),  	DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B),  	DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B), +	DB8500_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B),  	DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),  	DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),  	DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B), @@ -751,7 +754,7 @@ DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1");  DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");  DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1",  	"lcd_d8_d11_a_1", "lcd_d12_d23_a_1", "lcd_b_1"); -DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_c_1", "kp_oc1_1"); +DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1");  DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1");  DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1");  DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1"); diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c index ec6ac501b23..3dde6537adb 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/pinctrl-nomadik.c @@ -1292,7 +1292,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)  						NOMADIK_GPIO_TO_IRQ(pdata->first_gpio),  						
0, &nmk_gpio_irq_simple_ops, nmk_chip);  	if (!nmk_chip->domain) { -		pr_err("%s: Failed to create irqdomain\n", np->full_name); +		dev_err(&dev->dev, "failed to create irqdomain\n");  		ret = -ENOSYS;  		goto out;  	} diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 2a262f5c5c0..c86bae828c2 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -289,6 +289,7 @@ config IDEAPAD_LAPTOP  	tristate "Lenovo IdeaPad Laptop Extras"  	depends on ACPI  	depends on RFKILL && INPUT +	depends on SERIO_I8042  	select INPUT_SPARSEKMAP  	help  	  This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. @@ -758,8 +759,11 @@ config SAMSUNG_Q10  config APPLE_GMUX  	tristate "Apple Gmux Driver" +	depends on ACPI  	depends on PNP -	select BACKLIGHT_CLASS_DEVICE +	depends on BACKLIGHT_CLASS_DEVICE +	depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE +	depends on ACPI_VIDEO=n || ACPI_VIDEO  	---help---  	  This driver provides support for the gmux device found on many  	  Apple laptops, which controls the display mux for the hybrid diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 905fa01ac8d..dfb1a92ce94 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -2,6 +2,7 @@   *  Gmux driver for Apple laptops   *   *  Copyright (C) Canonical Ltd. 
<seth.forshee@canonical.com> + *  Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de>   *   *  This program is free software; you can redistribute it and/or modify   *  it under the terms of the GNU General Public License version 2 as @@ -18,16 +19,30 @@  #include <linux/pnp.h>  #include <linux/apple_bl.h>  #include <linux/slab.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/vga_switcheroo.h>  #include <acpi/video.h>  #include <asm/io.h>  struct apple_gmux_data {  	unsigned long iostart;  	unsigned long iolen; +	bool indexed; +	struct mutex index_lock;  	struct backlight_device *bdev; + +	/* switcheroo data */ +	acpi_handle dhandle; +	int gpe; +	enum vga_switcheroo_client_id resume_client_id; +	enum vga_switcheroo_state power_state; +	struct completion powerchange_done;  }; +static struct apple_gmux_data *apple_gmux_data; +  /*   * gmux port offsets. Many of these are not yet used, but may be in the   * future, and it's useful to have them documented here anyhow. 
@@ -45,6 +60,9 @@ struct apple_gmux_data {  #define GMUX_PORT_DISCRETE_POWER	0x50  #define GMUX_PORT_MAX_BRIGHTNESS	0x70  #define GMUX_PORT_BRIGHTNESS		0x74 +#define GMUX_PORT_VALUE			0xc2 +#define GMUX_PORT_READ			0xd0 +#define GMUX_PORT_WRITE			0xd4  #define GMUX_MIN_IO_LEN			(GMUX_PORT_BRIGHTNESS + 4) @@ -59,22 +77,172 @@ struct apple_gmux_data {  #define GMUX_BRIGHTNESS_MASK		0x00ffffff  #define GMUX_MAX_BRIGHTNESS		GMUX_BRIGHTNESS_MASK -static inline u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) +static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)  {  	return inb(gmux_data->iostart + port);  } -static inline void gmux_write8(struct apple_gmux_data *gmux_data, int port, +static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port,  			       u8 val)  {  	outb(val, gmux_data->iostart + port);  } -static inline u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) +static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port)  {  	return inl(gmux_data->iostart + port);  } +static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port, +			     u32 val) +{ +	int i; +	u8 tmpval; + +	for (i = 0; i < 4; i++) { +		tmpval = (val >> (i * 8)) & 0xff; +		outb(tmpval, port + i); +	} +} + +static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data) +{ +	int i = 200; +	u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + +	while (i && (gwr & 0x01)) { +		inb(gmux_data->iostart + GMUX_PORT_READ); +		gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); +		udelay(100); +		i--; +	} + +	return !!i; +} + +static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data) +{ +	int i = 200; +	u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + +	while (i && !(gwr & 0x01)) { +		gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); +		udelay(100); +		i--; +	} + +	if (gwr & 0x01) +		inb(gmux_data->iostart + GMUX_PORT_READ); + +	return !!i; +} + +static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int 
port) +{ +	u8 val; + +	mutex_lock(&gmux_data->index_lock); +	outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); +	gmux_index_wait_ready(gmux_data); +	val = inb(gmux_data->iostart + GMUX_PORT_VALUE); +	mutex_unlock(&gmux_data->index_lock); + +	return val; +} + +static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port, +			      u8 val) +{ +	mutex_lock(&gmux_data->index_lock); +	outb(val, gmux_data->iostart + GMUX_PORT_VALUE); +	gmux_index_wait_ready(gmux_data); +	outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); +	gmux_index_wait_complete(gmux_data); +	mutex_unlock(&gmux_data->index_lock); +} + +static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port) +{ +	u32 val; + +	mutex_lock(&gmux_data->index_lock); +	outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); +	gmux_index_wait_ready(gmux_data); +	val = inl(gmux_data->iostart + GMUX_PORT_VALUE); +	mutex_unlock(&gmux_data->index_lock); + +	return val; +} + +static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port, +			       u32 val) +{ +	int i; +	u8 tmpval; + +	mutex_lock(&gmux_data->index_lock); + +	for (i = 0; i < 4; i++) { +		tmpval = (val >> (i * 8)) & 0xff; +		outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i); +	} + +	gmux_index_wait_ready(gmux_data); +	outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); +	gmux_index_wait_complete(gmux_data); +	mutex_unlock(&gmux_data->index_lock); +} + +static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) +{ +	if (gmux_data->indexed) +		return gmux_index_read8(gmux_data, port); +	else +		return gmux_pio_read8(gmux_data, port); +} + +static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val) +{ +	if (gmux_data->indexed) +		gmux_index_write8(gmux_data, port, val); +	else +		gmux_pio_write8(gmux_data, port, val); +} + +static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) +{ +	if (gmux_data->indexed) +		return gmux_index_read32(gmux_data, port); +	else +		
return gmux_pio_read32(gmux_data, port); +} + +static void gmux_write32(struct apple_gmux_data *gmux_data, int port, +			     u32 val) +{ +	if (gmux_data->indexed) +		gmux_index_write32(gmux_data, port, val); +	else +		gmux_pio_write32(gmux_data, port, val); +} + +static bool gmux_is_indexed(struct apple_gmux_data *gmux_data) +{ +	u16 val; + +	outb(0xaa, gmux_data->iostart + 0xcc); +	outb(0x55, gmux_data->iostart + 0xcd); +	outb(0x00, gmux_data->iostart + 0xce); + +	val = inb(gmux_data->iostart + 0xcc) | +		(inb(gmux_data->iostart + 0xcd) << 8); + +	if (val == 0x55aa) +		return true; + +	return false; +} +  static int gmux_get_brightness(struct backlight_device *bd)  {  	struct apple_gmux_data *gmux_data = bl_get_data(bd); @@ -90,16 +258,7 @@ static int gmux_update_status(struct backlight_device *bd)  	if (bd->props.state & BL_CORE_SUSPENDED)  		return 0; -	/* -	 * Older gmux versions require writing out lower bytes first then -	 * setting the upper byte to 0 to flush the values. Newer versions -	 * accept a single u32 write, but the old method also works, so we -	 * just use the old method for all gmux versions. 
-	 */ -	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS, brightness); -	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 1, brightness >> 8); -	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 2, brightness >> 16); -	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 3, 0); +	gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);  	return 0;  } @@ -110,6 +269,146 @@ static const struct backlight_ops gmux_bl_ops = {  	.update_status = gmux_update_status,  }; +static int gmux_switchto(enum vga_switcheroo_client_id id) +{ +	if (id == VGA_SWITCHEROO_IGD) { +		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1); +		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2); +		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2); +	} else { +		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2); +		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3); +		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3); +	} + +	return 0; +} + +static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data, +				   enum vga_switcheroo_state state) +{ +	INIT_COMPLETION(gmux_data->powerchange_done); + +	if (state == VGA_SWITCHEROO_ON) { +		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); +		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3); +		pr_debug("Discrete card powered up\n"); +	} else { +		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); +		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0); +		pr_debug("Discrete card powered down\n"); +	} + +	gmux_data->power_state = state; + +	if (gmux_data->gpe >= 0 && +	    !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done, +						       msecs_to_jiffies(200))) +		pr_warn("Timeout waiting for gmux switch to complete\n"); + +	return 0; +} + +static int gmux_set_power_state(enum vga_switcheroo_client_id id, +				enum vga_switcheroo_state state) +{ +	if (id == VGA_SWITCHEROO_IGD) +		return 0; + +	return gmux_set_discrete_state(apple_gmux_data, state); +} + +static int gmux_get_client_id(struct 
pci_dev *pdev) +{ +	/* +	 * Early Macbook Pros with switchable graphics use nvidia +	 * integrated graphics. Hardcode that the 9400M is integrated. +	 */ +	if (pdev->vendor == PCI_VENDOR_ID_INTEL) +		return VGA_SWITCHEROO_IGD; +	else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && +		 pdev->device == 0x0863) +		return VGA_SWITCHEROO_IGD; +	else +		return VGA_SWITCHEROO_DIS; +} + +static enum vga_switcheroo_client_id +gmux_active_client(struct apple_gmux_data *gmux_data) +{ +	if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2) +		return VGA_SWITCHEROO_IGD; + +	return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler gmux_handler = { +	.switchto = gmux_switchto, +	.power_state = gmux_set_power_state, +	.get_client_id = gmux_get_client_id, +}; + +static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data) +{ +	gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, +		    GMUX_INTERRUPT_DISABLE); +} + +static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data) +{ +	gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, +		    GMUX_INTERRUPT_ENABLE); +} + +static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data) +{ +	return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS); +} + +static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data) +{ +	u8 status; + +	/* to clear interrupts write back current status */ +	status = gmux_interrupt_get_status(gmux_data); +	gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status); +} + +static void gmux_notify_handler(acpi_handle device, u32 value, void *context) +{ +	u8 status; +	struct pnp_dev *pnp = (struct pnp_dev *)context; +	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + +	status = gmux_interrupt_get_status(gmux_data); +	gmux_disable_interrupts(gmux_data); +	pr_debug("Notify handler called: status %d\n", status); + +	gmux_clear_interrupts(gmux_data); +	gmux_enable_interrupts(gmux_data); + +	if (status & GMUX_INTERRUPT_STATUS_POWER) +		
complete(&gmux_data->powerchange_done); +} + +static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state) +{ +	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); +	gmux_data->resume_client_id = gmux_active_client(gmux_data); +	gmux_disable_interrupts(gmux_data); +	return 0; +} + +static int gmux_resume(struct pnp_dev *pnp) +{ +	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); +	gmux_enable_interrupts(gmux_data); +	gmux_switchto(gmux_data->resume_client_id); +	if (gmux_data->power_state == VGA_SWITCHEROO_OFF) +		gmux_set_discrete_state(gmux_data, gmux_data->power_state); +	return 0; +} +  static int __devinit gmux_probe(struct pnp_dev *pnp,  				const struct pnp_device_id *id)  { @@ -119,6 +418,11 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,  	struct backlight_device *bdev;  	u8 ver_major, ver_minor, ver_release;  	int ret = -ENXIO; +	acpi_status status; +	unsigned long long gpe; + +	if (apple_gmux_data) +		return -EBUSY;  	gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);  	if (!gmux_data) @@ -147,22 +451,29 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,  	}  	/* -	 * On some machines the gmux is in ACPI even thought the machine -	 * doesn't really have a gmux. Check for invalid version information -	 * to detect this. 
+	 * Invalid version information may indicate either that the gmux +	 * device isn't present or that it's a new one that uses indexed +	 * io  	 */ +  	ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);  	ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);  	ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);  	if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) { -		pr_info("gmux device not present\n"); -		ret = -ENODEV; -		goto err_release; +		if (gmux_is_indexed(gmux_data)) { +			mutex_init(&gmux_data->index_lock); +			gmux_data->indexed = true; +		} else { +			pr_info("gmux device not present\n"); +			ret = -ENODEV; +			goto err_release; +		} +		pr_info("Found indexed gmux\n"); +	} else { +		pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor, +			ver_release);  	} -	pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor, -		ver_release); -  	memset(&props, 0, sizeof(props));  	props.type = BACKLIGHT_PLATFORM;  	props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); @@ -194,13 +505,67 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,  	 * Disable the other backlight choices.  	 
*/  	acpi_video_dmi_promote_vendor(); -#ifdef CONFIG_ACPI_VIDEO +#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)  	acpi_video_unregister();  #endif  	apple_bl_unregister(); +	gmux_data->power_state = VGA_SWITCHEROO_ON; + +	gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); +	if (!gmux_data->dhandle) { +		pr_err("Cannot find acpi handle for pnp device %s\n", +		       dev_name(&pnp->dev)); +		ret = -ENODEV; +		goto err_notify; +	} + +	status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe); +	if (ACPI_SUCCESS(status)) { +		gmux_data->gpe = (int)gpe; + +		status = acpi_install_notify_handler(gmux_data->dhandle, +						     ACPI_DEVICE_NOTIFY, +						     &gmux_notify_handler, pnp); +		if (ACPI_FAILURE(status)) { +			pr_err("Install notify handler failed: %s\n", +			       acpi_format_exception(status)); +			ret = -ENODEV; +			goto err_notify; +		} + +		status = acpi_enable_gpe(NULL, gmux_data->gpe); +		if (ACPI_FAILURE(status)) { +			pr_err("Cannot enable gpe: %s\n", +			       acpi_format_exception(status)); +			goto err_enable_gpe; +		} +	} else { +		pr_warn("No GPE found for gmux\n"); +		gmux_data->gpe = -1; +	} + +	if (vga_switcheroo_register_handler(&gmux_handler)) { +		ret = -ENODEV; +		goto err_register_handler; +	} + +	init_completion(&gmux_data->powerchange_done); +	apple_gmux_data = gmux_data; +	gmux_enable_interrupts(gmux_data); +  	return 0; +err_register_handler: +	if (gmux_data->gpe >= 0) +		acpi_disable_gpe(NULL, gmux_data->gpe); +err_enable_gpe: +	if (gmux_data->gpe >= 0) +		acpi_remove_notify_handler(gmux_data->dhandle, +					   ACPI_DEVICE_NOTIFY, +					   &gmux_notify_handler); +err_notify: +	backlight_device_unregister(bdev);  err_release:  	release_region(gmux_data->iostart, gmux_data->iolen);  err_free: @@ -212,12 +577,23 @@ static void __devexit gmux_remove(struct pnp_dev *pnp)  {  	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); +	vga_switcheroo_unregister_handler(); +	
gmux_disable_interrupts(gmux_data); +	if (gmux_data->gpe >= 0) { +		acpi_disable_gpe(NULL, gmux_data->gpe); +		acpi_remove_notify_handler(gmux_data->dhandle, +					   ACPI_DEVICE_NOTIFY, +					   &gmux_notify_handler); +	} +  	backlight_device_unregister(gmux_data->bdev); +  	release_region(gmux_data->iostart, gmux_data->iolen); +	apple_gmux_data = NULL;  	kfree(gmux_data);  	acpi_video_dmi_demote_vendor(); -#ifdef CONFIG_ACPI_VIDEO +#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)  	acpi_video_register();  #endif  	apple_bl_register(); @@ -233,6 +609,8 @@ static struct pnp_driver gmux_pnp_driver = {  	.probe		= gmux_probe,  	.remove		= __devexit_p(gmux_remove),  	.id_table	= gmux_device_ids, +	.suspend	= gmux_suspend, +	.resume		= gmux_resume  };  static int __init apple_gmux_init(void) diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index c7a36f6b058..2eb9fe8e8ef 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -101,6 +101,7 @@ MODULE_LICENSE("GPL");  #define ASUS_WMI_DEVID_WIRELESS_LED	0x00010002  #define ASUS_WMI_DEVID_CWAP		0x00010003  #define ASUS_WMI_DEVID_WLAN		0x00010011 +#define ASUS_WMI_DEVID_WLAN_LED		0x00010012  #define ASUS_WMI_DEVID_BLUETOOTH	0x00010013  #define ASUS_WMI_DEVID_GPS		0x00010015  #define ASUS_WMI_DEVID_WIMAX		0x00010017 @@ -731,8 +732,21 @@ static int asus_rfkill_set(void *data, bool blocked)  {  	struct asus_rfkill *priv = data;  	u32 ctrl_param = !blocked; +	u32 dev_id = priv->dev_id; -	return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL); +	/* +	 * If the user bit is set, BIOS can't set and record the wlan status, +	 * it will report the value read from id ASUS_WMI_DEVID_WLAN_LED +	 * while we query the wlan status through WMI(ASUS_WMI_DEVID_WLAN). +	 * So, we have to record wlan status in id ASUS_WMI_DEVID_WLAN_LED +	 * while setting the wlan status through WMI. +	 * This is also the behavior that windows app will do. 
+	 */ +	if ((dev_id == ASUS_WMI_DEVID_WLAN) && +	     priv->asus->driver->wlan_ctrl_by_user) +		dev_id = ASUS_WMI_DEVID_WLAN_LED; + +	return asus_wmi_set_devstate(dev_id, ctrl_param, NULL);  }  static void asus_rfkill_query(struct rfkill *rfkill, void *data) @@ -1653,6 +1667,7 @@ static int asus_wmi_add(struct platform_device *pdev)  	struct asus_wmi *asus;  	acpi_status status;  	int err; +	u32 result;  	asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL);  	if (!asus) @@ -1711,6 +1726,10 @@ static int asus_wmi_add(struct platform_device *pdev)  	if (err)  		goto fail_debugfs; +	asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result); +	if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) +		asus->driver->wlan_ctrl_by_user = 1; +  	return 0;  fail_debugfs: diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index 9c1da8b81be..4c9bd38bb0a 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -46,6 +46,7 @@ struct quirk_entry {  struct asus_wmi_driver {  	int			brightness;  	int			panel_power; +	int			wlan_ctrl_by_user;  	const char		*name;  	struct module		*owner; diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index cd33add118c..c87ff16873f 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -725,8 +725,10 @@ static void cmpc_tablet_handler(struct acpi_device *dev, u32 event)  	struct input_dev *inputdev = dev_get_drvdata(&dev->dev);  	if (event == 0x81) { -		if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) +		if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) {  			input_report_switch(inputdev, SW_TABLET_MODE, !val); +			input_sync(inputdev); +		}  	}  } @@ -739,8 +741,10 @@ static void cmpc_tablet_idev_init(struct input_dev *inputdev)  	set_bit(SW_TABLET_MODE, inputdev->swbit);  	acpi = to_acpi_device(inputdev->dev.parent); -	if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) +	if 
(ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) {  		input_report_switch(inputdev, SW_TABLET_MODE, !val); +		input_sync(inputdev); +	}  }  static int cmpc_tablet_add(struct acpi_device *acpi) @@ -760,8 +764,10 @@ static int cmpc_tablet_resume(struct device *dev)  	struct input_dev *inputdev = dev_get_drvdata(dev);  	unsigned long long val = 0; -	if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) +	if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) {  		input_report_switch(inputdev, SW_TABLET_MODE, !val); +		input_sync(inputdev); +	}  	return 0;  }  #endif diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 4e96e8c0b60..927c33af67e 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -211,7 +211,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {  		.ident = "Dell Inspiron 5420",  		.matches = {  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5420"), +			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5420"),  		},  		.driver_data = &quirk_dell_vostro_v130,  	}, @@ -220,7 +220,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {  		.ident = "Dell Inspiron 5520",  		.matches = {  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5520"), +			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5520"),  		},  		.driver_data = &quirk_dell_vostro_v130,  	}, @@ -229,7 +229,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {  		.ident = "Dell Inspiron 5720",  		.matches = {  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5720"), +			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5720"),  		},  		.driver_data = &quirk_dell_vostro_v130,  	}, @@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {  		.ident = "Dell Inspiron 7420",  		.matches = {  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -			DMI_MATCH(DMI_PRODUCT_NAME, 
"Isnpiron 7420"), +			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7420"),  		},  		.driver_data = &quirk_dell_vostro_v130,  	}, @@ -247,7 +247,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {  		.ident = "Dell Inspiron 7520",  		.matches = {  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7520"), +			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),  		},  		.driver_data = &quirk_dell_vostro_v130,  	}, @@ -256,7 +256,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {  		.ident = "Dell Inspiron 7720",  		.matches = {  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7720"), +			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),  		},  		.driver_data = &quirk_dell_vostro_v130,  	}, diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 17f6dfd8dbf..dae7abe1d71 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -36,6 +36,7 @@  #include <linux/fb.h>  #include <linux/debugfs.h>  #include <linux/seq_file.h> +#include <linux/i8042.h>  #define IDEAPAD_RFKILL_DEV_NUM	(3) @@ -63,8 +64,11 @@ enum {  	VPCCMD_R_3G,  	VPCCMD_W_3G,  	VPCCMD_R_ODD, /* 0x21 */ -	VPCCMD_R_RF = 0x23, +	VPCCMD_W_FAN, +	VPCCMD_R_RF,  	VPCCMD_W_RF, +	VPCCMD_R_FAN = 0x2B, +	VPCCMD_R_SPECIAL_BUTTONS = 0x31,  	VPCCMD_W_BL_POWER = 0x33,  }; @@ -356,14 +360,46 @@ static ssize_t store_ideapad_cam(struct device *dev,  		return -EINVAL;  	ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);  	if (ret < 0) -		return ret; +		return -EIO;  	return count;  }  static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); +static ssize_t show_ideapad_fan(struct device *dev, +				struct device_attribute *attr, +				char *buf) +{ +	unsigned long result; + +	if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result)) +		return sprintf(buf, "-1\n"); +	return sprintf(buf, "%lu\n", result); +} + +static ssize_t 
store_ideapad_fan(struct device *dev, +				 struct device_attribute *attr, +				 const char *buf, size_t count) +{ +	int ret, state; + +	if (!count) +		return 0; +	if (sscanf(buf, "%i", &state) != 1) +		return -EINVAL; +	if (state < 0 || state > 4 || state == 3) +		return -EINVAL; +	ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state); +	if (ret < 0) +		return -EIO; +	return count; +} + +static DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan); +  static struct attribute *ideapad_attributes[] = {  	&dev_attr_camera_power.attr, +	&dev_attr_fan_mode.attr,  	NULL  }; @@ -377,7 +413,10 @@ static umode_t ideapad_is_visible(struct kobject *kobj,  	if (attr == &dev_attr_camera_power.attr)  		supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); -	else +	else if (attr == &dev_attr_fan_mode.attr) { +		unsigned long value; +		supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value); +	} else  		supported = true;  	return supported ? attr->mode : 0; @@ -518,9 +557,15 @@ static void ideapad_platform_exit(struct ideapad_private *priv)   */  static const struct key_entry ideapad_keymap[] = {  	{ KE_KEY, 6,  { KEY_SWITCHVIDEOMODE } }, +	{ KE_KEY, 7,  { KEY_CAMERA } }, +	{ KE_KEY, 11, { KEY_F16 } },  	{ KE_KEY, 13, { KEY_WLAN } },  	{ KE_KEY, 16, { KEY_PROG1 } },  	{ KE_KEY, 17, { KEY_PROG2 } }, +	{ KE_KEY, 64, { KEY_PROG3 } }, +	{ KE_KEY, 65, { KEY_PROG4 } }, +	{ KE_KEY, 66, { KEY_TOUCHPAD_OFF } }, +	{ KE_KEY, 67, { KEY_TOUCHPAD_ON } },  	{ KE_END, 0 },  }; @@ -587,6 +632,28 @@ static void ideapad_input_novokey(struct ideapad_private *priv)  		ideapad_input_report(priv, 16);  } +static void ideapad_check_special_buttons(struct ideapad_private *priv) +{ +	unsigned long bit, value; + +	read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value); + +	for (bit = 0; bit < 16; bit++) { +		if (test_bit(bit, &value)) { +			switch (bit) { +			case 6: +				/* Thermal Management button */ +				ideapad_input_report(priv, 65); +				break; +			case 1: +				/* OneKey 
Theater button */ +				ideapad_input_report(priv, 64); +				break; +			} +		} +	} +} +  /*   * backlight   */ @@ -691,6 +758,24 @@ static const struct acpi_device_id ideapad_device_ids[] = {  };  MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); +static void ideapad_sync_touchpad_state(struct acpi_device *adevice) +{ +	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); +	unsigned long value; + +	/* Without reading from EC touchpad LED doesn't switch state */ +	if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) { +		/* Some IdeaPads don't really turn off touchpad - they only +		 * switch the LED state. We (de)activate KBC AUX port to turn +		 * touchpad off and on. We send KEY_TOUCHPAD_OFF and +		 * KEY_TOUCHPAD_ON to not to get out of sync with LED */ +		unsigned char param; +		i8042_command(¶m, value ? I8042_CMD_AUX_ENABLE : +			      I8042_CMD_AUX_DISABLE); +		ideapad_input_report(priv, value ? 67 : 66); +	} +} +  static int __devinit ideapad_acpi_add(struct acpi_device *adevice)  {  	int ret, i; @@ -727,6 +812,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)  			priv->rfk[i] = NULL;  	}  	ideapad_sync_rfk_state(priv); +	ideapad_sync_touchpad_state(adevice);  	if (!acpi_video_backlight_support()) {  		ret = ideapad_backlight_init(priv); @@ -785,9 +871,14 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)  				ideapad_sync_rfk_state(priv);  				break;  			case 13: +			case 11: +			case 7:  			case 6:  				ideapad_input_report(priv, vpc_bit);  				break; +			case 5: +				ideapad_sync_touchpad_state(adevice); +				break;  			case 4:  				ideapad_backlight_notify_brightness(priv);  				break; @@ -797,6 +888,9 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)  			case 2:  				ideapad_backlight_notify_power(priv);  				break; +			case 0: +				ideapad_check_special_buttons(priv); +				break;  			default:  				pr_info("Unknown event: %lu\n", vpc_bit);  			} @@ -804,6 +898,15 @@ 
static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)  	}  } +static int ideapad_acpi_resume(struct device *device) +{ +	ideapad_sync_rfk_state(ideapad_priv); +	ideapad_sync_touchpad_state(to_acpi_device(device)); +	return 0; +} + +static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume); +  static struct acpi_driver ideapad_acpi_driver = {  	.name = "ideapad_acpi",  	.class = "IdeaPad", @@ -811,6 +914,7 @@ static struct acpi_driver ideapad_acpi_driver = {  	.ops.add = ideapad_acpi_add,  	.ops.remove = ideapad_acpi_remove,  	.ops.notify = ideapad_acpi_notify, +	.drv.pm = &ideapad_pm,  	.owner = THIS_MODULE,  }; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index f28f36ccdcf..80e37794931 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8664,6 +8664,13 @@ static int __must_check __init get_thinkpad_model_data(  		tp->model_str = kstrdup(s, GFP_KERNEL);  		if (!tp->model_str)  			return -ENOMEM; +	} else { +		s = dmi_get_system_info(DMI_BIOS_VENDOR); +		if (s && !(strnicmp(s, "Lenovo", 6))) { +			tp->model_str = kstrdup(s, GFP_KERNEL); +			if (!tp->model_str) +				return -ENOMEM; +		}  	}  	s = dmi_get_system_info(DMI_PRODUCT_NAME); diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 8fc3808d7a3..90c5c7357a5 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -1,12 +1,31 @@  menuconfig PWM -	bool "PWM Support" +	bool "Pulse-Width Modulation (PWM) Support"  	depends on !MACH_JZ4740 && !PUV3_PWM  	help -	  This enables PWM support through the generic PWM framework. -	  You only need to enable this, if you also want to enable -	  one or more of the PWM drivers below. +	  Generic Pulse-Width Modulation (PWM) support. -	  If unsure, say N. +	  In Pulse-Width Modulation, a variation of the width of pulses +	  in a rectangular pulse signal is used as a means to alter the +	  average power of the signal. 
Applications include efficient +	  power delivery and voltage regulation. In computer systems, +	  PWMs are commonly used to control fans or the brightness of +	  display backlights. + +	  This framework provides a generic interface to PWM devices +	  within the Linux kernel. On the driver side it provides an API +	  to register and unregister a PWM chip, an abstraction of a PWM +	  controller, that supports one or more PWM devices. Client +	  drivers can request PWM devices and use the generic framework +	  to configure as well as enable and disable them. + +	  This generic framework replaces the legacy PWM framework which +	  allows only a single driver implementing the required API. Not +	  all legacy implementations have been ported to the framework +	  yet. The framework provides an API that is backward compatible +	  with the legacy framework so that existing client drivers +	  continue to work as expected. + +	  If unsure, say no.  if PWM diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index ecb76909e94..c6e05078d3a 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -129,8 +129,8 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)  	return 0;  } -static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc, -					      const struct of_phandle_args *args) +static struct pwm_device * +of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)  {  	struct pwm_device *pwm; @@ -149,7 +149,7 @@ static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc,  	return pwm;  } -void of_pwmchip_add(struct pwm_chip *chip) +static void of_pwmchip_add(struct pwm_chip *chip)  {  	if (!chip->dev || !chip->dev->of_node)  		return; @@ -162,7 +162,7 @@ void of_pwmchip_add(struct pwm_chip *chip)  	of_node_get(chip->dev->of_node);  } -void of_pwmchip_remove(struct pwm_chip *chip) +static void of_pwmchip_remove(struct pwm_chip *chip)  {  	if (chip->dev && chip->dev->of_node)  		of_node_put(chip->dev->of_node); @@ 
-527,7 +527,7 @@ void __init pwm_add_table(struct pwm_lookup *table, size_t num)  struct pwm_device *pwm_get(struct device *dev, const char *con_id)  {  	struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); -	const char *dev_id = dev ? dev_name(dev): NULL; +	const char *dev_id = dev ? dev_name(dev) : NULL;  	struct pwm_chip *chip = NULL;  	unsigned int index = 0;  	unsigned int best = 0; @@ -609,7 +609,7 @@ void pwm_put(struct pwm_device *pwm)  	mutex_lock(&pwm_lock);  	if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) { -		pr_warning("PWM device already freed\n"); +		pr_warn("PWM device already freed\n");  		goto out;  	} diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index d10386528c9..e5187c0ade9 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -225,6 +225,7 @@ static int s3c_pwm_probe(struct platform_device *pdev)  	/* calculate base of control bits in TCON */  	s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4; +	s3c->chip.dev = &pdev->dev;  	s3c->chip.ops = &s3c_pwm_ops;  	s3c->chip.base = -1;  	s3c->chip.npwm = 1; diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index 02ce18d5e49..057465e0553 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -187,10 +187,8 @@ static int tegra_pwm_probe(struct platform_device *pdev)  	}  	pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r); -	if (!pwm->mmio_base) { -		dev_err(&pdev->dev, "failed to ioremap() region\n"); +	if (!pwm->mmio_base)  		return -EADDRNOTAVAIL; -	}  	platform_set_drvdata(pdev, pwm); diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index 3c2ad284ee3..0b66d0f2592 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -192,10 +192,8 @@ static int __devinit ecap_pwm_probe(struct platform_device *pdev)  	}  	pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); -	if (!pc->mmio_base) { -		dev_err(&pdev->dev, "failed to ioremap() registers\n"); +	if (!pc->mmio_base)  		return -EADDRNOTAVAIL; -	}  	
ret = pwmchip_add(&pc->chip);  	if (ret < 0) { diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index 010d232cb0c..c3756d1be19 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -371,10 +371,8 @@ static int __devinit ehrpwm_pwm_probe(struct platform_device *pdev)  	}  	pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); -	if (!pc->mmio_base) { -		dev_err(&pdev->dev, "failed to ioremap() registers\n"); +	if (!pc->mmio_base)  		return  -EADDRNOTAVAIL; -	}  	ret = pwmchip_add(&pc->chip);  	if (ret < 0) { diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c index 548021439f0..ad14389b714 100644 --- a/drivers/pwm/pwm-vt8500.c +++ b/drivers/pwm/pwm-vt8500.c @@ -41,7 +41,7 @@ static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)  		cpu_relax();  	if (unlikely(!loops)) -		pr_warning("Waiting for status bits 0x%x to clear timed out\n", +		pr_warn("Waiting for status bits 0x%x to clear timed out\n",  			   bitmask);  } diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 722246cf20a..5d44252b734 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -435,6 +435,9 @@ static void tsi721_db_dpc(struct work_struct *work)  				" info %4.4x\n", DBELL_SID(idb.bytes),  				DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));  		} + +		wr_ptr = ioread32(priv->regs + +				  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;  	}  	iowrite32(rd_ptr & (IDB_QSIZE - 1), @@ -445,6 +448,10 @@ static void tsi721_db_dpc(struct work_struct *work)  	regval |= TSI721_SR_CHINT_IDBQRCV;  	iowrite32(regval,  		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + +	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; +	if (wr_ptr != rd_ptr) +		schedule_work(&priv->idb_work);  }  /** @@ -2212,7 +2219,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,  				  const struct pci_device_id *id)  {  	struct tsi721_device *priv; -	int i, cap; +	int cap;  	int err;  	u32 
regval; @@ -2232,12 +2239,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,  	priv->pdev = pdev;  #ifdef DEBUG +	{ +	int i;  	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {  		dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",  			i, (unsigned long long)pci_resource_start(pdev, i),  			(unsigned long)pci_resource_len(pdev, i),  			pci_resource_flags(pdev, i));  	} +	}  #endif  	/*  	 * Verify BAR configuration diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 182b553059c..c151fd5d8c9 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c @@ -486,6 +486,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {  		.id   = AB3100_BUCK,  		.ops  = ®ulator_ops_variable_sleepable,  		.n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), +		.volt_table = ldo_e_buck_typ_voltages,  		.type = REGULATOR_VOLTAGE,  		.owner = THIS_MODULE,  		.enable_time = 1000, diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index e9c2085f9df..ce0fe72a428 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c @@ -64,14 +64,15 @@ static int anatop_set_voltage_sel(struct regulator_dev *reg, unsigned selector)  static int anatop_get_voltage_sel(struct regulator_dev *reg)  {  	struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); -	u32 val; +	u32 val, mask;  	if (!anatop_reg->control_reg)  		return -ENOTSUPP;  	val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg); -	val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >> +	mask = ((1 << anatop_reg->vol_bit_width) - 1) <<  		anatop_reg->vol_bit_shift; +	val = (val & mask) >> anatop_reg->vol_bit_shift;  	return val - anatop_reg->min_bit_val;  } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index f092588a078..48385318175 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3217,7 +3217,7 @@ regulator_register(const struct regulator_desc *regulator_desc,  	
dev_set_drvdata(&rdev->dev, rdev); -	if (config->ena_gpio) { +	if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {  		ret = gpio_request_one(config->ena_gpio,  				       GPIOF_DIR_OUT | config->ena_gpio_flags,  				       rdev_get_name(rdev)); diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 34b67bee932..8b5944f2d7d 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -57,16 +57,17 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)  	return -EINVAL;  } -static int gpio_regulator_set_value(struct regulator_dev *dev, -					int min, int max, unsigned *selector) +static int gpio_regulator_set_voltage(struct regulator_dev *dev, +					int min_uV, int max_uV, +					unsigned *selector)  {  	struct gpio_regulator_data *data = rdev_get_drvdata(dev);  	int ptr, target = 0, state, best_val = INT_MAX;  	for (ptr = 0; ptr < data->nr_states; ptr++)  		if (data->states[ptr].value < best_val && -		    data->states[ptr].value >= min && -		    data->states[ptr].value <= max) { +		    data->states[ptr].value >= min_uV && +		    data->states[ptr].value <= max_uV) {  			target = data->states[ptr].gpios;  			best_val = data->states[ptr].value;  			if (selector) @@ -85,13 +86,6 @@ static int gpio_regulator_set_value(struct regulator_dev *dev,  	return 0;  } -static int gpio_regulator_set_voltage(struct regulator_dev *dev, -					int min_uV, int max_uV, -					unsigned *selector) -{ -	return gpio_regulator_set_value(dev, min_uV, max_uV, selector); -} -  static int gpio_regulator_list_voltage(struct regulator_dev *dev,  				      unsigned selector)  { @@ -106,7 +100,27 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,  static int gpio_regulator_set_current_limit(struct regulator_dev *dev,  					int min_uA, int max_uA)  { -	return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); +	struct gpio_regulator_data *data = rdev_get_drvdata(dev); +	int ptr, target = 0, state, 
best_val = 0; + +	for (ptr = 0; ptr < data->nr_states; ptr++) +		if (data->states[ptr].value > best_val && +		    data->states[ptr].value >= min_uA && +		    data->states[ptr].value <= max_uA) { +			target = data->states[ptr].gpios; +			best_val = data->states[ptr].value; +		} + +	if (best_val == 0) +		return -EINVAL; + +	for (ptr = 0; ptr < data->nr_gpios; ptr++) { +		state = (target & (1 << ptr)) >> ptr; +		gpio_set_value(data->gpios[ptr].gpio, state); +	} +	data->state = target; + +	return 0;  }  static struct regulator_ops gpio_regulator_voltage_ops = { diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index 17d19fbbc49..46c7e88f838 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -486,9 +486,12 @@ static int palmas_map_voltage_ldo(struct regulator_dev *rdev,  {  	int ret, voltage; -	ret = ((min_uV - 900000) / 50000) + 1; -	if (ret < 0) -		return ret; +	if (min_uV == 0) +		return 0; + +	if (min_uV < 900000) +		min_uV = 900000; +	ret = DIV_ROUND_UP(min_uV - 900000, 50000) + 1;  	/* Map back into a voltage to verify we're still in bounds */  	voltage = palmas_list_voltage_ldo(rdev, ret); @@ -586,7 +589,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id,  	addr = palmas_regs_info[id].ctrl_addr; -	ret = palmas_smps_read(palmas, addr, ®); +	ret = palmas_ldo_read(palmas, addr, ®);  	if (ret)  		return ret; @@ -596,7 +599,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id,  	if (reg_init->mode_sleep)  		reg |= PALMAS_LDO1_CTRL_MODE_SLEEP; -	ret = palmas_smps_write(palmas, addr, reg); +	ret = palmas_ldo_write(palmas, addr, reg);  	if (ret)  		return ret; @@ -630,7 +633,7 @@ static __devinit int palmas_probe(struct platform_device *pdev)  	ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, ®);  	if (ret) -		goto err_unregister_regulator; +		return ret;  	if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN)  		pmic->smps123 = 1; @@ -676,7 +679,9 @@ static __devinit int 
palmas_probe(struct platform_device *pdev)  		case PALMAS_REG_SMPS10:  			pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES;  			pmic->desc[id].ops = &palmas_ops_smps10; -			pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL; +			pmic->desc[id].vsel_reg = +					PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, +							PALMAS_SMPS10_CTRL);  			pmic->desc[id].vsel_mask = SMPS10_VSEL;  			pmic->desc[id].enable_reg =  					PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, @@ -778,8 +783,10 @@ static __devinit int palmas_probe(struct platform_device *pdev)  			reg_init = pdata->reg_init[id];  			if (reg_init) {  				ret = palmas_ldo_init(palmas, id, reg_init); -				if (ret) +				if (ret) { +					regulator_unregister(pmic->rdev[id]);  					goto err_unregister_regulator; +				}  			}  		}  	} diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c index e6da90ab515..19241fc3005 100644 --- a/drivers/regulator/tps6586x-regulator.c +++ b/drivers/regulator/tps6586x-regulator.c @@ -240,14 +240,16 @@ static struct tps6586x_regulator tps6586x_regulator[] = {  	TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),  	TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),  	TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), -	TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), +	TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),  	TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,  					ENB, 3, VCC2, 6),  	TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,  					END, 3, VCC1, 6), -	TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2), -	TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0), +	TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1, +					ENB, 1, VCC1, 2), +	TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0, +					ENB, 0, VCC1, 0),  };  /* diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 
242fe90dc56..77a71a5c17c 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -1037,7 +1037,7 @@ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300);  TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300);  TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300);  TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300); -TWL4030_FIXED_LDO(VINTANA2, 0x3f, 1500, 11, 100, 0x08); +TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);  TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);  TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08);  TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08); @@ -1048,7 +1048,6 @@ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0);  TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0);  TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0);  TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0); -TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0);  TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34);  TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10);  TWL6025_ADJUSTABLE_SMPS(VIO, 0x16); @@ -1117,7 +1116,7 @@ static const struct of_device_id twl_of_match[] __devinitconst = {  	TWL6025_OF_MATCH("ti,twl6025-ldo6", LDO6),  	TWL6025_OF_MATCH("ti,twl6025-ldoln", LDOLN),  	TWL6025_OF_MATCH("ti,twl6025-ldousb", LDOUSB), -	TWLFIXED_OF_MATCH("ti,twl4030-vintana2", VINTANA2), +	TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1),  	TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG),  	TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5),  	TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8), diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c index 836118795c0..13e4df63974 100644 --- a/drivers/rtc/rtc-pcf2123.c +++ b/drivers/rtc/rtc-pcf2123.c @@ -43,6 +43,7 @@  #include <linux/rtc.h>  #include <linux/spi/spi.h>  #include <linux/module.h> +#include <linux/sysfs.h>  #define DRV_VERSION "0.6" @@ -292,6 +293,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi)  	pdata->rtc = rtc;  	for (i = 0; i < 16; i++) { +		sysfs_attr_init(&pdata->regs[i].attr.attr);  		sprintf(pdata->regs[i].name, "%1x", i);  		
pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR;  		pdata->regs[i].attr.attr.name = pdata->regs[i].name; diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c index 77074ccd285..fd5c7af04ae 100644 --- a/drivers/rtc/rtc-rs5c348.c +++ b/drivers/rtc/rtc-rs5c348.c @@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)  	tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);  	tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);  	if (!pdata->rtc_24h) { -		tm->tm_hour %= 12; -		if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) +		if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) { +			tm->tm_hour -= 20; +			tm->tm_hour %= 12;  			tm->tm_hour += 12; +		} else +			tm->tm_hour %= 12;  	}  	tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);  	tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK); diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 6e25ef1bce9..a9f4049c676 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -47,6 +47,8 @@ struct bcm63xx_spi {  	/* Platform data */  	u32			speed_hz;  	unsigned		fifo_size; +	unsigned int		msg_type_shift; +	unsigned int		msg_ctl_width;  	/* Data buffers */  	const unsigned char	*tx_ptr; @@ -221,13 +223,20 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,  	msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);  	if (t->rx_buf && t->tx_buf) -		msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT); +		msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);  	else if (t->rx_buf) -		msg_ctl |= (SPI_HD_R << SPI_MSG_TYPE_SHIFT); +		msg_ctl |= (SPI_HD_R << bs->msg_type_shift);  	else if (t->tx_buf) -		msg_ctl |= (SPI_HD_W << SPI_MSG_TYPE_SHIFT); +		msg_ctl |= (SPI_HD_W << bs->msg_type_shift); -	bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL); +	switch (bs->msg_ctl_width) { +	case 8: +		bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL); +		break; +	case 16: +		bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL); +		break; +	}  	/* Issue the 
transfer */  	cmd = SPI_CMD_START_IMMEDIATE; @@ -406,9 +415,21 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)  	master->transfer_one_message = bcm63xx_spi_transfer_one;  	master->mode_bits = MODEBITS;  	bs->speed_hz = pdata->speed_hz; +	bs->msg_type_shift = pdata->msg_type_shift; +	bs->msg_ctl_width = pdata->msg_ctl_width;  	bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));  	bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); +	switch (bs->msg_ctl_width) { +	case 8: +	case 16: +		break; +	default: +		dev_err(dev, "unsupported MSG_CTL width: %d\n", +			 bs->msg_ctl_width); +		goto out_clk_disable; +	} +  	/* Initialize hardware */  	clk_enable(bs->clk);  	bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); @@ -438,7 +459,7 @@ out:  static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)  { -	struct spi_master *master = platform_get_drvdata(pdev); +	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));  	struct bcm63xx_spi *bs = spi_master_get_devdata(master);  	spi_unregister_master(master); @@ -452,6 +473,8 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)  	platform_set_drvdata(pdev, 0); +	spi_master_put(master); +  	return 0;  } diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c index b2d4b9e4e01..764bfee7592 100644 --- a/drivers/spi/spi-coldfire-qspi.c +++ b/drivers/spi/spi-coldfire-qspi.c @@ -533,7 +533,6 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)  	iounmap(mcfqspi->iobase);  	release_mem_region(res->start, resource_size(res));  	spi_unregister_master(master); -	spi_master_put(master);  	return 0;  } @@ -541,7 +540,7 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)  #ifdef CONFIG_PM_SLEEP  static int mcfqspi_suspend(struct device *dev)  { -	struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); +	struct spi_master *master = dev_get_drvdata(dev);  	struct mcfqspi 
*mcfqspi = spi_master_get_devdata(master);  	spi_master_suspend(master); @@ -553,7 +552,7 @@ static int mcfqspi_suspend(struct device *dev)  static int mcfqspi_resume(struct device *dev)  { -	struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); +	struct spi_master *master = dev_get_drvdata(dev);  	struct mcfqspi *mcfqspi = spi_master_get_devdata(master);  	spi_master_resume(master); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index bc4778175e3..b2fb141da37 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -1228,18 +1228,16 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)  	status = spi_register_master(master);  	if (status < 0) -		goto err_spi_register; +		goto disable_pm;  	return status; -err_spi_register: -	spi_master_put(master);  disable_pm:  	pm_runtime_disable(&pdev->dev);  dma_chnl_free:  	kfree(mcspi->dma_channels);  free_master: -	kfree(master); +	spi_master_put(master);  	platform_set_drvdata(pdev, NULL);  	return status;  } diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index aab518ec2bb..6abbe23c39b 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -2053,7 +2053,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)  	printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",  	       adev->res.start, pl022->virtbase); -	pm_runtime_enable(dev);  	pm_runtime_resume(dev);  	pl022->clk = clk_get(&adev->dev, NULL); diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index cfa2c35dfee..d1c8441f638 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -1479,40 +1479,40 @@ static const struct dev_pm_ops s3c64xx_spi_pm = {  			   s3c64xx_spi_runtime_resume, NULL)  }; -struct s3c64xx_spi_port_config s3c2443_spi_port_config = { +static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {  	.fifo_lvl_mask	= { 0x7f },  	.rx_lvl_offset	= 13,  	.tx_st_done	= 21,  	
.high_speed	= true,  }; -struct s3c64xx_spi_port_config s3c6410_spi_port_config = { +static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {  	.fifo_lvl_mask	= { 0x7f, 0x7F },  	.rx_lvl_offset	= 13,  	.tx_st_done	= 21,  }; -struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { +static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {  	.fifo_lvl_mask	= { 0x1ff, 0x7F },  	.rx_lvl_offset	= 15,  	.tx_st_done	= 25,  }; -struct s3c64xx_spi_port_config s5pc100_spi_port_config = { +static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {  	.fifo_lvl_mask	= { 0x7f, 0x7F },  	.rx_lvl_offset	= 13,  	.tx_st_done	= 21,  	.high_speed	= true,  }; -struct s3c64xx_spi_port_config s5pv210_spi_port_config = { +static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {  	.fifo_lvl_mask	= { 0x1ff, 0x7F },  	.rx_lvl_offset	= 15,  	.tx_st_done	= 25,  	.high_speed	= true,  }; -struct s3c64xx_spi_port_config exynos4_spi_port_config = { +static struct s3c64xx_spi_port_config exynos4_spi_port_config = {  	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },  	.rx_lvl_offset	= 15,  	.tx_st_done	= 25, diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index c0fdb00783e..2359151af7e 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -168,7 +168,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)  			dev->board_ptr = comedi_recognize(driv, it->board_name);  			if (dev->board_ptr)  				break; -		} else if (strcmp(driv->driver_name, it->board_name)) +		} else if (strcmp(driv->driver_name, it->board_name) == 0)  			break;  		module_put(driv->module);  	} diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 31986608eaf..6b4d0d68e63 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -1349,9 +1349,6 @@ static struct pci_dev *pci1710_find_pci_dev(struct 
comedi_device *dev,  		}  		if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH)  			continue; -		if (pci_is_enabled(pcidev)) -			continue; -  		if (strcmp(this_board->name, DRV_NAME) == 0) {  			for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) {  				if (pcidev->device == boardtypes[i].device_id) { diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c index da5ee69d2c9..dfde0f6328d 100644 --- a/drivers/staging/comedi/drivers/adv_pci1723.c +++ b/drivers/staging/comedi/drivers/adv_pci1723.c @@ -301,8 +301,6 @@ static struct pci_dev *pci1723_find_pci_dev(struct comedi_device *dev,  		}  		if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH)  			continue; -		if (pci_is_enabled(pcidev)) -			continue;  		return pcidev;  	}  	dev_err(dev->class_dev, diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c index 97f06dc8e48..2d4cb7f638b 100644 --- a/drivers/staging/comedi/drivers/adv_pci_dio.c +++ b/drivers/staging/comedi/drivers/adv_pci_dio.c @@ -1064,8 +1064,6 @@ static struct pci_dev *pci_dio_find_pci_dev(struct comedi_device *dev,  			    slot != PCI_SLOT(pcidev->devfn))  				continue;  		} -		if (pci_is_enabled(pcidev)) -			continue;  		for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) {  			if (boardtypes[i].vendor_id != pcidev->vendor)  				continue; diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c index ef28385c148..cad559a1a73 100644 --- a/drivers/staging/comedi/drivers/daqboard2000.c +++ b/drivers/staging/comedi/drivers/daqboard2000.c @@ -718,7 +718,8 @@ static struct pci_dev *daqboard2000_find_pci_dev(struct comedi_device *dev,  				continue;  		}  		if (pcidev->vendor != PCI_VENDOR_ID_IOTECH || -		    pcidev->device != 0x0409) +		    pcidev->device != 0x0409 || +		    pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)  			continue;  		for (i = 0; i < ARRAY_SIZE(boardtypes); i++) { @@ -739,6 +740,7 @@ static int 
daqboard2000_attach(struct comedi_device *dev,  {  	struct pci_dev *pcidev;  	struct comedi_subdevice *s; +	resource_size_t pci_base;  	void *aux_data;  	unsigned int aux_len;  	int result; @@ -758,11 +760,12 @@ static int daqboard2000_attach(struct comedi_device *dev,  			"failed to enable PCI device and request regions\n");  		return -EIO;  	} -	dev->iobase = pci_resource_start(pcidev, 2); +	dev->iobase = 1;	/* the "detach" needs this */ -	devpriv->plx = -	    ioremap(pci_resource_start(pcidev, 0), DAQBOARD2000_PLX_SIZE); -	devpriv->daq = ioremap(dev->iobase, DAQBOARD2000_DAQ_SIZE); +	pci_base = pci_resource_start(pcidev, 0); +	devpriv->plx = ioremap(pci_base, DAQBOARD2000_PLX_SIZE); +	pci_base = pci_resource_start(pcidev, 2); +	devpriv->daq = ioremap(pci_base, DAQBOARD2000_DAQ_SIZE);  	if (!devpriv->plx || !devpriv->daq)  		return -ENOMEM; @@ -799,8 +802,6 @@ static int daqboard2000_attach(struct comedi_device *dev,  	   printk("Interrupt after is: %x\n", interrupt);  	 */ -	dev->iobase = (unsigned long)devpriv->daq; -  	dev->board_name = this_board->name;  	s = dev->subdevices + 0; @@ -824,7 +825,7 @@ static int daqboard2000_attach(struct comedi_device *dev,  	s = dev->subdevices + 2;  	result = subdev_8255_init(dev, s, daqboard2000_8255_cb, -				  (unsigned long)(dev->iobase + 0x40)); +				  (unsigned long)(devpriv->daq + 0x40));  out:  	return result; diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c index a6fe6c9be87..3476cda0fff 100644 --- a/drivers/staging/comedi/drivers/dt3000.c +++ b/drivers/staging/comedi/drivers/dt3000.c @@ -804,6 +804,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)  {  	struct pci_dev *pcidev;  	struct comedi_subdevice *s; +	resource_size_t pci_base;  	int ret = 0;  	dev_dbg(dev->class_dev, "dt3000:\n"); @@ -820,9 +821,10 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)  	ret = comedi_pci_enable(pcidev, "dt3000");  	if 
(ret < 0)  		return ret; +	dev->iobase = 1;	/* the "detach" needs this */ -	dev->iobase = pci_resource_start(pcidev, 0); -	devpriv->io_addr = ioremap(dev->iobase, DT3000_SIZE); +	pci_base  = pci_resource_start(pcidev, 0); +	devpriv->io_addr = ioremap(pci_base, DT3000_SIZE);  	if (!devpriv->io_addr)  		return -ENOMEM; diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c index 112fdc3e9c6..5aa8be1e7b9 100644 --- a/drivers/staging/comedi/drivers/rtd520.c +++ b/drivers/staging/comedi/drivers/rtd520.c @@ -1619,9 +1619,8 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it)  	struct rtdPrivate *devpriv;  	struct pci_dev *pcidev;  	struct comedi_subdevice *s; +	resource_size_t pci_base;  	int ret; -	resource_size_t physLas1;	/* data area */ -	resource_size_t physLcfg;	/* PLX9080 */  #ifdef USE_DMA  	int index;  #endif @@ -1655,20 +1654,15 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it)  		printk(KERN_INFO "Failed to enable PCI device and request regions.\n");  		return ret;  	} +	dev->iobase = 1;	/* the "detach" needs this */ -	/* -	 * Initialize base addresses -	 */ -	/* Get the physical address from PCI config */ -	dev->iobase = pci_resource_start(pcidev, LAS0_PCIINDEX); -	physLas1 = pci_resource_start(pcidev, LAS1_PCIINDEX); -	physLcfg = pci_resource_start(pcidev, LCFG_PCIINDEX); -	/* Now have the kernel map this into memory */ -	/* ASSUME page aligned */ -	devpriv->las0 = ioremap_nocache(dev->iobase, LAS0_PCISIZE); -	devpriv->las1 = ioremap_nocache(physLas1, LAS1_PCISIZE); -	devpriv->lcfg = ioremap_nocache(physLcfg, LCFG_PCISIZE); - +	/* Initialize the base addresses */ +	pci_base = pci_resource_start(pcidev, LAS0_PCIINDEX); +	devpriv->las0 = ioremap_nocache(pci_base, LAS0_PCISIZE); +	pci_base = pci_resource_start(pcidev, LAS1_PCIINDEX); +	devpriv->las1 = ioremap_nocache(pci_base, LAS1_PCISIZE); +	pci_base = pci_resource_start(pcidev, LCFG_PCIINDEX); +	devpriv->lcfg = 
ioremap_nocache(pci_base, LCFG_PCISIZE);  	if (!devpriv->las0 || !devpriv->las1 || !devpriv->lcfg)  		return -ENOMEM; diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c index 848c7ec0697..11ee83681da 100644 --- a/drivers/staging/comedi/drivers/usbdux.c +++ b/drivers/staging/comedi/drivers/usbdux.c @@ -102,6 +102,7 @@ sampling rate. If you sample two channels you get 4kHz and so on.  #define BULK_TIMEOUT 1000  /* constants for "firmware" upload and download */ +#define FIRMWARE "usbdux_firmware.bin"  #define USBDUXSUB_FIRMWARE 0xA0  #define VENDOR_DIR_IN  0xC0  #define VENDOR_DIR_OUT 0x40 @@ -2791,7 +2792,7 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,  	ret = request_firmware_nowait(THIS_MODULE,  				      FW_ACTION_HOTPLUG, -				      "usbdux_firmware.bin", +				      FIRMWARE,  				      &udev->dev,  				      GFP_KERNEL,  				      usbduxsub + index, @@ -2850,3 +2851,4 @@ module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver);  MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");  MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com");  MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index d9911588c10..8eb41257c6c 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -57,6 +57,7 @@  /*   * constants for "firmware" upload and download   */ +#define FIRMWARE		"usbduxfast_firmware.bin"  #define USBDUXFASTSUB_FIRMWARE	0xA0  #define VENDOR_DIR_IN		0xC0  #define VENDOR_DIR_OUT		0x40 @@ -1706,7 +1707,7 @@ static int usbduxfast_usb_probe(struct usb_interface *uinterf,  	ret = request_firmware_nowait(THIS_MODULE,  				      FW_ACTION_HOTPLUG, -				      "usbduxfast_firmware.bin", +				      FIRMWARE,  				      &udev->dev,  				      GFP_KERNEL,  				      usbduxfastsub + index, @@ -1774,3 +1775,4 @@ module_comedi_usb_driver(usbduxfast_driver, 
usbduxfast_usb_driver);  MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");  MODULE_DESCRIPTION("USB-DUXfast, BerndPorr@f2s.com");  MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c index 543e604791e..f54ab8c2fcf 100644 --- a/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/drivers/staging/comedi/drivers/usbduxsigma.c @@ -63,6 +63,7 @@ Status: testing  #define BULK_TIMEOUT 1000  /* constants for "firmware" upload and download */ +#define FIRMWARE "usbduxsigma_firmware.bin"  #define USBDUXSUB_FIRMWARE 0xA0  #define VENDOR_DIR_IN  0xC0  #define VENDOR_DIR_OUT 0x40 @@ -2780,7 +2781,7 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,  	ret = request_firmware_nowait(THIS_MODULE,  				      FW_ACTION_HOTPLUG, -				      "usbduxsigma_firmware.bin", +				      FIRMWARE,  				      &udev->dev,  				      GFP_KERNEL,  				      usbduxsub + index, @@ -2845,3 +2846,4 @@ module_comedi_usb_driver(usbduxsigma_driver, usbduxsigma_usb_driver);  MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");  MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com");  MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/staging/csr/Kconfig b/drivers/staging/csr/Kconfig index cee8d48d2af..ad2a1096e92 100644 --- a/drivers/staging/csr/Kconfig +++ b/drivers/staging/csr/Kconfig @@ -1,6 +1,6 @@  config CSR_WIFI  	tristate "CSR wireless driver" -	depends on MMC && CFG80211_WEXT +	depends on MMC && CFG80211_WEXT && INET  	select WIRELESS_EXT  	select WEXT_PRIV  	help diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index 22c3923d55e..095837285f4 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -754,7 +754,7 @@ static ssize_t ad7192_set(struct device *dev,  		else  			st->mode &= ~AD7192_MODE_ACX; -		ad7192_write_reg(st, AD7192_REG_GPOCON, 3, st->mode); +		ad7192_write_reg(st, 
AD7192_REG_MODE, 3, st->mode);  		break;  	default:  		ret = -EINVAL; @@ -798,6 +798,11 @@ static const struct attribute_group ad7195_attribute_group = {  	.attrs = ad7195_attributes,  }; +static unsigned int ad7192_get_temp_scale(bool unipolar) +{ +	return unipolar ? 2815 * 2 : 2815; +} +  static int ad7192_read_raw(struct iio_dev *indio_dev,  			   struct iio_chan_spec const *chan,  			   int *val, @@ -824,19 +829,6 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,  		*val = (smpl >> chan->scan_type.shift) &  			((1 << (chan->scan_type.realbits)) - 1); -		switch (chan->type) { -		case IIO_VOLTAGE: -			if (!unipolar) -				*val -= (1 << (chan->scan_type.realbits - 1)); -			break; -		case IIO_TEMP: -			*val -= 0x800000; -			*val /= 2815; /* temp Kelvin */ -			*val -= 273; /* temp Celsius */ -			break; -		default: -			return -EINVAL; -		}  		return IIO_VAL_INT;  	case IIO_CHAN_INFO_SCALE: @@ -848,11 +840,21 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,  			mutex_unlock(&indio_dev->mlock);  			return IIO_VAL_INT_PLUS_NANO;  		case IIO_TEMP: -			*val =  1000; -			return IIO_VAL_INT; +			*val = 0; +			*val2 = 1000000000 / ad7192_get_temp_scale(unipolar); +			return IIO_VAL_INT_PLUS_NANO;  		default:  			return -EINVAL;  		} +	case IIO_CHAN_INFO_OFFSET: +		if (!unipolar) +			*val = -(1 << (chan->scan_type.realbits - 1)); +		else +			*val = 0; +		/* Kelvin to Celsius */ +		if (chan->type == IIO_TEMP) +			*val -= 273 * ad7192_get_temp_scale(unipolar); +		return IIO_VAL_INT;  	}  	return -EINVAL; @@ -890,7 +892,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,  				}  				ret = 0;  			} - +		break;  	default:  		ret = -EINVAL;  	} @@ -942,20 +944,22 @@ static const struct iio_info ad7195_info = {  	  .channel = _chan,						\  	  .channel2 = _chan2,						\  	  .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |			\ -	  IIO_CHAN_INFO_SCALE_SHARED_BIT,				\ +	  IIO_CHAN_INFO_SCALE_SHARED_BIT |				\ +	  IIO_CHAN_INFO_OFFSET_SHARED_BIT,				\  	  .address = 
_address,						\  	  .scan_index = _si,						\ -	  .scan_type =  IIO_ST('s', 24, 32, 0)} +	  .scan_type =  IIO_ST('u', 24, 32, 0)}  #define AD7192_CHAN(_chan, _address, _si)				\  	{ .type = IIO_VOLTAGE,						\  	  .indexed = 1,							\  	  .channel = _chan,						\  	  .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |			\ -	  IIO_CHAN_INFO_SCALE_SHARED_BIT,				\ +	  IIO_CHAN_INFO_SCALE_SHARED_BIT |				\ +	  IIO_CHAN_INFO_OFFSET_SHARED_BIT,				\  	  .address = _address,						\  	  .scan_index = _si,						\ -	  .scan_type =  IIO_ST('s', 24, 32, 0)} +	  .scan_type =  IIO_ST('u', 24, 32, 0)}  #define AD7192_CHAN_TEMP(_chan, _address, _si)				\  	{ .type = IIO_TEMP,						\ @@ -965,7 +969,7 @@ static const struct iio_info ad7195_info = {  	  IIO_CHAN_INFO_SCALE_SEPARATE_BIT,				\  	  .address = _address,						\  	  .scan_index = _si,						\ -	  .scan_type =  IIO_ST('s', 24, 32, 0)} +	  .scan_type =  IIO_ST('u', 24, 32, 0)}  static struct iio_chan_spec ad7192_channels[] = {  	AD7192_CHAN_DIFF(1, 2, NULL, AD7192_CH_AIN1P_AIN2M, 0), diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c index fd1d855ff57..506016f0159 100644 --- a/drivers/staging/iio/adc/ad7298_ring.c +++ b/drivers/staging/iio/adc/ad7298_ring.c @@ -76,7 +76,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)  	struct iio_dev *indio_dev = pf->indio_dev;  	struct ad7298_state *st = iio_priv(indio_dev);  	struct iio_buffer *ring = indio_dev->buffer; -	s64 time_ns; +	s64 time_ns = 0;  	__u16 buf[16];  	int b_sent, i; diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c index 1ece2ac8de5..19ee49c95de 100644 --- a/drivers/staging/iio/adc/ad7780.c +++ b/drivers/staging/iio/adc/ad7780.c @@ -131,9 +131,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {  			.indexed = 1,  			.channel = 0,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT | +			
IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_type = { -				.sign = 's', +				.sign = 'u',  				.realbits = 24,  				.storagebits = 32,  				.shift = 8, @@ -146,9 +147,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {  			.indexed = 1,  			.channel = 0,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_type = { -				.sign = 's', +				.sign = 'u',  				.realbits = 20,  				.storagebits = 32,  				.shift = 12, diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c index 76fdd7145fc..112e2b7b5bc 100644 --- a/drivers/staging/iio/adc/ad7793.c +++ b/drivers/staging/iio/adc/ad7793.c @@ -563,8 +563,9 @@ static ssize_t ad7793_show_scale_available(struct device *dev,  	return len;  } -static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, in-in_scale_available, -			     S_IRUGO, ad7793_show_scale_available, NULL, 0); +static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, +		in_voltage-voltage_scale_available, S_IRUGO, +		ad7793_show_scale_available, NULL, 0);  static struct attribute *ad7793_attributes[] = {  	&iio_dev_attr_sampling_frequency.dev_attr.attr, @@ -604,9 +605,6 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,  		*val = (smpl >> chan->scan_type.shift) &  			((1 << (chan->scan_type.realbits)) - 1); -		if (!unipolar) -			*val -= (1 << (chan->scan_type.realbits - 1)); -  		return IIO_VAL_INT;  	case IIO_CHAN_INFO_SCALE: @@ -620,25 +618,38 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,  				return IIO_VAL_INT_PLUS_NANO;  			} else {  				/* 1170mV / 2^23 * 6 */ -				scale_uv = (1170ULL * 100000000ULL * 6ULL) -					>> (chan->scan_type.realbits - -					    (unipolar ? 
0 : 1)); +				scale_uv = (1170ULL * 100000000ULL * 6ULL);  			}  			break;  		case IIO_TEMP: -			/* Always uses unity gain and internal ref */ -			scale_uv = (2500ULL * 100000000ULL) -				>> (chan->scan_type.realbits - -				(unipolar ? 0 : 1)); +				/* 1170mV / 0.81 mV/C / 2^23 */ +				scale_uv = 1444444444444ULL;  			break;  		default:  			return -EINVAL;  		} -		*val2 = do_div(scale_uv, 100000000) * 10; -		*val =  scale_uv; - +		scale_uv >>= (chan->scan_type.realbits - (unipolar ? 0 : 1)); +		*val = 0; +		*val2 = scale_uv;  		return IIO_VAL_INT_PLUS_NANO; +	case IIO_CHAN_INFO_OFFSET: +		if (!unipolar) +			*val = -(1 << (chan->scan_type.realbits - 1)); +		else +			*val = 0; + +		/* Kelvin to Celsius */ +		if (chan->type == IIO_TEMP) { +			unsigned long long offset; +			unsigned int shift; + +			shift = chan->scan_type.realbits - (unipolar ? 0 : 1); +			offset = 273ULL << shift; +			do_div(offset, 1444); +			*val -= offset; +		} +		return IIO_VAL_INT;  	}  	return -EINVAL;  } @@ -676,7 +687,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,  				}  				ret = 0;  			} - +		break;  	default:  		ret = -EINVAL;  	} @@ -720,9 +731,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel2 = 0,  			.address = AD7793_CH_AIN1P_AIN1M,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 0, -			.scan_type = IIO_ST('s', 24, 32, 0) +			.scan_type = IIO_ST('u', 24, 32, 0)  		},  		.channel[1] = {  			.type = IIO_VOLTAGE, @@ -732,9 +744,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel2 = 1,  			.address = AD7793_CH_AIN2P_AIN2M,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 1, -			.scan_type = IIO_ST('s', 24, 32, 0) +			.scan_type = IIO_ST('u', 24, 32, 0)  		}, 
 		.channel[2] = {  			.type = IIO_VOLTAGE, @@ -744,9 +757,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel2 = 2,  			.address = AD7793_CH_AIN3P_AIN3M,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 2, -			.scan_type = IIO_ST('s', 24, 32, 0) +			.scan_type = IIO_ST('u', 24, 32, 0)  		},  		.channel[3] = {  			.type = IIO_VOLTAGE, @@ -757,9 +771,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel2 = 2,  			.address = AD7793_CH_AIN1M_AIN1M,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 3, -			.scan_type = IIO_ST('s', 24, 32, 0) +			.scan_type = IIO_ST('u', 24, 32, 0)  		},  		.channel[4] = {  			.type = IIO_TEMP, @@ -769,7 +784,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |  			IIO_CHAN_INFO_SCALE_SEPARATE_BIT,  			.scan_index = 4, -			.scan_type = IIO_ST('s', 24, 32, 0), +			.scan_type = IIO_ST('u', 24, 32, 0),  		},  		.channel[5] = {  			.type = IIO_VOLTAGE, @@ -778,9 +793,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel = 4,  			.address = AD7793_CH_AVDD_MONITOR,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SEPARATE_BIT, +			IIO_CHAN_INFO_SCALE_SEPARATE_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 5, -			.scan_type = IIO_ST('s', 24, 32, 0), +			.scan_type = IIO_ST('u', 24, 32, 0),  		},  		.channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),  	}, @@ -793,9 +809,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel2 = 0,  			.address = AD7793_CH_AIN1P_AIN1M,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT 
| +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 0, -			.scan_type = IIO_ST('s', 16, 32, 0) +			.scan_type = IIO_ST('u', 16, 32, 0)  		},  		.channel[1] = {  			.type = IIO_VOLTAGE, @@ -805,9 +822,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel2 = 1,  			.address = AD7793_CH_AIN2P_AIN2M,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 1, -			.scan_type = IIO_ST('s', 16, 32, 0) +			.scan_type = IIO_ST('u', 16, 32, 0)  		},  		.channel[2] = {  			.type = IIO_VOLTAGE, @@ -817,9 +835,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel2 = 2,  			.address = AD7793_CH_AIN3P_AIN3M,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 2, -			.scan_type = IIO_ST('s', 16, 32, 0) +			.scan_type = IIO_ST('u', 16, 32, 0)  		},  		.channel[3] = {  			.type = IIO_VOLTAGE, @@ -830,9 +849,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel2 = 2,  			.address = AD7793_CH_AIN1M_AIN1M,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SHARED_BIT, +			IIO_CHAN_INFO_SCALE_SHARED_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 3, -			.scan_type = IIO_ST('s', 16, 32, 0) +			.scan_type = IIO_ST('u', 16, 32, 0)  		},  		.channel[4] = {  			.type = IIO_TEMP, @@ -842,7 +862,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |  			IIO_CHAN_INFO_SCALE_SEPARATE_BIT,  			.scan_index = 4, -			.scan_type = IIO_ST('s', 16, 32, 0), +			.scan_type = IIO_ST('u', 16, 32, 0),  		},  		.channel[5] = {  			.type = IIO_VOLTAGE, @@ -851,9 +871,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {  			.channel = 4,  			.address = 
AD7793_CH_AVDD_MONITOR,  			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | -			IIO_CHAN_INFO_SCALE_SEPARATE_BIT, +			IIO_CHAN_INFO_SCALE_SEPARATE_BIT | +			IIO_CHAN_INFO_OFFSET_SHARED_BIT,  			.scan_index = 5, -			.scan_type = IIO_ST('s', 16, 32, 0), +			.scan_type = IIO_ST('u', 16, 32, 0),  		},  		.channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),  	}, @@ -901,7 +922,7 @@ static int __devinit ad7793_probe(struct spi_device *spi)  	else if (voltage_uv)  		st->int_vref_mv = voltage_uv / 1000;  	else -		st->int_vref_mv = 2500; /* Build-in ref */ +		st->int_vref_mv = 1170; /* Build-in ref */  	spi_set_drvdata(spi, indio_dev);  	st->spi = spi; diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c index 5e2856c0e0b..9c2287b71d2 100644 --- a/drivers/staging/omapdrm/omap_connector.c +++ b/drivers/staging/omapdrm/omap_connector.c @@ -177,14 +177,11 @@ static int omap_connector_get_modes(struct drm_connector *connector)  			drm_mode_connector_update_edid_property(  					connector, edid);  			n = drm_add_edid_modes(connector, edid); -			kfree(connector->display_info.raw_edid); -			connector->display_info.raw_edid = edid;  		} else {  			drm_mode_connector_update_edid_property(  					connector, NULL); -			connector->display_info.raw_edid = NULL; -			kfree(edid);  		} +		kfree(edid);  	} else {  		struct drm_display_mode *mode = drm_mode_create(dev);  		struct omap_video_timings timings; diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index b06fd5b723f..d536756549e 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -189,7 +189,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode");  // Static vars definitions  // -static struct usb_device_id vt6656_table[] __devinitdata = { +static struct usb_device_id vt6656_table[] = {  	{USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},  	{}  }; diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c index 
ef360547ece..0ca857ac473 100644 --- a/drivers/staging/winbond/wbusb.c +++ b/drivers/staging/winbond/wbusb.c @@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver");  MODULE_LICENSE("GPL");  MODULE_VERSION("0.1"); -static const struct usb_device_id wb35_table[] __devinitconst = { +static const struct usb_device_id wb35_table[] = {  	{ USB_DEVICE(0x0416, 0x0035) },  	{ USB_DEVICE(0x18E8, 0x6201) },  	{ USB_DEVICE(0x18E8, 0x6206) }, diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 6e32ff6f2fa..5552fa7426b 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -673,8 +673,15 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)  	struct scsi_device *sd = pdv->pdv_sd;  	int result;  	struct pscsi_plugin_task *pt = cmd->priv; -	unsigned char *cdb = &pt->pscsi_cdb[0]; +	unsigned char *cdb; +	/* +	 * Special case for REPORT_LUNs handling where pscsi_plugin_task has +	 * not been allocated because TCM is handling the emulation directly. 
+	 */ +	if (!pt) +		return 0; +	cdb = &pt->pscsi_cdb[0];  	result = pt->pscsi_result;  	/*  	 * Hack to make sure that Write-Protect modepage is set if R/O mode is diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0eaae23d12b..4de3186dc44 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1165,8 +1165,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)  			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),  				cmd->data_length, size, cmd->t_task_cdb[0]); -		cmd->cmd_spdtl = size; -  		if (cmd->data_direction == DMA_TO_DEVICE) {  			pr_err("Rejecting underflow/overflow"  					" WRITE data\n"); @@ -2294,9 +2292,9 @@ transport_generic_get_mem(struct se_cmd *cmd)  	return 0;  out: -	while (i >= 0) { -		__free_page(sg_page(&cmd->t_data_sg[i])); +	while (i > 0) {  		i--; +		__free_page(sg_page(&cmd->t_data_sg[i]));  	}  	kfree(cmd->t_data_sg);  	cmd->t_data_sg = NULL; @@ -2323,9 +2321,12 @@ int transport_generic_new_cmd(struct se_cmd *cmd)  		if (ret < 0)  			goto out_fail;  	} - -	/* Workaround for handling zero-length control CDBs */ -	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) { +	/* +	 * If this command doesn't have any payload and we don't have to call +	 * into the fabric for data transfers, go ahead and complete it right +	 * away. +	 */ +	if (!cmd->data_length) {  		spin_lock_irq(&cmd->t_state_lock);  		cmd->t_state = TRANSPORT_COMPLETE;  		cmd->transport_state |= CMD_T_ACTIVE; diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index c5eb3c33c3d..eea69358ced 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -131,6 +131,7 @@ extern struct list_head ft_lport_list;  extern struct mutex ft_lport_lock;  extern struct fc4_prov ft_prov;  extern struct target_fabric_configfs *ft_configfs; +extern unsigned int ft_debug_logging;  /*   * Fabric methods. 
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index b9cb5006177..823e6922249 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -48,7 +48,7 @@  /*   * Dump cmd state for debugging.   */ -void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) +static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)  {  	struct fc_exch *ep;  	struct fc_seq *sp; @@ -80,6 +80,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)  	}  } +void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) +{ +	if (unlikely(ft_debug_logging)) +		_ft_dump_cmd(cmd, caller); +} +  static void ft_free_cmd(struct ft_cmd *cmd)  {  	struct fc_frame *fp; diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 87901fa74dd..3c9e5b57caa 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -456,7 +456,9 @@ static void ft_prlo(struct fc_rport_priv *rdata)  	struct ft_tport *tport;  	mutex_lock(&ft_lport_lock); -	tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]); +	tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP], +					  lockdep_is_held(&ft_lport_lock)); +  	if (!tport) {  		mutex_unlock(&ft_lport_lock);  		return; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 070b442c1f8..4720b4ba096 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -160,10 +160,12 @@ config SERIAL_KS8695_CONSOLE  config SERIAL_CLPS711X  	tristate "CLPS711X serial port support" -	depends on ARM && ARCH_CLPS711X +	depends on ARCH_CLPS711X  	select SERIAL_CORE +	default y  	help -	  ::: To be written ::: +	  This enables the driver for the on-chip UARTs of the Cirrus +	  Logic EP711x/EP721x/EP731x processors.  
config SERIAL_CLPS711X_CONSOLE  	bool "Support for console on CLPS711X serial port" @@ -173,9 +175,7 @@ config SERIAL_CLPS711X_CONSOLE  	  Even if you say Y here, the currently visible virtual console  	  (/dev/tty0) will still be used as the system console by default, but  	  you can alter that using a kernel command line option such as -	  "console=ttyCL1". (Try "man bootparam" or see the documentation of -	  your boot loader (lilo or loadlin) about how to pass options to the -	  kernel at boot time.) +	  "console=ttyCL1".  config SERIAL_SAMSUNG  	tristate "Samsung SoC serial support" diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 144cd3987d4..3ad079ffd04 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -1331,7 +1331,7 @@ static const struct spi_device_id ifx_id_table[] = {  MODULE_DEVICE_TABLE(spi, ifx_id_table);  /* spi operations */ -static const struct spi_driver ifx_spi_driver = { +static struct spi_driver ifx_spi_driver = {  	.driver = {  		.name = DRVNAME,  		.pm = &ifx_spi_pm, diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 2e341b81ff8..3a667eed63d 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -73,6 +73,7 @@  #define AUART_CTRL0_CLKGATE			(1 << 30)  #define AUART_CTRL2_CTSEN			(1 << 15) +#define AUART_CTRL2_RTSEN			(1 << 14)  #define AUART_CTRL2_RTS				(1 << 11)  #define AUART_CTRL2_RXE				(1 << 9)  #define AUART_CTRL2_TXE				(1 << 8) @@ -259,9 +260,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)  	u32 ctrl = readl(u->membase + AUART_CTRL2); -	ctrl &= ~AUART_CTRL2_RTS; -	if (mctrl & TIOCM_RTS) -		ctrl |= AUART_CTRL2_RTS; +	ctrl &= ~AUART_CTRL2_RTSEN; +	if (mctrl & TIOCM_RTS) { +		if (u->state->port.flags & ASYNC_CTS_FLOW) +			ctrl |= AUART_CTRL2_RTSEN; +	} +  	s->ctrl = mctrl;  	writel(ctrl, u->membase + AUART_CTRL2);  } @@ -359,9 +363,9 @@ static void mxs_auart_settermios(struct uart_port *u,  	
/* figure out the hardware flow control settings */  	if (cflag & CRTSCTS) -		ctrl2 |= AUART_CTRL2_CTSEN; +		ctrl2 |= AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN;  	else -		ctrl2 &= ~AUART_CTRL2_CTSEN; +		ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN);  	/* set baud rate */  	baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk); diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index 654755a990d..333c8d012b0 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -1348,10 +1348,16 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser)  static int pmz_poll_get_char(struct uart_port *port)  {  	struct uart_pmac_port *uap = (struct uart_pmac_port *)port; +	int tries = 2; -	while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0) -		udelay(5); -	return read_zsdata(uap); +	while (tries) { +		if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0) +			return read_zsdata(uap); +		if (tries--) +			udelay(5); +	} + +	return NO_POLL_CHAR;  }  static void pmz_poll_put_char(struct uart_port *port, unsigned char c) diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index a7773a3e02b..7065df6036c 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -13,7 +13,7 @@ config USB_ARCH_HAS_OHCI  	default y if PXA3xx  	default y if ARCH_EP93XX  	default y if ARCH_AT91 -	default y if ARCH_PNX4008 && I2C +	default y if ARCH_PNX4008  	default y if MFD_TC6393XB  	default y if ARCH_W90X900  	default y if ARCH_DAVINCI_DA8XX diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig index 8337fb5d988..47e499c9c0b 100644 --- a/drivers/usb/chipidea/Kconfig +++ b/drivers/usb/chipidea/Kconfig @@ -1,9 +1,9 @@  config USB_CHIPIDEA  	tristate "ChipIdea Highspeed Dual Role Controller" -	depends on USB +	depends on USB || USB_GADGET  	help -          Say Y here if your system has a dual role high speed USB -          controller based on ChipIdea silicon IP. 
Currently, only the +	  Say Y here if your system has a dual role high speed USB +	  controller based on ChipIdea silicon IP. Currently, only the  	  peripheral mode is supported.  	  When compiled dynamically, the module will be called ci-hdrc.ko. @@ -12,7 +12,7 @@ if USB_CHIPIDEA  config USB_CHIPIDEA_UDC  	bool "ChipIdea device controller" -	depends on USB_GADGET +	depends on USB_GADGET=y || USB_GADGET=USB_CHIPIDEA  	select USB_GADGET_DUALSPEED  	help  	  Say Y here to enable device controller functionality of the @@ -20,6 +20,7 @@ config USB_CHIPIDEA_UDC  config USB_CHIPIDEA_HOST  	bool "ChipIdea host controller" +	depends on USB=y || USB=USB_CHIPIDEA  	select USB_EHCI_ROOT_HUB_TT  	help  	  Say Y here to enable host controller functionality of the diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 56d6bf66848..f763ed7ba91 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1104,7 +1104,8 @@ skip_normal_probe:  	} -	if (data_interface->cur_altsetting->desc.bNumEndpoints < 2) +	if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 || +	    control_interface->cur_altsetting->desc.bNumEndpoints == 0)  		return -EINVAL;  	epctrl = &control_interface->cur_altsetting->endpoint[0].desc; diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 90e82e288eb..0e523092615 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -669,6 +669,8 @@ static int eth_stop(struct net_device *net)  	spin_lock_irqsave(&dev->lock, flags);  	if (dev->port_usb) {  		struct gether	*link = dev->port_usb; +		const struct usb_endpoint_descriptor *in; +		const struct usb_endpoint_descriptor *out;  		if (link->close)  			link->close(link); @@ -682,10 +684,14 @@ static int eth_stop(struct net_device *net)  		 * their own pace; the network stack can handle old packets.  		 * For the moment we leave this here, since it works.  		 
*/ +		in = link->in_ep->desc; +		out = link->out_ep->desc;  		usb_ep_disable(link->in_ep);  		usb_ep_disable(link->out_ep);  		if (netif_carrier_ok(net)) {  			DBG(dev, "host still using in/out endpoints\n"); +			link->in_ep->desc = in; +			link->out_ep->desc = out;  			usb_ep_enable(link->in_ep);  			usb_ep_enable(link->out_ep);  		} diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index bb55eb4a7d4..d7fe287d067 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -56,15 +56,6 @@  #define	EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT		8  #define	EHCI_INSNREG05_ULPI_WRDATA_SHIFT		0 -/* Errata i693 */ -static struct clk	*utmi_p1_fck; -static struct clk	*utmi_p2_fck; -static struct clk	*xclk60mhsp1_ck; -static struct clk	*xclk60mhsp2_ck; -static struct clk	*usbhost_p1_fck; -static struct clk	*usbhost_p2_fck; -static struct clk	*init_60m_fclk; -  /*-------------------------------------------------------------------------*/  static const struct hc_driver ehci_omap_hc_driver; @@ -80,40 +71,6 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)  	return __raw_readl(base + reg);  } -/* Erratum i693 workaround sequence */ -static void omap_ehci_erratum_i693(struct ehci_hcd *ehci) -{ -	int ret = 0; - -	/* Switch to the internal 60 MHz clock */ -	ret = clk_set_parent(utmi_p1_fck, init_60m_fclk); -	if (ret != 0) -		ehci_err(ehci, "init_60m_fclk set parent" -			"failed error:%d\n", ret); - -	ret = clk_set_parent(utmi_p2_fck, init_60m_fclk); -	if (ret != 0) -		ehci_err(ehci, "init_60m_fclk set parent" -			"failed error:%d\n", ret); - -	clk_enable(usbhost_p1_fck); -	clk_enable(usbhost_p2_fck); - -	/* Wait 1ms and switch back to the external clock */ -	mdelay(1); -	ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck); -	if (ret != 0) -		ehci_err(ehci, "xclk60mhsp1_ck set parent" -			"failed error:%d\n", ret); - -	ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck); -	if (ret != 0) -		ehci_err(ehci, "xclk60mhsp2_ck set parent" -			"failed 
error:%d\n", ret); - -	clk_disable(usbhost_p1_fck); -	clk_disable(usbhost_p2_fck); -}  static void omap_ehci_soft_phy_reset(struct usb_hcd *hcd, u8 port)  { @@ -195,50 +152,6 @@ static int omap_ehci_init(struct usb_hcd *hcd)  	return rc;  } -static int omap_ehci_hub_control( -	struct usb_hcd	*hcd, -	u16		typeReq, -	u16		wValue, -	u16		wIndex, -	char		*buf, -	u16		wLength -) -{ -	struct ehci_hcd	*ehci = hcd_to_ehci(hcd); -	u32 __iomem *status_reg = &ehci->regs->port_status[ -				(wIndex & 0xff) - 1]; -	u32		temp; -	unsigned long	flags; -	int		retval = 0; - -	spin_lock_irqsave(&ehci->lock, flags); - -	if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { -		temp = ehci_readl(ehci, status_reg); -		if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { -			retval = -EPIPE; -			goto done; -		} - -		temp &= ~PORT_WKCONN_E; -		temp |= PORT_WKDISC_E | PORT_WKOC_E; -		ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); - -		omap_ehci_erratum_i693(ehci); - -		set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); -		goto done; -	} - -	spin_unlock_irqrestore(&ehci->lock, flags); - -	/* Handle the hub control events here */ -	return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); -done: -	spin_unlock_irqrestore(&ehci->lock, flags); -	return retval; -} -  static void disable_put_regulator(  		struct ehci_hcd_omap_platform_data *pdata)  { @@ -351,79 +264,9 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)  		goto err_pm_runtime;  	} -	/* get clocks */ -	utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); -	if (IS_ERR(utmi_p1_fck)) { -		ret = PTR_ERR(utmi_p1_fck); -		dev_err(dev, "utmi_p1_gfclk failed error:%d\n",	ret); -		goto err_add_hcd; -	} - -	xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); -	if (IS_ERR(xclk60mhsp1_ck)) { -		ret = PTR_ERR(xclk60mhsp1_ck); -		dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret); -		goto err_utmi_p1_fck; -	} - -	utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk"); -	if (IS_ERR(utmi_p2_fck)) { -		ret = 
PTR_ERR(utmi_p2_fck); -		dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret); -		goto err_xclk60mhsp1_ck; -	} - -	xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck"); -	if (IS_ERR(xclk60mhsp2_ck)) { -		ret = PTR_ERR(xclk60mhsp2_ck); -		dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret); -		goto err_utmi_p2_fck; -	} - -	usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk"); -	if (IS_ERR(usbhost_p1_fck)) { -		ret = PTR_ERR(usbhost_p1_fck); -		dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret); -		goto err_xclk60mhsp2_ck; -	} - -	usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk"); -	if (IS_ERR(usbhost_p2_fck)) { -		ret = PTR_ERR(usbhost_p2_fck); -		dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret); -		goto err_usbhost_p1_fck; -	} - -	init_60m_fclk = clk_get(dev, "init_60m_fclk"); -	if (IS_ERR(init_60m_fclk)) { -		ret = PTR_ERR(init_60m_fclk); -		dev_err(dev, "init_60m_fclk failed error:%d\n", ret); -		goto err_usbhost_p2_fck; -	}  	return 0; -err_usbhost_p2_fck: -	clk_put(usbhost_p2_fck); - -err_usbhost_p1_fck: -	clk_put(usbhost_p1_fck); - -err_xclk60mhsp2_ck: -	clk_put(xclk60mhsp2_ck); - -err_utmi_p2_fck: -	clk_put(utmi_p2_fck); - -err_xclk60mhsp1_ck: -	clk_put(xclk60mhsp1_ck); - -err_utmi_p1_fck: -	clk_put(utmi_p1_fck); - -err_add_hcd: -	usb_remove_hcd(hcd); -  err_pm_runtime:  	disable_put_regulator(pdata);  	pm_runtime_put_sync(dev); @@ -454,14 +297,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)  	iounmap(hcd->regs);  	usb_put_hcd(hcd); -	clk_put(utmi_p1_fck); -	clk_put(utmi_p2_fck); -	clk_put(xclk60mhsp1_ck); -	clk_put(xclk60mhsp2_ck); -	clk_put(usbhost_p1_fck); -	clk_put(usbhost_p2_fck); -	clk_put(init_60m_fclk); -  	pm_runtime_put_sync(dev);  	pm_runtime_disable(dev); @@ -532,7 +367,7 @@ static const struct hc_driver ehci_omap_hc_driver = {  	 * root hub support  	 */  	.hub_status_data	= ehci_hub_status_data, -	.hub_control		= omap_ehci_hub_control, +	.hub_control		= ehci_hub_control,  	.bus_suspend		= ehci_bus_suspend,  	
.bus_resume		= ehci_bus_resume, diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c index 58c96bd50d2..0c9e43cfaff 100644 --- a/drivers/usb/host/ehci-sead3.c +++ b/drivers/usb/host/ehci-sead3.c @@ -40,7 +40,7 @@ static int ehci_sead3_setup(struct usb_hcd *hcd)  	ehci->need_io_watchdog = 0;  	/* Set burst length to 16 words. */ -	ehci_writel(ehci, 0x1010, &ehci->regs->reserved[1]); +	ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]);  	return ret;  } diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index 950e95efa38..26dedb30ad0 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -799,11 +799,12 @@ static int tegra_ehci_remove(struct platform_device *pdev)  #endif  	usb_remove_hcd(hcd); -	usb_put_hcd(hcd);  	tegra_usb_phy_close(tegra->phy);  	iounmap(hcd->regs); +	usb_put_hcd(hcd); +  	clk_disable_unprepare(tegra->clk);  	clk_put(tegra->clk); diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 2ed112d3e15..256326322cf 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -543,12 +543,12 @@ static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)  			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,  			    short_ok ? 
"" : "not_",  			    PTD_GET_COUNT(ptd), ep->maxpacket, len); +			/* save the data underrun error code for later and +			 * proceed with the status stage +			 */ +			urb->actual_length += PTD_GET_COUNT(ptd);  			if (usb_pipecontrol(urb->pipe)) {  				ep->nextpid = USB_PID_ACK; -				/* save the data underrun error code for later and -				 * proceed with the status stage -				 */ -				urb->actual_length += PTD_GET_COUNT(ptd);  				BUG_ON(urb->actual_length > urb->transfer_buffer_length);  				if (urb->status == -EINPROGRESS) diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index df0828cb2aa..c5e9e4a76f1 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -800,6 +800,13 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)  }  EXPORT_SYMBOL_GPL(usb_enable_xhci_ports); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) +{ +	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0); +	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0); +} +EXPORT_SYMBOL_GPL(usb_disable_xhci_ports); +  /**   * PCI Quirks for xHCI.   
* diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index b1002a8ef96..ef004a5de20 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -10,6 +10,7 @@ void usb_amd_quirk_pll_disable(void);  void usb_amd_quirk_pll_enable(void);  bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);  void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);  #else  static inline void usb_amd_quirk_pll_disable(void) {}  static inline void usb_amd_quirk_pll_enable(void) {} diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 18b231b0c5d..9bfd4ca1153 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -94,11 +94,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)  		xhci->quirks |= XHCI_EP_LIMIT_QUIRK;  		xhci->limit_active_eps = 64;  		xhci->quirks |= XHCI_SW_BW_CHECKING; +		/* +		 * PPT desktop boards DH77EB and DH77DF will power back on after +		 * a few seconds of being shutdown.  The fix for this is to +		 * switch the ports from xHCI to EHCI on shutdown.  We can't use +		 * DMI information to find those particular boards (since each +		 * vendor will change the board name), so we have to key off all +		 * PPT chipsets. 
+		 */ +		xhci->quirks |= XHCI_SPURIOUS_REBOOT;  	}  	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&  			pdev->device == PCI_DEVICE_ID_ASROCK_P67) {  		xhci->quirks |= XHCI_RESET_ON_RESUME;  		xhci_dbg(xhci, "QUIRK: Resetting on resume\n"); +		xhci->quirks |= XHCI_TRUST_TX_LENGTH;  	}  	if (pdev->vendor == PCI_VENDOR_ID_VIA)  		xhci->quirks |= XHCI_RESET_ON_RESUME; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 8275645889d..643c2f3f3e7 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -145,29 +145,37 @@ static void next_trb(struct xhci_hcd *xhci,   */  static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)  { -	union xhci_trb *next;  	unsigned long long addr;  	ring->deq_updates++; -	/* If this is not event ring, there is one more usable TRB */ +	/* +	 * If this is not event ring, and the dequeue pointer +	 * is not on a link TRB, there is one more usable TRB +	 */  	if (ring->type != TYPE_EVENT &&  			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))  		ring->num_trbs_free++; -	next = ++(ring->dequeue); -	/* Update the dequeue pointer further if that was a link TRB or we're at -	 * the end of an event ring segment (which doesn't have link TRBS) -	 */ -	while (last_trb(xhci, ring, ring->deq_seg, next)) { -		if (ring->type == TYPE_EVENT &&	last_trb_on_last_seg(xhci, -				ring, ring->deq_seg, next)) { -			ring->cycle_state = (ring->cycle_state ? 0 : 1); +	do { +		/* +		 * Update the dequeue pointer further if that was a link TRB or +		 * we're at the end of an event ring segment (which doesn't have +		 * link TRBS) +		 */ +		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) { +			if (ring->type == TYPE_EVENT && +					last_trb_on_last_seg(xhci, ring, +						ring->deq_seg, ring->dequeue)) { +				ring->cycle_state = (ring->cycle_state ? 
0 : 1); +			} +			ring->deq_seg = ring->deq_seg->next; +			ring->dequeue = ring->deq_seg->trbs; +		} else { +			ring->dequeue++;  		} -		ring->deq_seg = ring->deq_seg->next; -		ring->dequeue = ring->deq_seg->trbs; -		next = ring->dequeue; -	} +	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)); +  	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);  } @@ -2073,8 +2081,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,  		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)  			trb_comp_code = COMP_SHORT_TX;  		else -			xhci_warn(xhci, "WARN Successful completion on short TX: " -					"needs XHCI_TRUST_TX_LENGTH quirk?\n"); +			xhci_warn_ratelimited(xhci, +					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");  	case COMP_SHORT_TX:  		break;  	case COMP_STOP: diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7648b2d4b26..c59d5b5b6c7 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -166,7 +166,7 @@ int xhci_reset(struct xhci_hcd *xhci)  	xhci_writel(xhci, command, &xhci->op_regs->command);  	ret = handshake(xhci, &xhci->op_regs->command, -			CMD_RESET, 0, 250 * 1000); +			CMD_RESET, 0, 10 * 1000 * 1000);  	if (ret)  		return ret; @@ -175,7 +175,8 @@ int xhci_reset(struct xhci_hcd *xhci)  	 * xHCI cannot write to any doorbells or operational registers other  	 * than status until the "Controller Not Ready" flag is cleared.  	 
*/ -	ret = handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); +	ret = handshake(xhci, &xhci->op_regs->status, +			STS_CNR, 0, 10 * 1000 * 1000);  	for (i = 0; i < 2; ++i) {  		xhci->bus_state[i].port_c_suspend = 0; @@ -658,6 +659,9 @@ void xhci_shutdown(struct usb_hcd *hcd)  {  	struct xhci_hcd *xhci = hcd_to_xhci(hcd); +	if (xhci->quirks && XHCI_SPURIOUS_REBOOT) +		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller)); +  	spin_lock_irq(&xhci->lock);  	xhci_halt(xhci);  	spin_unlock_irq(&xhci->lock); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 55c0785810c..c713256297a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1494,6 +1494,7 @@ struct xhci_hcd {  #define XHCI_TRUST_TX_LENGTH	(1 << 10)  #define XHCI_LPM_SUPPORT	(1 << 11)  #define XHCI_INTEL_HOST		(1 << 12) +#define XHCI_SPURIOUS_REBOOT	(1 << 13)  	unsigned int		num_active_eps;  	unsigned int		limit_active_eps;  	/* There are two roothubs to keep track of bus suspend info for */ @@ -1537,6 +1538,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)  	dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)  #define xhci_warn(xhci, fmt, args...) \  	dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args) +#define xhci_warn_ratelimited(xhci, fmt, args...) \ +	dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)  /* TODO: copied from ehci.h - can be refactored? 
*/  /* xHCI spec says all registers are little endian */ diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c index ff08015b230..ae794b90766 100644 --- a/drivers/usb/misc/emi62.c +++ b/drivers/usb/misc/emi62.c @@ -232,7 +232,7 @@ wraperr:  	return err;  } -static const struct usb_device_id id_table[] __devinitconst = { +static const struct usb_device_id id_table[] = {  	{ USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },  	{ }                                             /* Terminating entry */  }; diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index ef0c3f9f094..6259f0d9932 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -8,7 +8,7 @@ config USB_MUSB_HDRC  	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'  	depends on USB && USB_GADGET  	select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN) -	select NOP_USB_XCEIV if (SOC_OMAPTI81XX || SOC_OMAPAM33XX) +	select NOP_USB_XCEIV if (SOC_TI81XX || SOC_AM33XX)  	select TWL4030_USB if MACH_OMAP_3430SDP  	select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA  	select USB_OTG_UTILS @@ -57,7 +57,7 @@ config USB_MUSB_AM35X  config USB_MUSB_DSPS  	tristate "TI DSPS platforms" -	depends on SOC_OMAPTI81XX || SOC_OMAPAM33XX +	depends on SOC_TI81XX || SOC_AM33XX  config USB_MUSB_BLACKFIN  	tristate "Blackfin" diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 217808d9fbe..494772fc9e2 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -479,9 +479,9 @@ static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)  		ret = -ENODEV;  		goto err0;  	} -	strcpy((u8 *)res->name, "mc");  	res->parent = NULL;  	resources[1] = *res; +	resources[1].name = "mc";  	/* allocate the child platform device */  	musb = platform_device_alloc("musb-hdrc", -1); @@ -566,27 +566,28 @@ static int __devinit dsps_probe(struct platform_device *pdev)  	}  	platform_set_drvdata(pdev, glue); -	/* create the 
child platform device for first instances of musb */ -	ret = dsps_create_musb_pdev(glue, 0); -	if (ret != 0) { -		dev_err(&pdev->dev, "failed to create child pdev\n"); -		goto err2; -	} -  	/* enable the usbss clocks */  	pm_runtime_enable(&pdev->dev);  	ret = pm_runtime_get_sync(&pdev->dev);  	if (ret < 0) {  		dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); +		goto err2; +	} + +	/* create the child platform device for first instances of musb */ +	ret = dsps_create_musb_pdev(glue, 0); +	if (ret != 0) { +		dev_err(&pdev->dev, "failed to create child pdev\n");  		goto err3;  	}  	return 0;  err3: -	pm_runtime_disable(&pdev->dev); +	pm_runtime_put(&pdev->dev);  err2: +	pm_runtime_disable(&pdev->dev);  	kfree(glue->wrp);  err1:  	kfree(glue); diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 8c9bb1ad306..681da06170c 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -603,12 +603,12 @@ static int usbhsc_resume(struct device *dev)  	struct usbhs_priv *priv = dev_get_drvdata(dev);  	struct platform_device *pdev = usbhs_priv_to_pdev(priv); -	usbhs_platform_call(priv, phy_reset, pdev); -  	if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))  		usbhsc_power_ctrl(priv, 1); -	usbhsc_hotplug(priv); +	usbhs_platform_call(priv, phy_reset, pdev); + +	usbhsc_drvcllbck_notify_hotplug(pdev);  	return 0;  } diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c index 1834cf50888..9b69a132329 100644 --- a/drivers/usb/renesas_usbhs/mod_host.c +++ b/drivers/usb/renesas_usbhs/mod_host.c @@ -1266,6 +1266,12 @@ static int usbhsh_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,  	return ret;  } +static int usbhsh_bus_nop(struct usb_hcd *hcd) +{ +	/* nothing to do */ +	return 0; +} +  static struct hc_driver usbhsh_driver = {  	.description =		usbhsh_hcd_name,  	.hcd_priv_size =	sizeof(struct usbhsh_hpriv), @@ -1290,6 +1296,8 @@ static struct hc_driver usbhsh_driver 
= {  	 */  	.hub_status_data =	usbhsh_hub_status_data,  	.hub_control =		usbhsh_hub_control, +	.bus_suspend =		usbhsh_bus_nop, +	.bus_resume =		usbhsh_bus_nop,  };  /* diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c index f398d1e3447..c15f2e7cefc 100644 --- a/drivers/usb/serial/bus.c +++ b/drivers/usb/serial/bus.c @@ -61,18 +61,23 @@ static int usb_serial_device_probe(struct device *dev)  		goto exit;  	} +	/* make sure suspend/resume doesn't race against port_probe */ +	retval = usb_autopm_get_interface(port->serial->interface); +	if (retval) +		goto exit; +  	driver = port->serial->type;  	if (driver->port_probe) {  		retval = driver->port_probe(port);  		if (retval) -			goto exit; +			goto exit_with_autopm;  	}  	retval = device_create_file(dev, &dev_attr_port_number);  	if (retval) {  		if (driver->port_remove)  			retval = driver->port_remove(port); -		goto exit; +		goto exit_with_autopm;  	}  	minor = port->number; @@ -81,6 +86,8 @@ static int usb_serial_device_probe(struct device *dev)  		 "%s converter now attached to ttyUSB%d\n",  		 driver->description, minor); +exit_with_autopm: +	usb_autopm_put_interface(port->serial->interface);  exit:  	return retval;  } @@ -96,6 +103,9 @@ static int usb_serial_device_remove(struct device *dev)  	if (!port)  		return -ENODEV; +	/* make sure suspend/resume doesn't race against port_remove */ +	usb_autopm_get_interface(port->serial->interface); +  	device_remove_file(&port->dev, &dev_attr_port_number);  	driver = port->serial->type; @@ -107,6 +117,7 @@ static int usb_serial_device_remove(struct device *dev)  	dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",  		 driver->description, minor); +	usb_autopm_put_interface(port->serial->interface);  	return retval;  } diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index bc912e5a3be..5620db6469e 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -811,6 +811,7 @@ static struct usb_device_id 
id_table_combined [] = {  	{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },  	{ USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },  	{ USB_DEVICE(PI_VID, PI_E861_PID) }, +	{ USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },  	{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },  	{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),  		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 5661c7e2d41..5dd96ca6c38 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -795,6 +795,13 @@  #define PI_E861_PID         0x1008  /* E-861 piezo controller USB connection */  /* + * Kondo Kagaku Co.Ltd. + * http://www.kondo-robot.com/EN + */ +#define KONDO_VID 		0x165c +#define KONDO_USB_SERIAL_PID	0x0002 + +/*   * Bayer Ascensia Contour blood glucose meter USB-converter cable.   * http://winglucofacts.com/cables/   */ diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c index 5811d34b6c6..2cb30c53583 100644 --- a/drivers/usb/serial/ipw.c +++ b/drivers/usb/serial/ipw.c @@ -227,7 +227,6 @@ static void ipw_release(struct usb_serial *serial)  {  	struct usb_wwan_intf_private *data = usb_get_serial_data(serial); -	usb_wwan_release(serial);  	usb_set_serial_data(serial, NULL);  	kfree(data);  } @@ -309,12 +308,12 @@ static struct usb_serial_driver ipw_device = {  	.description =		"IPWireless converter",  	.id_table =		id_table,  	.num_ports =		1, -	.disconnect =		usb_wwan_disconnect,  	.open =			ipw_open,  	.close =		ipw_close,  	.probe =		ipw_probe,  	.attach =		usb_wwan_startup,  	.release =		ipw_release, +	.port_remove =		usb_wwan_port_remove,  	.dtr_rts =		ipw_dtr_rts,  	.write =		usb_wwan_write,  }; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 57eca244842..2f6da1e89bf 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -82,8 +82,7 @@   * Defines used for sending commands to port   */ -#define 
WAIT_FOR_EVER   (HZ * 0)	/* timeout urb is wait for ever */ -#define MOS_WDR_TIMEOUT (HZ * 5)	/* default urb timeout */ +#define MOS_WDR_TIMEOUT		5000	/* default urb timeout */  #define MOS_PORT1       0x0200  #define MOS_PORT2       0x0300 @@ -1232,9 +1231,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)  		return 0;  	spin_lock_irqsave(&mos7840_port->pool_lock, flags); -	for (i = 0; i < NUM_URBS; ++i) -		if (mos7840_port->busy[i]) -			chars += URB_TRANSFER_BUFFER_SIZE; +	for (i = 0; i < NUM_URBS; ++i) { +		if (mos7840_port->busy[i]) { +			struct urb *urb = mos7840_port->write_urb_pool[i]; +			chars += urb->transfer_buffer_length; +		} +	}  	spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);  	dbg("%s - returns %d", __func__, chars);  	return chars; @@ -1344,7 +1346,7 @@ static void mos7840_close(struct usb_serial_port *port)  static void mos7840_block_until_chase_response(struct tty_struct *tty,  					struct moschip_port *mos7840_port)  { -	int timeout = 1 * HZ; +	int timeout = msecs_to_jiffies(1000);  	int wait = 10;  	int count; @@ -2672,7 +2674,7 @@ static int mos7840_startup(struct usb_serial *serial)  	/* setting configuration feature to one */  	usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), -			(__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ); +			(__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, MOS_WDR_TIMEOUT);  	return 0;  error:  	for (/* nothing */; i >= 0; i--) { diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 08ff9b86204..cc40f47ecea 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -80,85 +80,9 @@ static void option_instat_callback(struct urb *urb);  #define OPTION_PRODUCT_GTM380_MODEM		0x7201  #define HUAWEI_VENDOR_ID			0x12D1 -#define HUAWEI_PRODUCT_E600			0x1001 -#define HUAWEI_PRODUCT_E220			0x1003 -#define HUAWEI_PRODUCT_E220BIS			0x1004 -#define HUAWEI_PRODUCT_E1401			0x1401 -#define HUAWEI_PRODUCT_E1402			0x1402 -#define HUAWEI_PRODUCT_E1403			0x1403 
-#define HUAWEI_PRODUCT_E1404			0x1404 -#define HUAWEI_PRODUCT_E1405			0x1405 -#define HUAWEI_PRODUCT_E1406			0x1406 -#define HUAWEI_PRODUCT_E1407			0x1407 -#define HUAWEI_PRODUCT_E1408			0x1408 -#define HUAWEI_PRODUCT_E1409			0x1409 -#define HUAWEI_PRODUCT_E140A			0x140A -#define HUAWEI_PRODUCT_E140B			0x140B -#define HUAWEI_PRODUCT_E140C			0x140C -#define HUAWEI_PRODUCT_E140D			0x140D -#define HUAWEI_PRODUCT_E140E			0x140E -#define HUAWEI_PRODUCT_E140F			0x140F -#define HUAWEI_PRODUCT_E1410			0x1410 -#define HUAWEI_PRODUCT_E1411			0x1411 -#define HUAWEI_PRODUCT_E1412			0x1412 -#define HUAWEI_PRODUCT_E1413			0x1413 -#define HUAWEI_PRODUCT_E1414			0x1414 -#define HUAWEI_PRODUCT_E1415			0x1415 -#define HUAWEI_PRODUCT_E1416			0x1416 -#define HUAWEI_PRODUCT_E1417			0x1417 -#define HUAWEI_PRODUCT_E1418			0x1418 -#define HUAWEI_PRODUCT_E1419			0x1419 -#define HUAWEI_PRODUCT_E141A			0x141A -#define HUAWEI_PRODUCT_E141B			0x141B -#define HUAWEI_PRODUCT_E141C			0x141C -#define HUAWEI_PRODUCT_E141D			0x141D -#define HUAWEI_PRODUCT_E141E			0x141E -#define HUAWEI_PRODUCT_E141F			0x141F -#define HUAWEI_PRODUCT_E1420			0x1420 -#define HUAWEI_PRODUCT_E1421			0x1421 -#define HUAWEI_PRODUCT_E1422			0x1422 -#define HUAWEI_PRODUCT_E1423			0x1423 -#define HUAWEI_PRODUCT_E1424			0x1424 -#define HUAWEI_PRODUCT_E1425			0x1425 -#define HUAWEI_PRODUCT_E1426			0x1426 -#define HUAWEI_PRODUCT_E1427			0x1427 -#define HUAWEI_PRODUCT_E1428			0x1428 -#define HUAWEI_PRODUCT_E1429			0x1429 -#define HUAWEI_PRODUCT_E142A			0x142A -#define HUAWEI_PRODUCT_E142B			0x142B -#define HUAWEI_PRODUCT_E142C			0x142C -#define HUAWEI_PRODUCT_E142D			0x142D -#define HUAWEI_PRODUCT_E142E			0x142E -#define HUAWEI_PRODUCT_E142F			0x142F -#define HUAWEI_PRODUCT_E1430			0x1430 -#define HUAWEI_PRODUCT_E1431			0x1431 -#define HUAWEI_PRODUCT_E1432			0x1432 -#define HUAWEI_PRODUCT_E1433			0x1433 -#define HUAWEI_PRODUCT_E1434			0x1434 -#define HUAWEI_PRODUCT_E1435			0x1435 -#define HUAWEI_PRODUCT_E1436			0x1436 -#define 
HUAWEI_PRODUCT_E1437			0x1437 -#define HUAWEI_PRODUCT_E1438			0x1438 -#define HUAWEI_PRODUCT_E1439			0x1439 -#define HUAWEI_PRODUCT_E143A			0x143A -#define HUAWEI_PRODUCT_E143B			0x143B -#define HUAWEI_PRODUCT_E143C			0x143C -#define HUAWEI_PRODUCT_E143D			0x143D -#define HUAWEI_PRODUCT_E143E			0x143E -#define HUAWEI_PRODUCT_E143F			0x143F  #define HUAWEI_PRODUCT_K4505			0x1464  #define HUAWEI_PRODUCT_K3765			0x1465 -#define HUAWEI_PRODUCT_E14AC			0x14AC -#define HUAWEI_PRODUCT_K3806			0x14AE  #define HUAWEI_PRODUCT_K4605			0x14C6 -#define HUAWEI_PRODUCT_K5005			0x14C8 -#define HUAWEI_PRODUCT_K3770			0x14C9 -#define HUAWEI_PRODUCT_K3771			0x14CA -#define HUAWEI_PRODUCT_K4510			0x14CB -#define HUAWEI_PRODUCT_K4511			0x14CC -#define HUAWEI_PRODUCT_ETS1220			0x1803 -#define HUAWEI_PRODUCT_E353			0x1506 -#define HUAWEI_PRODUCT_E173S			0x1C05  #define QUANTA_VENDOR_ID			0x0408  #define QUANTA_PRODUCT_Q101			0xEA02 @@ -615,104 +539,123 @@ static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },  	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },  	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 
0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 
0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 
0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),  		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),  		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),  		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) }, -	
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) }, -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */ -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */ -	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */ +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) }, +	{ 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, +	{ 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) }, +	{ 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, +	{ 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) }, +	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) }, + +  	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },  	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },  	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, @@ -943,6 +886,8 @@ static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),  	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff), +	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, @@ -1297,8 +1242,8 @@ static struct usb_serial_driver option_1port_device = {  	.tiocmset          = usb_wwan_tiocmset,  	.ioctl             = usb_wwan_ioctl,  	.attach            = usb_wwan_startup, -	.disconnect        = usb_wwan_disconnect,  	.release           = option_release, +	.port_remove	   = usb_wwan_port_remove,  	.read_int_callback = option_instat_callback,  #ifdef CONFIG_PM  	.suspend           = usb_wwan_suspend, @@ -1414,8 +1359,6 @@ static void option_release(struct usb_serial *serial)  	struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);  	struct option_private *priv = 
intfdata->private; -	usb_wwan_release(serial); -  	kfree(priv);  	kfree(intfdata);  } diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 8d103019d6a..bfd50779f0c 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -199,43 +199,49 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)  	/* default to enabling interface */  	altsetting = 0; -	switch (ifnum) { -		/* Composite mode; don't bind to the QMI/net interface as that -		 * gets handled by other drivers. -		 */ +	/* Composite mode; don't bind to the QMI/net interface as that +	 * gets handled by other drivers. +	 */ + +	if (is_gobi1k) {  		/* Gobi 1K USB layout:  		 * 0: serial port (doesn't respond)  		 * 1: serial port (doesn't respond)  		 * 2: AT-capable modem port  		 * 3: QMI/net -		 * -		 * Gobi 2K+ USB layout: +		 */ +		if (ifnum == 2) +			dev_dbg(dev, "Modem port found\n"); +		else +			altsetting = -1; +	} else { +		/* Gobi 2K+ USB layout:  		 * 0: QMI/net  		 * 1: DM/DIAG (use libqcdm from ModemManager for communication)  		 * 2: AT-capable modem port  		 * 3: NMEA  		 */ - -	case 1: -		if (is_gobi1k) +		switch (ifnum) { +		case 0: +			/* Don't claim the QMI/net interface */  			altsetting = -1; -		else +			break; +		case 1:  			dev_dbg(dev, "Gobi 2K+ DM/DIAG interface found\n"); -		break; -	case 2: -		dev_dbg(dev, "Modem port found\n"); -		break; -	case 3: -		if (is_gobi1k) -			altsetting = -1; -		else +			break; +		case 2: +			dev_dbg(dev, "Modem port found\n"); +			break; +		case 3:  			/*  			 * NMEA (serial line 9600 8N1)  			 * # echo "\$GPS_START" > /dev/ttyUSBx  			 * # echo "\$GPS_STOP"  > /dev/ttyUSBx  			 */  			dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n"); +			break; +		}  	}  done: @@ -262,8 +268,7 @@ static void qc_release(struct usb_serial *serial)  {  	struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); -	/* Call usb_wwan release & free the private data allocated in qcprobe */ -	
usb_wwan_release(serial); +	/* Free the private data allocated in qcprobe */  	usb_set_serial_data(serial, NULL);  	kfree(priv);  } @@ -283,8 +288,8 @@ static struct usb_serial_driver qcdevice = {  	.write_room	     = usb_wwan_write_room,  	.chars_in_buffer     = usb_wwan_chars_in_buffer,  	.attach		     = usb_wwan_startup, -	.disconnect	     = usb_wwan_disconnect,  	.release	     = qc_release, +	.port_remove	     = usb_wwan_port_remove,  #ifdef CONFIG_PM  	.suspend	     = usb_wwan_suspend,  	.resume		     = usb_wwan_resume, diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h index c47b6ec0306..1f034d2397c 100644 --- a/drivers/usb/serial/usb-wwan.h +++ b/drivers/usb/serial/usb-wwan.h @@ -9,8 +9,7 @@ extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on);  extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);  extern void usb_wwan_close(struct usb_serial_port *port);  extern int usb_wwan_startup(struct usb_serial *serial); -extern void usb_wwan_disconnect(struct usb_serial *serial); -extern void usb_wwan_release(struct usb_serial *serial); +extern int usb_wwan_port_remove(struct usb_serial_port *port);  extern int usb_wwan_write_room(struct tty_struct *tty);  extern void usb_wwan_set_termios(struct tty_struct *tty,  				 struct usb_serial_port *port, diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index f35971dff4a..6855d5ed033 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -565,62 +565,52 @@ bail_out_error:  }  EXPORT_SYMBOL(usb_wwan_startup); -static void stop_read_write_urbs(struct usb_serial *serial) +int usb_wwan_port_remove(struct usb_serial_port *port)  { -	int i, j; -	struct usb_serial_port *port; +	int i;  	struct usb_wwan_port_private *portdata; -	/* Stop reading/writing urbs */ -	for (i = 0; i < serial->num_ports; ++i) { -		port = serial->port[i]; -		portdata = usb_get_serial_port_data(port); -		for (j = 0; j < N_IN_URB; j++) -			
usb_kill_urb(portdata->in_urbs[j]); -		for (j = 0; j < N_OUT_URB; j++) -			usb_kill_urb(portdata->out_urbs[j]); +	portdata = usb_get_serial_port_data(port); +	usb_set_serial_port_data(port, NULL); + +	/* Stop reading/writing urbs and free them */ +	for (i = 0; i < N_IN_URB; i++) { +		usb_kill_urb(portdata->in_urbs[i]); +		usb_free_urb(portdata->in_urbs[i]); +		free_page((unsigned long)portdata->in_buffer[i]); +	} +	for (i = 0; i < N_OUT_URB; i++) { +		usb_kill_urb(portdata->out_urbs[i]); +		usb_free_urb(portdata->out_urbs[i]); +		kfree(portdata->out_buffer[i]);  	} -} -void usb_wwan_disconnect(struct usb_serial *serial) -{ -	stop_read_write_urbs(serial); +	/* Now free port private data */ +	kfree(portdata); +	return 0;  } -EXPORT_SYMBOL(usb_wwan_disconnect); +EXPORT_SYMBOL(usb_wwan_port_remove); -void usb_wwan_release(struct usb_serial *serial) +#ifdef CONFIG_PM +static void stop_read_write_urbs(struct usb_serial *serial)  {  	int i, j;  	struct usb_serial_port *port;  	struct usb_wwan_port_private *portdata; -	/* Now free them */ +	/* Stop reading/writing urbs */  	for (i = 0; i < serial->num_ports; ++i) {  		port = serial->port[i];  		portdata = usb_get_serial_port_data(port); - -		for (j = 0; j < N_IN_URB; j++) { -			usb_free_urb(portdata->in_urbs[j]); -			free_page((unsigned long) -				  portdata->in_buffer[j]); -			portdata->in_urbs[j] = NULL; -		} -		for (j = 0; j < N_OUT_URB; j++) { -			usb_free_urb(portdata->out_urbs[j]); -			kfree(portdata->out_buffer[j]); -			portdata->out_urbs[j] = NULL; -		} -	} - -	/* Now free per port private data */ -	for (i = 0; i < serial->num_ports; i++) { -		port = serial->port[i]; -		kfree(usb_get_serial_port_data(port)); +		if (!portdata) +			continue; +		for (j = 0; j < N_IN_URB; j++) +			usb_kill_urb(portdata->in_urbs[j]); +		for (j = 0; j < N_OUT_URB; j++) +			usb_kill_urb(portdata->out_urbs[j]);  	}  } -EXPORT_SYMBOL(usb_wwan_release); -#ifdef CONFIG_PM  int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)  
{  	struct usb_wwan_intf_private *intfdata = serial->private; @@ -712,7 +702,7 @@ int usb_wwan_resume(struct usb_serial *serial)  		/* skip closed ports */  		spin_lock_irq(&intfdata->susp_lock); -		if (!portdata->opened) { +		if (!portdata || !portdata->opened) {  			spin_unlock_irq(&intfdata->susp_lock);  			continue;  		} diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 9591e2b509d..17830c9c7cc 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -264,6 +264,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)  	return group;  } +/* called with vfio.group_lock held */  static void vfio_group_release(struct kref *kref)  {  	struct vfio_group *group = container_of(kref, struct vfio_group, kref); @@ -287,13 +288,7 @@ static void vfio_group_release(struct kref *kref)  static void vfio_group_put(struct vfio_group *group)  { -	mutex_lock(&vfio.group_lock); -	/* -	 * Release needs to unlock to unregister the notifier, so only -	 * unlock if not released. 
-	 */ -	if (!kref_put(&group->kref, vfio_group_release)) -		mutex_unlock(&vfio.group_lock); +	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);  }  /* Assume group_lock or group reference is held */ @@ -401,7 +396,6 @@ static void vfio_device_release(struct kref *kref)  						  struct vfio_device, kref);  	struct vfio_group *group = device->group; -	mutex_lock(&group->device_lock);  	list_del(&device->group_next);  	mutex_unlock(&group->device_lock); @@ -416,8 +410,9 @@ static void vfio_device_release(struct kref *kref)  /* Device reference always implies a group reference */  static void vfio_device_put(struct vfio_device *device)  { -	kref_put(&device->kref, vfio_device_release); -	vfio_group_put(device->group); +	struct vfio_group *group = device->group; +	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); +	vfio_group_put(group);  }  static void vfio_device_get(struct vfio_device *device) @@ -1116,10 +1111,10 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)  		 */  		filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); -		fd_install(ret, filep); -  		vfio_device_get(device);  		atomic_inc(&group->container_users); + +		fd_install(ret, filep);  		break;  	}  	mutex_unlock(&group->device_lock); diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index fb366540ed5..ed8e2e6c8df 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c @@ -53,9 +53,14 @@  #include "vhost.h"  #include "tcm_vhost.h" +enum { +	VHOST_SCSI_VQ_CTL = 0, +	VHOST_SCSI_VQ_EVT = 1, +	VHOST_SCSI_VQ_IO = 2, +}; +  struct vhost_scsi { -	atomic_t vhost_ref_cnt; -	struct tcm_vhost_tpg *vs_tpg; +	struct tcm_vhost_tpg *vs_tpg;	/* Protected by vhost_scsi->dev.mutex */  	struct vhost_dev dev;  	struct vhost_virtqueue vqs[3]; @@ -131,8 +136,7 @@ static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)  	return 1;  } -static u32 tcm_vhost_get_pr_transport_id( -	struct se_portal_group 
*se_tpg, +static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,  	struct se_node_acl *se_nacl,  	struct t10_pr_registration *pr_reg,  	int *format_code, @@ -162,8 +166,7 @@ static u32 tcm_vhost_get_pr_transport_id(  			format_code, buf);  } -static u32 tcm_vhost_get_pr_transport_id_len( -	struct se_portal_group *se_tpg, +static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,  	struct se_node_acl *se_nacl,  	struct t10_pr_registration *pr_reg,  	int *format_code) @@ -192,8 +195,7 @@ static u32 tcm_vhost_get_pr_transport_id_len(  			format_code);  } -static char *tcm_vhost_parse_pr_out_transport_id( -	struct se_portal_group *se_tpg, +static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,  	const char *buf,  	u32 *out_tid_len,  	char **port_nexus_ptr) @@ -236,8 +238,7 @@ static struct se_node_acl *tcm_vhost_alloc_fabric_acl(  	return &nacl->se_node_acl;  } -static void tcm_vhost_release_fabric_acl( -	struct se_portal_group *se_tpg, +static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,  	struct se_node_acl *se_nacl)  {  	struct tcm_vhost_nacl *nacl = container_of(se_nacl, @@ -297,7 +298,16 @@ static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)  	return 0;  } -static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *); +static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) +{ +	struct vhost_scsi *vs = tv_cmd->tvc_vhost; + +	spin_lock_bh(&vs->vs_completion_lock); +	list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); +	spin_unlock_bh(&vs->vs_completion_lock); + +	vhost_work_queue(&vs->dev, &vs->vs_completion_work); +}  static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)  { @@ -381,7 +391,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)  					vs_completion_work);  	struct tcm_vhost_cmd *tv_cmd; -	while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) { +	while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) { 
 		struct virtio_scsi_cmd_resp v_rsp;  		struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;  		int ret; @@ -408,19 +418,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)  	vhost_signal(&vs->dev, &vs->vqs[2]);  } -static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) -{ -	struct vhost_scsi *vs = tv_cmd->tvc_vhost; - -	pr_debug("%s tv_cmd %p\n", __func__, tv_cmd); - -	spin_lock_bh(&vs->vs_completion_lock); -	list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); -	spin_unlock_bh(&vs->vs_completion_lock); - -	vhost_work_queue(&vs->dev, &vs->vs_completion_work); -} -  static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(  	struct tcm_vhost_tpg *tv_tpg,  	struct virtio_scsi_cmd_req *v_req, @@ -533,8 +530,8 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,  	sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);  	if (!sg)  		return -ENOMEM; -	pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__, -	       sg, sgl_count, IS_ERR(sg)); +	pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__, +	       sg, sgl_count, !sg);  	sg_init_table(sg, sgl_count);  	tv_cmd->tvc_sgl = sg; @@ -787,12 +784,12 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)  static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)  { -	pr_err("%s: The handling func for control queue.\n", __func__); +	pr_debug("%s: The handling func for control queue.\n", __func__);  }  static void vhost_scsi_evt_handle_kick(struct vhost_work *work)  { -	pr_err("%s: The handling func for event queue.\n", __func__); +	pr_debug("%s: The handling func for event queue.\n", __func__);  }  static void vhost_scsi_handle_kick(struct vhost_work *work) @@ -825,11 +822,6 @@ static int vhost_scsi_set_endpoint(  			return -EFAULT;  		}  	} - -	if (vs->vs_tpg) { -		mutex_unlock(&vs->dev.mutex); -		return -EEXIST; -	}  	mutex_unlock(&vs->dev.mutex);  	mutex_lock(&tcm_vhost_mutex); @@ -839,7 +831,7 @@ static int vhost_scsi_set_endpoint(  			
mutex_unlock(&tv_tpg->tv_tpg_mutex);  			continue;  		} -		if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) { +		if (tv_tpg->tv_tpg_vhost_count != 0) {  			mutex_unlock(&tv_tpg->tv_tpg_mutex);  			continue;  		} @@ -847,14 +839,20 @@ static int vhost_scsi_set_endpoint(  		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&  		    (tv_tpg->tport_tpgt == t->vhost_tpgt)) { -			atomic_inc(&tv_tpg->tv_tpg_vhost_count); -			smp_mb__after_atomic_inc(); +			tv_tpg->tv_tpg_vhost_count++;  			mutex_unlock(&tv_tpg->tv_tpg_mutex);  			mutex_unlock(&tcm_vhost_mutex);  			mutex_lock(&vs->dev.mutex); +			if (vs->vs_tpg) { +				mutex_unlock(&vs->dev.mutex); +				mutex_lock(&tv_tpg->tv_tpg_mutex); +				tv_tpg->tv_tpg_vhost_count--; +				mutex_unlock(&tv_tpg->tv_tpg_mutex); +				return -EEXIST; +			} +  			vs->vs_tpg = tv_tpg; -			atomic_inc(&vs->vhost_ref_cnt);  			smp_mb__after_atomic_inc();  			mutex_unlock(&vs->dev.mutex);  			return 0; @@ -871,38 +869,42 @@ static int vhost_scsi_clear_endpoint(  {  	struct tcm_vhost_tport *tv_tport;  	struct tcm_vhost_tpg *tv_tpg; -	int index; +	int index, ret;  	mutex_lock(&vs->dev.mutex);  	/* Verify that ring has been setup correctly. 
*/  	for (index = 0; index < vs->dev.nvqs; ++index) {  		if (!vhost_vq_access_ok(&vs->vqs[index])) { -			mutex_unlock(&vs->dev.mutex); -			return -EFAULT; +			ret = -EFAULT; +			goto err;  		}  	}  	if (!vs->vs_tpg) { -		mutex_unlock(&vs->dev.mutex); -		return -ENODEV; +		ret = -ENODEV; +		goto err;  	}  	tv_tpg = vs->vs_tpg;  	tv_tport = tv_tpg->tport;  	if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||  	    (tv_tpg->tport_tpgt != t->vhost_tpgt)) { -		mutex_unlock(&vs->dev.mutex);  		pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"  			" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",  			tv_tport->tport_name, tv_tpg->tport_tpgt,  			t->vhost_wwpn, t->vhost_tpgt); -		return -EINVAL; +		ret = -EINVAL; +		goto err;  	} -	atomic_dec(&tv_tpg->tv_tpg_vhost_count); +	tv_tpg->tv_tpg_vhost_count--;  	vs->vs_tpg = NULL;  	mutex_unlock(&vs->dev.mutex);  	return 0; + +err: +	mutex_unlock(&vs->dev.mutex); +	return ret;  }  static int vhost_scsi_open(struct inode *inode, struct file *f) @@ -918,9 +920,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)  	INIT_LIST_HEAD(&s->vs_completion_list);  	spin_lock_init(&s->vs_completion_lock); -	s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick; -	s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick; -	s->vqs[2].handle_kick = vhost_scsi_handle_kick; +	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; +	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; +	s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;  	r = vhost_dev_init(&s->dev, s->vqs, 3);  	if (r < 0) {  		kfree(s); @@ -949,6 +951,18 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)  	return 0;  } +static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) +{ +	vhost_poll_flush(&vs->dev.vqs[index].poll); +} + +static void vhost_scsi_flush(struct vhost_scsi *vs) +{ +	vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL); +	vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT); +	
vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO); +} +  static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)  {  	if (features & ~VHOST_FEATURES) @@ -961,7 +975,8 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)  		return -EFAULT;  	}  	vs->dev.acked_features = features; -	/* TODO possibly smp_wmb() and flush vqs */ +	smp_wmb(); +	vhost_scsi_flush(vs);  	mutex_unlock(&vs->dev.mutex);  	return 0;  } @@ -974,26 +989,25 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,  	void __user *argp = (void __user *)arg;  	u64 __user *featurep = argp;  	u64 features; -	int r; +	int r, abi_version = VHOST_SCSI_ABI_VERSION;  	switch (ioctl) {  	case VHOST_SCSI_SET_ENDPOINT:  		if (copy_from_user(&backend, argp, sizeof backend))  			return -EFAULT; +		if (backend.reserved != 0) +			return -EOPNOTSUPP;  		return vhost_scsi_set_endpoint(vs, &backend);  	case VHOST_SCSI_CLEAR_ENDPOINT:  		if (copy_from_user(&backend, argp, sizeof backend))  			return -EFAULT; +		if (backend.reserved != 0) +			return -EOPNOTSUPP;  		return vhost_scsi_clear_endpoint(vs, &backend);  	case VHOST_SCSI_GET_ABI_VERSION: -		if (copy_from_user(&backend, argp, sizeof backend)) -			return -EFAULT; - -		backend.abi_version = VHOST_SCSI_ABI_VERSION; - -		if (copy_to_user(argp, &backend, sizeof backend)) +		if (copy_to_user(argp, &abi_version, sizeof abi_version))  			return -EFAULT;  		return 0;  	case VHOST_GET_FEATURES: @@ -1013,11 +1027,21 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,  	}  } +#ifdef CONFIG_COMPAT +static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl, +				unsigned long arg) +{ +	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); +} +#endif +  static const struct file_operations vhost_scsi_fops = {  	.owner          = THIS_MODULE,  	.release        = vhost_scsi_release,  	.unlocked_ioctl = vhost_scsi_ioctl, -	/* TODO compat ioctl? 
*/ +#ifdef CONFIG_COMPAT +	.compat_ioctl	= vhost_scsi_compat_ioctl, +#endif  	.open           = vhost_scsi_open,  	.llseek		= noop_llseek,  }; @@ -1054,28 +1078,28 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)  	return "Unknown";  } -static int tcm_vhost_port_link( -	struct se_portal_group *se_tpg, +static int tcm_vhost_port_link(struct se_portal_group *se_tpg,  	struct se_lun *lun)  {  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,  				struct tcm_vhost_tpg, se_tpg); -	atomic_inc(&tv_tpg->tv_tpg_port_count); -	smp_mb__after_atomic_inc(); +	mutex_lock(&tv_tpg->tv_tpg_mutex); +	tv_tpg->tv_tpg_port_count++; +	mutex_unlock(&tv_tpg->tv_tpg_mutex);  	return 0;  } -static void tcm_vhost_port_unlink( -	struct se_portal_group *se_tpg, +static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,  	struct se_lun *se_lun)  {  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,  				struct tcm_vhost_tpg, se_tpg); -	atomic_dec(&tv_tpg->tv_tpg_port_count); -	smp_mb__after_atomic_dec(); +	mutex_lock(&tv_tpg->tv_tpg_mutex); +	tv_tpg->tv_tpg_port_count--; +	mutex_unlock(&tv_tpg->tv_tpg_mutex);  }  static struct se_node_acl *tcm_vhost_make_nodeacl( @@ -1122,8 +1146,7 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)  	kfree(nacl);  } -static int tcm_vhost_make_nexus( -	struct tcm_vhost_tpg *tv_tpg, +static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,  	const char *name)  {  	struct se_portal_group *se_tpg; @@ -1168,7 +1191,7 @@ static int tcm_vhost_make_nexus(  		return -ENOMEM;  	}  	/* -	 * Now register the TCM vHost virtual I_T Nexus as active with the +	 * Now register the TCM vhost virtual I_T Nexus as active with the  	 * call to __transport_register_session()  	 */  	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, @@ -1179,8 +1202,7 @@ static int tcm_vhost_make_nexus(  	return 0;  } -static int tcm_vhost_drop_nexus( -	struct tcm_vhost_tpg *tpg) +static int tcm_vhost_drop_nexus(struct 
tcm_vhost_tpg *tpg)  {  	struct se_session *se_sess;  	struct tcm_vhost_nexus *tv_nexus; @@ -1198,27 +1220,27 @@ static int tcm_vhost_drop_nexus(  		return -ENODEV;  	} -	if (atomic_read(&tpg->tv_tpg_port_count)) { +	if (tpg->tv_tpg_port_count != 0) {  		mutex_unlock(&tpg->tv_tpg_mutex); -		pr_err("Unable to remove TCM_vHost I_T Nexus with" +		pr_err("Unable to remove TCM_vhost I_T Nexus with"  			" active TPG port count: %d\n", -			atomic_read(&tpg->tv_tpg_port_count)); -		return -EPERM; +			tpg->tv_tpg_port_count); +		return -EBUSY;  	} -	if (atomic_read(&tpg->tv_tpg_vhost_count)) { +	if (tpg->tv_tpg_vhost_count != 0) {  		mutex_unlock(&tpg->tv_tpg_mutex); -		pr_err("Unable to remove TCM_vHost I_T Nexus with" +		pr_err("Unable to remove TCM_vhost I_T Nexus with"  			" active TPG vhost count: %d\n", -			atomic_read(&tpg->tv_tpg_vhost_count)); -		return -EPERM; +			tpg->tv_tpg_vhost_count); +		return -EBUSY;  	} -	pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated" +	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"  		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),  		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);  	/* -	 * Release the SCSI I_T Nexus to the emulated vHost Target Port +	 * Release the SCSI I_T Nexus to the emulated vhost Target Port  	 */  	transport_deregister_session(tv_nexus->tvn_se_sess);  	tpg->tpg_nexus = NULL; @@ -1228,8 +1250,7 @@ static int tcm_vhost_drop_nexus(  	return 0;  } -static ssize_t tcm_vhost_tpg_show_nexus( -	struct se_portal_group *se_tpg, +static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,  	char *page)  {  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, @@ -1250,8 +1271,7 @@ static ssize_t tcm_vhost_tpg_show_nexus(  	return ret;  } -static ssize_t tcm_vhost_tpg_store_nexus( -	struct se_portal_group *se_tpg, +static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,  	const char *page,  	size_t count)  { @@ -1336,8 +1356,7 @@ static struct 
configfs_attribute *tcm_vhost_tpg_attrs[] = {  	NULL,  }; -static struct se_portal_group *tcm_vhost_make_tpg( -	struct se_wwn *wwn, +static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,  	struct config_group *group,  	const char *name)  { @@ -1385,7 +1404,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)  	list_del(&tpg->tv_tpg_list);  	mutex_unlock(&tcm_vhost_mutex);  	/* -	 * Release the virtual I_T Nexus for this vHost TPG +	 * Release the virtual I_T Nexus for this vhost TPG  	 */  	tcm_vhost_drop_nexus(tpg);  	/* @@ -1395,8 +1414,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)  	kfree(tpg);  } -static struct se_wwn *tcm_vhost_make_tport( -	struct target_fabric_configfs *tf, +static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,  	struct config_group *group,  	const char *name)  { @@ -1592,7 +1610,10 @@ static void tcm_vhost_deregister_configfs(void)  static int __init tcm_vhost_init(void)  {  	int ret = -ENOMEM; - +	/* +	 * Use our own dedicated workqueue for submitting I/O into +	 * target core to avoid contention within system_wq. 
+	 */  	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);  	if (!tcm_vhost_workqueue)  		goto out; diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h index c983ed21e41..d9e93557d66 100644 --- a/drivers/vhost/tcm_vhost.h +++ b/drivers/vhost/tcm_vhost.h @@ -47,9 +47,9 @@ struct tcm_vhost_tpg {  	/* Vhost port target portal group tag for TCM */  	u16 tport_tpgt;  	/* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ -	atomic_t tv_tpg_port_count; -	/* Used for vhost_scsi device reference to tpg_nexus */ -	atomic_t tv_tpg_vhost_count; +	int tv_tpg_port_count; +	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ +	int tv_tpg_vhost_count;  	/* list for tcm_vhost_list */  	struct list_head tv_tpg_list;  	/* Used to protect access for tpg_nexus */ @@ -91,11 +91,13 @@ struct tcm_vhost_tport {  struct vhost_scsi_target {  	int abi_version; -	unsigned char vhost_wwpn[TRANSPORT_IQN_LEN]; +	char vhost_wwpn[TRANSPORT_IQN_LEN];  	unsigned short vhost_tpgt; +	unsigned short reserved;  };  /* VHOST_SCSI specific defines */  #define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)  #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) -#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target) +/* Changing this breaks userspace. 
*/ +#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int) diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 2e471c22abf..88e92041d8f 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -372,8 +372,15 @@ static void fb_flashcursor(struct work_struct *work)  	struct vc_data *vc = NULL;  	int c;  	int mode; +	int ret; + +	/* FIXME: we should sort out the unbind locking instead */ +	/* instead we just fail to flash the cursor if we can't get +	 * the lock instead of blocking fbcon deinit */ +	ret = console_trylock(); +	if (ret == 0) +		return; -	console_lock();  	if (ops && ops->currcon != -1)  		vc = vc_cons[ops->currcon].d; diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index d90062b211f..92d08e7fcba 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -91,6 +91,11 @@ static struct w1_family w1_therm_family_DS28EA00 = {  	.fops = &w1_therm_fops,  }; +static struct w1_family w1_therm_family_DS1825 = { +	.fid = W1_THERM_DS1825, +	.fops = &w1_therm_fops, +}; +  struct w1_therm_family_converter  {  	u8			broken; @@ -120,6 +125,10 @@ static struct w1_therm_family_converter w1_therm_families[] = {  		.f		= &w1_therm_family_DS28EA00,  		.convert	= w1_DS18B20_convert_temp  	}, +	{ +		.f		= &w1_therm_family_DS1825, +		.convert	= w1_DS18B20_convert_temp +	}  };  static inline int w1_DS18B20_convert_temp(u8 rom[9]) diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index b00ada44a89..a1f0ce151d5 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h @@ -39,6 +39,7 @@  #define W1_EEPROM_DS2431	0x2D  #define W1_FAMILY_DS2760	0x30  #define W1_FAMILY_DS2780	0x32 +#define W1_THERM_DS1825		0x3B  #define W1_FAMILY_DS2781	0x3D  #define W1_THERM_DS28EA00	0x42 diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 3fe82d0e8ca..5b06d31ab6a 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c @@ 
-166,18 +166,17 @@ static long booke_wdt_ioctl(struct file *file,  	switch (cmd) {  	case WDIOC_GETSUPPORT: -		if (copy_to_user((void *)arg, &ident, sizeof(ident))) -			return -EFAULT; +		return copy_to_user(p, &ident, sizeof(ident)) ? -EFAULT : 0;  	case WDIOC_GETSTATUS:  		return put_user(0, p);  	case WDIOC_GETBOOTSTATUS:  		/* XXX: something is clearing TSR */  		tmp = mfspr(SPRN_TSR) & TSR_WRS(3);  		/* returns CARDRESET if last reset was caused by the WDT */ -		return (tmp ? WDIOF_CARDRESET : 0); +		return put_user((tmp ? WDIOF_CARDRESET : 0), p);  	case WDIOC_SETOPTIONS:  		if (get_user(tmp, p)) -			return -EINVAL; +			return -EFAULT;  		if (tmp == WDIOS_ENABLECARD) {  			booke_wdt_ping();  			break; diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index d4c50d63acb..97ca359ae2b 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -101,19 +101,6 @@ static int platform_pci_resume(struct pci_dev *pdev)  	return 0;  } -static void __devinit prepare_shared_info(void) -{ -#ifdef CONFIG_KEXEC -	unsigned long addr; -	struct shared_info *hvm_shared_info; - -	addr = alloc_xen_mmio(PAGE_SIZE); -	hvm_shared_info = ioremap(addr, PAGE_SIZE); -	memset(hvm_shared_info, 0, PAGE_SIZE); -	xen_hvm_prepare_kexec(hvm_shared_info, addr >> PAGE_SHIFT); -#endif -} -  static int __devinit platform_pci_init(struct pci_dev *pdev,  				       const struct pci_device_id *ent)  { @@ -151,8 +138,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,  	platform_mmio = mmio_addr;  	platform_mmiolen = mmio_len; -	prepare_shared_info(); -  	if (!xen_have_vector_callback) {  		ret = xen_allocate_irq(pdev);  		if (ret) { diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 8c0e56d9293..842d00048a6 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -399,11 +399,6 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,  			DPRINTK("checking mountpoint %p %.*s",  				dentry, (int)dentry->d_name.len, 
dentry->d_name.name); -			/* Path walk currently on this dentry? */ -			ino_count = atomic_read(&ino->count) + 2; -			if (dentry->d_count > ino_count) -				goto next; -  			/* Can we umount this guy */  			if (autofs4_mount_busy(mnt, dentry))  				goto next; @@ -73,7 +73,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)  {  	unsigned int sz = sizeof(struct bio) + extra_size;  	struct kmem_cache *slab = NULL; -	struct bio_slab *bslab; +	struct bio_slab *bslab, *new_bio_slabs;  	unsigned int i, entry = -1;  	mutex_lock(&bio_slab_lock); @@ -97,11 +97,12 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)  	if (bio_slab_nr == bio_slab_max && entry == -1) {  		bio_slab_max <<= 1; -		bio_slabs = krealloc(bio_slabs, -				     bio_slab_max * sizeof(struct bio_slab), -				     GFP_KERNEL); -		if (!bio_slabs) +		new_bio_slabs = krealloc(bio_slabs, +					 bio_slab_max * sizeof(struct bio_slab), +					 GFP_KERNEL); +		if (!new_bio_slabs)  			goto out_unlock; +		bio_slabs = new_bio_slabs;  	}  	if (entry == -1)  		entry = bio_slab_nr++; diff --git a/fs/block_dev.c b/fs/block_dev.c index 1e519195d45..38e721b35d4 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1578,10 +1578,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,  			 unsigned long nr_segs, loff_t pos)  {  	struct file *file = iocb->ki_filp; +	struct blk_plug plug;  	ssize_t ret;  	BUG_ON(iocb->ki_pos != pos); +	blk_start_plug(&plug);  	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);  	if (ret > 0 || ret == -EIOCBQUEUED) {  		ssize_t err; @@ -1590,6 +1592,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,  		if (err < 0 && ret > 0)  			ret = err;  	} +	blk_finish_plug(&plug);  	return ret;  }  EXPORT_SYMBOL_GPL(blkdev_aio_write); diff --git a/fs/buffer.c b/fs/buffer.c index 9f6d2e41281..58e2e7b7737 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -914,7 +914,7 @@ link_dev_buffers(struct page 
*page, struct buffer_head *head)  /*   * Initialise the state of a blockdev page's buffers.   */  -static void +static sector_t  init_page_buffers(struct page *page, struct block_device *bdev,  			sector_t block, int size)  { @@ -936,33 +936,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,  		block++;  		bh = bh->b_this_page;  	} while (bh != head); + +	/* +	 * Caller needs to validate requested block against end of device. +	 */ +	return end_block;  }  /*   * Create the page-cache page that contains the requested block.   * - * This is user purely for blockdev mappings. + * This is used purely for blockdev mappings.   */ -static struct page * +static int  grow_dev_page(struct block_device *bdev, sector_t block, -		pgoff_t index, int size) +		pgoff_t index, int size, int sizebits)  {  	struct inode *inode = bdev->bd_inode;  	struct page *page;  	struct buffer_head *bh; +	sector_t end_block; +	int ret = 0;		/* Will call free_more_memory() */  	page = find_or_create_page(inode->i_mapping, index,  		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);  	if (!page) -		return NULL; +		return ret;  	BUG_ON(!PageLocked(page));  	if (page_has_buffers(page)) {  		bh = page_buffers(page);  		if (bh->b_size == size) { -			init_page_buffers(page, bdev, block, size); -			return page; +			end_block = init_page_buffers(page, bdev, +						index << sizebits, size); +			goto done;  		}  		if (!try_to_free_buffers(page))  			goto failed; @@ -982,14 +990,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,  	 */  	spin_lock(&inode->i_mapping->private_lock);  	link_dev_buffers(page, bh); -	init_page_buffers(page, bdev, block, size); +	end_block = init_page_buffers(page, bdev, index << sizebits, size);  	spin_unlock(&inode->i_mapping->private_lock); -	return page; - +done: +	ret = (block < end_block) ? 
1 : -ENXIO;  failed:  	unlock_page(page);  	page_cache_release(page); -	return NULL; +	return ret;  }  /* @@ -999,7 +1007,6 @@ failed:  static int  grow_buffers(struct block_device *bdev, sector_t block, int size)  { -	struct page *page;  	pgoff_t index;  	int sizebits; @@ -1023,22 +1030,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)  			bdevname(bdev, b));  		return -EIO;  	} -	block = index << sizebits; +  	/* Create a page with the proper size buffers.. */ -	page = grow_dev_page(bdev, block, index, size); -	if (!page) -		return 0; -	unlock_page(page); -	page_cache_release(page); -	return 1; +	return grow_dev_page(bdev, block, index, size, sizebits);  }  static struct buffer_head *  __getblk_slow(struct block_device *bdev, sector_t block, int size)  { -	int ret; -	struct buffer_head *bh; -  	/* Size must be multiple of hard sectorsize */  	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||  			(size < 512 || size > PAGE_SIZE))) { @@ -1051,21 +1050,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)  		return NULL;  	} -retry: -	bh = __find_get_block(bdev, block, size); -	if (bh) -		return bh; +	for (;;) { +		struct buffer_head *bh; +		int ret; -	ret = grow_buffers(bdev, block, size); -	if (ret == 0) { -		free_more_memory(); -		goto retry; -	} else if (ret > 0) {  		bh = __find_get_block(bdev, block, size);  		if (bh)  			return bh; + +		ret = grow_buffers(bdev, block, size); +		if (ret < 0) +			return NULL; +		if (ret == 0) +			free_more_memory();  	} -	return NULL;  }  /* @@ -1321,10 +1319,6 @@ EXPORT_SYMBOL(__find_get_block);   * which corresponds to the passed block_device, block and size. The   * returned buffer has its reference count incremented.   * - * __getblk() cannot fail - it just keeps trying.  If you pass it an - * illegal block number, __getblk() will happily return a buffer_head - * which represents the non-existent block.  Very weird. 
- *   * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()   * attempt is failing.  FIXME, perhaps?   */ diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index fb962efdace..6d59006bfa2 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -201,6 +201,7 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)  	int err = -ENOMEM;  	dout("ceph_fs_debugfs_init\n"); +	BUG_ON(!fsc->client->debugfs_dir);  	fsc->debugfs_congestion_kb =  		debugfs_create_file("writeback_congestion_kb",  				    0600, diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 9fff9f3b17e..4b5762ef7c2 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -992,11 +992,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,  	if (rinfo->head->is_dentry) {  		struct inode *dir = req->r_locked_dir; -		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, -				 session, req->r_request_started, -1, -				 &req->r_caps_reservation); -		if (err < 0) -			return err; +		if (dir) { +			err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, +					 session, req->r_request_started, -1, +					 &req->r_caps_reservation); +			if (err < 0) +				return err; +		} else { +			WARN_ON_ONCE(1); +		}  	}  	/* @@ -1004,6 +1008,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,  	 * will have trouble splicing in the virtual snapdir later  	 */  	if (rinfo->head->is_dentry && !req->r_aborted && +	    req->r_locked_dir &&  	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,  					       fsc->mount_options->snapdir_name,  					       req->r_dentry->d_name.len))) { diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c index 8e3fb69fbe6..1396ceb4679 100644 --- a/fs/ceph/ioctl.c +++ b/fs/ceph/ioctl.c @@ -42,7 +42,8 @@ static long __validate_layout(struct ceph_mds_client *mdsc,  	/* validate striping parameters */  	if ((l->object_size & ~PAGE_MASK) ||  	    (l->stripe_unit & ~PAGE_MASK) || -	    ((unsigned)l->object_size % 
(unsigned)l->stripe_unit)) +	    (l->stripe_unit != 0 && +	     ((unsigned)l->object_size % (unsigned)l->stripe_unit)))  		return -EINVAL;  	/* make sure it's a valid data pool */ diff --git a/fs/compat.c b/fs/compat.c index 6161255fac4..1bdb350ea5d 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1155,11 +1155,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,  	struct file *file;  	int fput_needed;  	ssize_t ret; +	loff_t pos;  	file = fget_light(fd, &fput_needed);  	if (!file)  		return -EBADF; -	ret = compat_readv(file, vec, vlen, &file->f_pos); +	pos = file->f_pos; +	ret = compat_readv(file, vec, vlen, &pos); +	file->f_pos = pos;  	fput_light(file, fput_needed);  	return ret;  } @@ -1221,11 +1224,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,  	struct file *file;  	int fput_needed;  	ssize_t ret; +	loff_t pos;  	file = fget_light(fd, &fput_needed);  	if (!file)  		return -EBADF; -	ret = compat_writev(file, vec, vlen, &file->f_pos); +	pos = file->f_pos; +	ret = compat_writev(file, vec, vlen, &pos); +	file->f_pos = pos;  	fput_light(file, fput_needed);  	return ret;  } diff --git a/fs/direct-io.c b/fs/direct-io.c index 1faf4cb56f3..f86c720dba0 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1062,6 +1062,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,  	unsigned long user_addr;  	size_t bytes;  	struct buffer_head map_bh = { 0, }; +	struct blk_plug plug;  	if (rw & WRITE)  		rw = WRITE_ODIRECT; @@ -1177,6 +1178,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,  				PAGE_SIZE - user_addr / PAGE_SIZE);  	} +	blk_start_plug(&plug); +  	for (seg = 0; seg < nr_segs; seg++) {  		user_addr = (unsigned long)iov[seg].iov_base;  		sdio.size += bytes = iov[seg].iov_len; @@ -1235,6 +1238,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,  	if (sdio.bio)  		dio_bio_submit(dio, &sdio); +	blk_finish_plug(&plug); +  	/*  	 * It is 
possible that, we return short IO due to end of file.  	 * In that case, we need to release all the pages we got hold on. diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1c8b5567080..eedec84c180 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)  		error = PTR_ERR(file);  		goto out_free_fd;  	} -	fd_install(fd, file);  	ep->file = file; +	fd_install(fd, file);  	return fd;  out_free_fd: diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index d23b31ca9d7..1b5089067d0 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -280,14 +280,18 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,  	return desc;  } -static int ext4_valid_block_bitmap(struct super_block *sb, -				   struct ext4_group_desc *desc, -				   unsigned int block_group, -				   struct buffer_head *bh) +/* + * Return the block number which was discovered to be invalid, or 0 if + * the block bitmap is valid. + */ +static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, +					    struct ext4_group_desc *desc, +					    unsigned int block_group, +					    struct buffer_head *bh)  {  	ext4_grpblk_t offset;  	ext4_grpblk_t next_zero_bit; -	ext4_fsblk_t bitmap_blk; +	ext4_fsblk_t blk;  	ext4_fsblk_t group_first_block;  	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { @@ -297,37 +301,33 @@ static int ext4_valid_block_bitmap(struct super_block *sb,  		 * or it has to also read the block group where the bitmaps  		 * are located to verify they are set.  		 
*/ -		return 1; +		return 0;  	}  	group_first_block = ext4_group_first_block_no(sb, block_group);  	/* check whether block bitmap block number is set */ -	bitmap_blk = ext4_block_bitmap(sb, desc); -	offset = bitmap_blk - group_first_block; +	blk = ext4_block_bitmap(sb, desc); +	offset = blk - group_first_block;  	if (!ext4_test_bit(offset, bh->b_data))  		/* bad block bitmap */ -		goto err_out; +		return blk;  	/* check whether the inode bitmap block number is set */ -	bitmap_blk = ext4_inode_bitmap(sb, desc); -	offset = bitmap_blk - group_first_block; +	blk = ext4_inode_bitmap(sb, desc); +	offset = blk - group_first_block;  	if (!ext4_test_bit(offset, bh->b_data))  		/* bad block bitmap */ -		goto err_out; +		return blk;  	/* check whether the inode table block number is set */ -	bitmap_blk = ext4_inode_table(sb, desc); -	offset = bitmap_blk - group_first_block; +	blk = ext4_inode_table(sb, desc); +	offset = blk - group_first_block;  	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,  				offset + EXT4_SB(sb)->s_itb_per_group,  				offset); -	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group) -		/* good bitmap for inode tables */ -		return 1; - -err_out: -	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu", -			block_group, bitmap_blk); +	if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group) +		/* bad bitmap for inode tables */ +		return blk;  	return 0;  } @@ -336,14 +336,26 @@ void ext4_validate_block_bitmap(struct super_block *sb,  			       unsigned int block_group,  			       struct buffer_head *bh)  { +	ext4_fsblk_t	blk; +  	if (buffer_verified(bh))  		return;  	ext4_lock_group(sb, block_group); -	if (ext4_valid_block_bitmap(sb, desc, block_group, bh) && -	    ext4_block_bitmap_csum_verify(sb, block_group, desc, bh, -					  EXT4_BLOCKS_PER_GROUP(sb) / 8)) -		set_buffer_verified(bh); +	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh); +	if (unlikely(blk != 0)) { +		ext4_unlock_group(sb, block_group); +		
ext4_error(sb, "bg %u: block %llu: invalid block bitmap", +			   block_group, blk); +		return; +	} +	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, +			desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) { +		ext4_unlock_group(sb, block_group); +		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group); +		return; +	} +	set_buffer_verified(bh);  	ext4_unlock_group(sb, block_group);  } diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c index f8716eab999..5c2d1813ebe 100644 --- a/fs/ext4/bitmap.c +++ b/fs/ext4/bitmap.c @@ -79,7 +79,6 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,  	if (provided == calculated)  		return 1; -	ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group);  	return 0;  } diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index cd0c7ed0677..aabbb3f5368 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2662,6 +2662,7 @@ cont:  		}  		path[0].p_depth = depth;  		path[0].p_hdr = ext_inode_hdr(inode); +		i = 0;  		if (ext4_ext_check(inode, path[0].p_hdr, depth)) {  			err = -EIO; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 3e0851e4f46..c6e0cb3d1f4 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -948,6 +948,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)  	ei->i_reserved_meta_blocks = 0;  	ei->i_allocated_meta_blocks = 0;  	ei->i_da_metadata_calc_len = 0; +	ei->i_da_metadata_calc_last_lblock = 0;  	spin_lock_init(&(ei->i_block_reservation_lock));  #ifdef CONFIG_QUOTA  	ei->i_reserved_quota = 0; @@ -3108,6 +3109,10 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,  	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);  	int			s, j, count = 0; +	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC)) +		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) + +			sbi->s_itb_per_group + 2); +  	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +  		(grp * EXT4_BLOCKS_PER_GROUP(sb));  	last_block = 
first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1; @@ -4419,6 +4424,7 @@ static void ext4_clear_journal_err(struct super_block *sb,  		ext4_commit_super(sb, 1);  		jbd2_journal_clear_err(journal); +		jbd2_journal_update_sb_errno(journal);  	}  } diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 8964cf3999b..324bc085053 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -383,6 +383,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,  	struct fuse_entry_out outentry;  	struct fuse_file *ff; +	/* Userspace expects S_IFREG in create mode */ +	BUG_ON((mode & S_IFMT) != S_IFREG); +  	forget = fuse_alloc_forget();  	err = -ENOMEM;  	if (!forget) diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 09357508ec9..a2862339323 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -1113,6 +1113,11 @@ static void mark_journal_empty(journal_t *journal)  	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));  	spin_lock(&journal->j_state_lock); +	/* Is it already empty? */ +	if (sb->s_start == 0) { +		spin_unlock(&journal->j_state_lock); +		return; +	}  	jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",          	  journal->j_tail_sequence); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 8625da27ecc..e149b99a7ff 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1377,7 +1377,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)   * Update a journal's errno.  Write updated superblock to disk waiting for IO   * to complete.   
*/ -static void jbd2_journal_update_sb_errno(journal_t *journal) +void jbd2_journal_update_sb_errno(journal_t *journal)  {  	journal_superblock_t *sb = journal->j_superblock; @@ -1390,6 +1390,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal)  	jbd2_write_superblock(journal, WRITE_SYNC);  } +EXPORT_SYMBOL(jbd2_journal_update_sb_errno);  /*   * Read the superblock for a given journal, performing initial diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index df0de27c273..e784a217b50 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c @@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)  	struct completion complete;  	bio_init(&bio); +	bio.bi_max_vecs = 1;  	bio.bi_io_vec = &bio_vec;  	bio_vec.bv_page = page;  	bio_vec.bv_len = PAGE_SIZE; @@ -95,12 +96,11 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,  	struct address_space *mapping = super->s_mapping_inode->i_mapping;  	struct bio *bio;  	struct page *page; -	struct request_queue *q = bdev_get_queue(sb->s_bdev); -	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); +	unsigned int max_pages;  	int i; -	if (max_pages > BIO_MAX_PAGES) -		max_pages = BIO_MAX_PAGES; +	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); +  	bio = bio_alloc(GFP_NOFS, max_pages);  	BUG_ON(!bio); @@ -190,12 +190,11 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,  {  	struct logfs_super *super = logfs_super(sb);  	struct bio *bio; -	struct request_queue *q = bdev_get_queue(sb->s_bdev); -	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); +	unsigned int max_pages;  	int i; -	if (max_pages > BIO_MAX_PAGES) -		max_pages = BIO_MAX_PAGES; +	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); +  	bio = bio_alloc(GFP_NOFS, max_pages);  	BUG_ON(!bio); diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c index a422f42238b..6984562738d 100644 --- a/fs/logfs/inode.c 
+++ b/fs/logfs/inode.c @@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode)  	call_rcu(&inode->i_rcu, logfs_i_callback);  } +static void __logfs_destroy_meta_inode(struct inode *inode) +{ +	struct logfs_inode *li = logfs_inode(inode); +	BUG_ON(li->li_block); +	call_rcu(&inode->i_rcu, logfs_i_callback); +} +  static void logfs_destroy_inode(struct inode *inode)  {  	struct logfs_inode *li = logfs_inode(inode); +	if (inode->i_ino < LOGFS_RESERVED_INOS) { +		/* +		 * The reserved inodes are never destroyed unless we are in +		 * unmont path. +		 */ +		__logfs_destroy_meta_inode(inode); +		return; +	} +  	BUG_ON(list_empty(&li->li_freeing_list));  	spin_lock(&logfs_inode_lock);  	li->li_refcount--; @@ -373,8 +389,8 @@ static void logfs_put_super(struct super_block *sb)  {  	struct logfs_super *super = logfs_super(sb);  	/* kill the meta-inodes */ -	iput(super->s_master_inode);  	iput(super->s_segfile_inode); +	iput(super->s_master_inode);  	iput(super->s_mapping_inode);  } diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c index 1e1c369df22..2a09b8d7398 100644 --- a/fs/logfs/journal.c +++ b/fs/logfs/journal.c @@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area,  	index = ofs >> PAGE_SHIFT;  	page_ofs = ofs & (PAGE_SIZE - 1); -	page = find_lock_page(mapping, index); +	page = find_or_create_page(mapping, index, GFP_NOFS);  	BUG_ON(!page);  	memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);  	unlock_page(page); diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index f1cb512c501..5be0abef603 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c @@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode)  		return;  	} -	BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);  	page = inode_to_page(inode);  	BUG_ON(!page); /* FIXME: Use emergency page */  	logfs_put_write_page(page); diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index e28d090c98d..038da099179 100644 --- 
a/fs/logfs/segment.c +++ b/fs/logfs/segment.c @@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb)  static void map_invalidatepage(struct page *page, unsigned long l)  { -	BUG(); +	return;  }  static int map_releasepage(struct page *page, gfp_t g) diff --git a/fs/namei.c b/fs/namei.c index 1b464390dde..dd1ed1b8e98 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -352,6 +352,7 @@ int __inode_permission(struct inode *inode, int mask)  /**   * sb_permission - Check superblock-level permissions   * @sb: Superblock of inode to check permission on + * @inode: Inode to check permission on   * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)   *   * Separate out file-system wide checks from inode-specific permission checks. @@ -656,6 +657,7 @@ int sysctl_protected_hardlinks __read_mostly = 1;  /**   * may_follow_link - Check symlink following for unsafe situations   * @link: The path of the symlink + * @nd: nameidata pathwalk data   *   * In the case of the sysctl_protected_symlinks sysctl being enabled,   * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is @@ -2414,7 +2416,7 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,  		goto out;  	} -	mode = op->mode & S_IALLUGO; +	mode = op->mode;  	if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))  		mode &= ~current_umask(); @@ -2452,7 +2454,7 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,  	}  	if (open_flag & O_CREAT) { -		error = may_o_create(&nd->path, dentry, op->mode); +		error = may_o_create(&nd->path, dentry, mode);  		if (error) {  			create_error = error;  			if (open_flag & O_EXCL) @@ -2489,6 +2491,10 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,  			dput(dentry);  			dentry = file->f_path.dentry;  		} +		if (create_error && dentry->d_inode == NULL) { +			error = create_error; +			goto out; +		}  		goto looked_up;  	} diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile index 8bf3a3f6925..b7db60897f9 
100644 --- a/fs/nfs/Makefile +++ b/fs/nfs/Makefile @@ -12,19 +12,19 @@ nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o  nfs-$(CONFIG_SYSCTL)	+= sysctl.o  nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o -obj-$(CONFIG_NFS_V2) += nfs2.o -nfs2-y := nfs2super.o proc.o nfs2xdr.o +obj-$(CONFIG_NFS_V2) += nfsv2.o +nfsv2-y := nfs2super.o proc.o nfs2xdr.o -obj-$(CONFIG_NFS_V3) += nfs3.o -nfs3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o -nfs3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o +obj-$(CONFIG_NFS_V3) += nfsv3.o +nfsv3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o +nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o -obj-$(CONFIG_NFS_V4) += nfs4.o -nfs4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \ +obj-$(CONFIG_NFS_V4) += nfsv4.o +nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \  	  delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \  	  nfs4namespace.o nfs4getroot.o nfs4client.o -nfs4-$(CONFIG_SYSCTL)	+= nfs4sysctl.o -nfs4-$(CONFIG_NFS_V4_1)	+= pnfs.o pnfs_dev.o +nfsv4-$(CONFIG_SYSCTL)	+= nfs4sysctl.o +nfsv4-$(CONFIG_NFS_V4_1)	+= pnfs.o pnfs_dev.o  obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o  nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 9fc0d9dfc91..99694442b93 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -105,7 +105,7 @@ struct nfs_subversion *get_nfs_version(unsigned int version)  	if (IS_ERR(nfs)) {  		mutex_lock(&nfs_version_mutex); -		request_module("nfs%d", version); +		request_module("nfsv%d", version);  		nfs = find_nfs_version(version);  		mutex_unlock(&nfs_version_mutex);  	} diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index b701358c39c..a850079467d 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -61,6 +61,12 @@ struct idmap {  	struct mutex		idmap_mutex;  }; +struct idmap_legacy_upcalldata { +	struct rpc_pipe_msg pipe_msg; +	struct idmap_msg idmap_msg; +	struct idmap *idmap; +}; +  /**   * 
nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields   * @fattr: fully initialised struct nfs_fattr @@ -324,6 +330,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,  		ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,  					    name, namelen, type, data,  					    data_size, idmap); +		idmap->idmap_key_cons = NULL;  		mutex_unlock(&idmap->idmap_mutex);  	}  	return ret; @@ -380,11 +387,13 @@ static const match_table_t nfs_idmap_tokens = {  static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);  static ssize_t idmap_pipe_downcall(struct file *, const char __user *,  				   size_t); +static void idmap_release_pipe(struct inode *);  static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);  static const struct rpc_pipe_ops idmap_upcall_ops = {  	.upcall		= rpc_pipe_generic_upcall,  	.downcall	= idmap_pipe_downcall, +	.release_pipe	= idmap_release_pipe,  	.destroy_msg	= idmap_pipe_destroy_msg,  }; @@ -616,7 +625,8 @@ void nfs_idmap_quit(void)  	nfs_idmap_quit_keyring();  } -static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im, +static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap, +				     struct idmap_msg *im,  				     struct rpc_pipe_msg *msg)  {  	substring_t substr; @@ -659,6 +669,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,  				   const char *op,  				   void *aux)  { +	struct idmap_legacy_upcalldata *data;  	struct rpc_pipe_msg *msg;  	struct idmap_msg *im;  	struct idmap *idmap = (struct idmap *)aux; @@ -666,15 +677,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,  	int ret = -ENOMEM;  	/* msg and im are freed in idmap_pipe_destroy_msg */ -	msg = kmalloc(sizeof(*msg), GFP_KERNEL); -	if (!msg) -		goto out0; - -	im = kmalloc(sizeof(*im), GFP_KERNEL); -	if (!im) +	data = kmalloc(sizeof(*data), GFP_KERNEL); +	if (!data)  		goto out1; -	ret = nfs_idmap_prepare_message(key->description, im, msg); +	
msg = &data->pipe_msg; +	im = &data->idmap_msg; +	data->idmap = idmap; + +	ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);  	if (ret < 0)  		goto out2; @@ -683,15 +694,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,  	ret = rpc_queue_upcall(idmap->idmap_pipe, msg);  	if (ret < 0) -		goto out2; +		goto out3;  	return ret; +out3: +	idmap->idmap_key_cons = NULL;  out2: -	kfree(im); +	kfree(data);  out1: -	kfree(msg); -out0:  	complete_request_key(cons, ret);  	return ret;  } @@ -749,9 +760,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)  	}  	if (!(im.im_status & IDMAP_STATUS_SUCCESS)) { -		ret = mlen; -		complete_request_key(cons, -ENOKEY); -		goto out_incomplete; +		ret = -ENOKEY; +		goto out;  	}  	namelen_in = strnlen(im.im_name, IDMAP_NAMESZ); @@ -768,16 +778,32 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)  out:  	complete_request_key(cons, ret); -out_incomplete:  	return ret;  }  static void  idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)  { +	struct idmap_legacy_upcalldata *data = container_of(msg, +			struct idmap_legacy_upcalldata, +			pipe_msg); +	struct idmap *idmap = data->idmap; +	struct key_construction *cons; +	if (msg->errno) { +		cons = ACCESS_ONCE(idmap->idmap_key_cons); +		idmap->idmap_key_cons = NULL; +		complete_request_key(cons, msg->errno); +	}  	/* Free memory allocated in nfs_idmap_legacy_upcall() */ -	kfree(msg->data); -	kfree(msg); +	kfree(data); +} + +static void +idmap_release_pipe(struct inode *inode) +{ +	struct rpc_inode *rpci = RPC_I(inode); +	struct idmap *idmap = (struct idmap *)rpci->private; +	idmap->idmap_key_cons = NULL;  }  int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 0952c791df3..d6b3b5f2d77 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -69,7 +69,7 @@ do_proc_get_root(struct rpc_clnt *client, 
struct nfs_fh *fhandle,  	nfs_fattr_init(info->fattr);  	status = rpc_call_sync(client, &msg, 0);  	dprintk("%s: reply fsinfo: %d\n", __func__, status); -	if (!(info->fattr->valid & NFS_ATTR_FATTR)) { +	if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {  		msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];  		msg.rpc_resp = info->fattr;  		status = rpc_call_sync(client, &msg, 0); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 3b950dd81e8..da0618aeead 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -205,6 +205,9 @@ extern const struct dentry_operations nfs4_dentry_operations;  int nfs_atomic_open(struct inode *, struct dentry *, struct file *,  		    unsigned, umode_t, int *); +/* super.c */ +extern struct file_system_type nfs4_fs_type; +  /* nfs4namespace.c */  rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);  struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *); diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index cbcdfaf3250..24eb663f8ed 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -74,7 +74,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)  	return clp;  error: -	kfree(clp); +	nfs_free_client(clp);  	return ERR_PTR(err);  } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a99a8d94872..635274140b1 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3737,9 +3737,10 @@ out:  static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)  {  	struct nfs4_cached_acl *acl; +	size_t buflen = sizeof(*acl) + acl_len; -	if (pages && acl_len <= PAGE_SIZE) { -		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); +	if (pages && buflen <= PAGE_SIZE) { +		acl = kmalloc(buflen, GFP_KERNEL);  		if (acl == NULL)  			goto out;  		acl->cached = 1; @@ -3819,7 +3820,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu  	if (ret)  		goto out_free; -	acl_len 
= res.acl_len - res.acl_data_offset; +	acl_len = res.acl_len;  	if (acl_len > args.acl_len)  		nfs4_write_cached_acl(inode, NULL, 0, acl_len);  	else @@ -6223,11 +6224,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)  	dprintk("<-- %s\n", __func__);  } +static size_t max_response_pages(struct nfs_server *server) +{ +	u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; +	return nfs_page_array_len(0, max_resp_sz); +} + +static void nfs4_free_pages(struct page **pages, size_t size) +{ +	int i; + +	if (!pages) +		return; + +	for (i = 0; i < size; i++) { +		if (!pages[i]) +			break; +		__free_page(pages[i]); +	} +	kfree(pages); +} + +static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) +{ +	struct page **pages; +	int i; + +	pages = kcalloc(size, sizeof(struct page *), gfp_flags); +	if (!pages) { +		dprintk("%s: can't alloc array of %zu pages\n", __func__, size); +		return NULL; +	} + +	for (i = 0; i < size; i++) { +		pages[i] = alloc_page(gfp_flags); +		if (!pages[i]) { +			dprintk("%s: failed to allocate page\n", __func__); +			nfs4_free_pages(pages, size); +			return NULL; +		} +	} + +	return pages; +} +  static void nfs4_layoutget_release(void *calldata)  {  	struct nfs4_layoutget *lgp = calldata; +	struct nfs_server *server = NFS_SERVER(lgp->args.inode); +	size_t max_pages = max_response_pages(server);  	dprintk("--> %s\n", __func__); +	nfs4_free_pages(lgp->args.layout.pages, max_pages);  	put_nfs_open_context(lgp->args.ctx);  	kfree(calldata);  	dprintk("<-- %s\n", __func__); @@ -6239,9 +6287,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {  	.rpc_release = nfs4_layoutget_release,  }; -int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) +void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)  {  	struct nfs_server *server = NFS_SERVER(lgp->args.inode); +	size_t max_pages = max_response_pages(server);  	struct rpc_task *task;  	struct rpc_message msg = {  		.rpc_proc = 
&nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], @@ -6259,12 +6308,19 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)  	dprintk("--> %s\n", __func__); +	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); +	if (!lgp->args.layout.pages) { +		nfs4_layoutget_release(lgp); +		return; +	} +	lgp->args.layout.pglen = max_pages * PAGE_SIZE; +  	lgp->res.layoutp = &lgp->args.layout;  	lgp->res.seq_res.sr_slot = NULL;  	nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);  	task = rpc_run_task(&task_setup_data);  	if (IS_ERR(task)) -		return PTR_ERR(task); +		return;  	status = nfs4_wait_for_completion_rpc_task(task);  	if (status == 0)  		status = task->tk_status; @@ -6272,7 +6328,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)  		status = pnfs_layout_process(lgp);  	rpc_put_task(task);  	dprintk("<-- %s status=%d\n", __func__, status); -	return status; +	return;  }  static void @@ -6304,12 +6360,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)  		return;  	}  	spin_lock(&lo->plh_inode->i_lock); -	if (task->tk_status == 0) { -		if (lrp->res.lrs_present) { -			pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); -		} else -			BUG_ON(!list_empty(&lo->plh_segs)); -	} +	if (task->tk_status == 0 && lrp->res.lrs_present) +		pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);  	lo->plh_block_lgets--;  	spin_unlock(&lo->plh_inode->i_lock);  	dprintk("<-- %s\n", __func__); diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 12a31a9dbcd..bd61221ad2c 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c @@ -23,14 +23,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,  static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,  	int flags, const char *dev_name, void *raw_data); -static struct file_system_type nfs4_fs_type = { -	.owner		= THIS_MODULE, -	.name		= "nfs4", -	.mount		= nfs_fs_mount, -	.kill_sb	= nfs_kill_super, -	.fs_flags	= 
FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA, -}; -  static struct file_system_type nfs4_remote_fs_type = {  	.owner		= THIS_MODULE,  	.name		= "nfs4", @@ -344,14 +336,8 @@ static int __init init_nfs_v4(void)  	if (err)  		goto out1; -	err = register_filesystem(&nfs4_fs_type); -	if (err < 0) -		goto out2; -  	register_nfs_version(&nfs_v4);  	return 0; -out2: -	nfs4_unregister_sysctl();  out1:  	nfs_idmap_quit();  out: @@ -361,7 +347,6 @@ out:  static void __exit exit_nfs_v4(void)  {  	unregister_nfs_version(&nfs_v4); -	unregister_filesystem(&nfs4_fs_type);  	nfs4_unregister_sysctl();  	nfs_idmap_quit();  } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index ca13483edd6..1bfbd67c556 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5045,22 +5045,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,  			 struct nfs_getaclres *res)  {  	unsigned int savep; -	__be32 *bm_p;  	uint32_t attrlen,  		 bitmap[3] = {0};  	int status; -	size_t page_len = xdr->buf->page_len; +	unsigned int pg_offset;  	res->acl_len = 0;  	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)  		goto out; -	bm_p = xdr->p; -	res->acl_data_offset = be32_to_cpup(bm_p) + 2; -	res->acl_data_offset <<= 2; -	/* Check if the acl data starts beyond the allocated buffer */ -	if (res->acl_data_offset > page_len) -		return -ERANGE; +	xdr_enter_page(xdr, xdr->buf->page_len); + +	/* Calculate the offset of the page data */ +	pg_offset = xdr->buf->head[0].iov_len;  	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)  		goto out; @@ -5074,23 +5071,20 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,  		/* The bitmap (xdr len + bitmaps) and the attr xdr len words  		 * are stored with the acl data to handle the problem of  		 * variable length bitmaps.*/ -		xdr->p = bm_p; +		res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;  		/* We ignore &savep and don't do consistency checks on  		 * the attr length.  Let userspace figure it out.... 
*/ -		attrlen += res->acl_data_offset; -		if (attrlen > page_len) { +		res->acl_len = attrlen; +		if (attrlen > (xdr->nwords << 2)) {  			if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {  				/* getxattr interface called with a NULL buf */ -				res->acl_len = attrlen;  				goto out;  			} -			dprintk("NFS: acl reply: attrlen %u > page_len %zu\n", -					attrlen, page_len); +			dprintk("NFS: acl reply: attrlen %u > page_len %u\n", +					attrlen, xdr->nwords << 2);  			return -EINVAL;  		} -		xdr_read_pages(xdr, attrlen); -		res->acl_len = attrlen;  	} else  		status = -EOPNOTSUPP; diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index f50d3e8d6f2..ea6d111b03e 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -570,17 +570,66 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,  		return false;  	return pgio->pg_count + req->wb_bytes <= -			OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length; +			(unsigned long)pgio->pg_layout_private; +} + +void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) +{ +	pnfs_generic_pg_init_read(pgio, req); +	if (unlikely(pgio->pg_lseg == NULL)) +		return; /* Not pNFS */ + +	pgio->pg_layout_private = (void *) +				OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length; +} + +static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout, +				   unsigned long *stripe_end) +{ +	u32 stripe_off; +	unsigned stripe_size; + +	if (layout->raid_algorithm == PNFS_OSD_RAID_0) +		return true; + +	stripe_size = layout->stripe_unit * +				(layout->group_width - layout->parity); + +	div_u64_rem(offset, stripe_size, &stripe_off); +	if (!stripe_off) +		return true; + +	*stripe_end = stripe_size - stripe_off; +	return false; +} + +void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) +{ +	unsigned long stripe_end = 0; + +	pnfs_generic_pg_init_write(pgio, req); +	if (unlikely(pgio->pg_lseg == NULL)) +		return; /* Not pNFS */ + +	if (req->wb_offset 
|| +	    !aligned_on_raid_stripe(req->wb_index * PAGE_SIZE, +			       &OBJIO_LSEG(pgio->pg_lseg)->layout, +			       &stripe_end)) { +		pgio->pg_layout_private = (void *)stripe_end; +	} else { +		pgio->pg_layout_private = (void *) +				OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length; +	}  }  static const struct nfs_pageio_ops objio_pg_read_ops = { -	.pg_init = pnfs_generic_pg_init_read, +	.pg_init = objio_init_read,  	.pg_test = objio_pg_test,  	.pg_doio = pnfs_generic_pg_readpages,  };  static const struct nfs_pageio_ops objio_pg_write_ops = { -	.pg_init = pnfs_generic_pg_init_write, +	.pg_init = objio_init_write,  	.pg_test = objio_pg_test,  	.pg_doio = pnfs_generic_pg_writepages,  }; diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 1a6732ed04a..311a79681e2 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -49,6 +49,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,  	hdr->io_start = req_offset(hdr->req);  	hdr->good_bytes = desc->pg_count;  	hdr->dreq = desc->pg_dreq; +	hdr->layout_private = desc->pg_layout_private;  	hdr->release = release;  	hdr->completion_ops = desc->pg_completion_ops;  	if (hdr->completion_ops->init_hdr) @@ -268,6 +269,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,  	desc->pg_error = 0;  	desc->pg_lseg = NULL;  	desc->pg_dreq = NULL; +	desc->pg_layout_private = NULL;  }  EXPORT_SYMBOL_GPL(nfs_pageio_init); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 76875bfcf19..2e00feacd4b 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -583,9 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,  	struct nfs_server *server = NFS_SERVER(ino);  	struct nfs4_layoutget *lgp;  	struct pnfs_layout_segment *lseg = NULL; -	struct page **pages = NULL; -	int i; -	u32 max_resp_sz, max_pages;  	dprintk("--> %s\n", __func__); @@ -594,20 +591,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,  	if (lgp == NULL)  		return NULL; -	/* allocate pages for xdr post processing */ -	max_resp_sz = 
server->nfs_client->cl_session->fc_attrs.max_resp_sz; -	max_pages = nfs_page_array_len(0, max_resp_sz); - -	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); -	if (!pages) -		goto out_err_free; - -	for (i = 0; i < max_pages; i++) { -		pages[i] = alloc_page(gfp_flags); -		if (!pages[i]) -			goto out_err_free; -	} -  	lgp->args.minlength = PAGE_CACHE_SIZE;  	if (lgp->args.minlength > range->length)  		lgp->args.minlength = range->length; @@ -616,39 +599,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,  	lgp->args.type = server->pnfs_curr_ld->id;  	lgp->args.inode = ino;  	lgp->args.ctx = get_nfs_open_context(ctx); -	lgp->args.layout.pages = pages; -	lgp->args.layout.pglen = max_pages * PAGE_SIZE;  	lgp->lsegpp = &lseg;  	lgp->gfp_flags = gfp_flags;  	/* Synchronously retrieve layout information from server and  	 * store in lseg.  	 */ -	nfs4_proc_layoutget(lgp); +	nfs4_proc_layoutget(lgp, gfp_flags);  	if (!lseg) {  		/* remember that LAYOUTGET failed and suspend trying */  		set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);  	} -	/* free xdr pages */ -	for (i = 0; i < max_pages; i++) -		__free_page(pages[i]); -	kfree(pages); -  	return lseg; - -out_err_free: -	/* free any allocated xdr pages, lgp as it's not used */ -	if (pages) { -		for (i = 0; i < max_pages; i++) { -			if (!pages[i]) -				break; -			__free_page(pages[i]); -		} -		kfree(pages); -	} -	kfree(lgp); -	return NULL;  }  /* diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 2c6c80503ba..745aa1b39e7 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -172,7 +172,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,  				   struct pnfs_devicelist *devlist);  extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,  				   struct pnfs_device *dev); -extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp); +extern void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);  extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);  /* pnfs.c */ diff --git 
a/fs/nfs/super.c b/fs/nfs/super.c index ac6a3c55dce..239aff7338e 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -319,6 +319,34 @@ EXPORT_SYMBOL_GPL(nfs_sops);  static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);  static int nfs4_validate_mount_data(void *options,  	struct nfs_parsed_mount_data *args, const char *dev_name); + +struct file_system_type nfs4_fs_type = { +	.owner		= THIS_MODULE, +	.name		= "nfs4", +	.mount		= nfs_fs_mount, +	.kill_sb	= nfs_kill_super, +	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA, +}; +EXPORT_SYMBOL_GPL(nfs4_fs_type); + +static int __init register_nfs4_fs(void) +{ +	return register_filesystem(&nfs4_fs_type); +} + +static void unregister_nfs4_fs(void) +{ +	unregister_filesystem(&nfs4_fs_type); +} +#else +static int __init register_nfs4_fs(void) +{ +	return 0; +} + +static void unregister_nfs4_fs(void) +{ +}  #endif  static struct shrinker acl_shrinker = { @@ -337,12 +365,18 @@ int __init register_nfs_fs(void)  	if (ret < 0)  		goto error_0; -	ret = nfs_register_sysctl(); +	ret = register_nfs4_fs();  	if (ret < 0)  		goto error_1; + +	ret = nfs_register_sysctl(); +	if (ret < 0) +		goto error_2;  	register_shrinker(&acl_shrinker);  	return 0; +error_2: +	unregister_nfs4_fs();  error_1:  	unregister_filesystem(&nfs_fs_type);  error_0: @@ -356,6 +390,7 @@ void __exit unregister_nfs_fs(void)  {  	unregister_shrinker(&acl_shrinker);  	nfs_unregister_sysctl(); +	unregister_nfs4_fs();  	unregister_filesystem(&nfs_fs_type);  } @@ -2645,4 +2680,6 @@ MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "  module_param(send_implementation_id, ushort, 0644);  MODULE_PARM_DESC(send_implementation_id,  		"Send implementation ID with NFSv4.1 exchange_id"); +MODULE_ALIAS("nfs4"); +  #endif /* CONFIG_NFS_V4 */ diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 5829d0ce7cf..e3b55372726 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1814,19 +1814,19 @@ int __init 
nfs_init_writepagecache(void)  	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,  						     nfs_wdata_cachep);  	if (nfs_wdata_mempool == NULL) -		return -ENOMEM; +		goto out_destroy_write_cache;  	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",  					     sizeof(struct nfs_commit_data),  					     0, SLAB_HWCACHE_ALIGN,  					     NULL);  	if (nfs_cdata_cachep == NULL) -		return -ENOMEM; +		goto out_destroy_write_mempool;  	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,  						      nfs_wdata_cachep);  	if (nfs_commit_mempool == NULL) -		return -ENOMEM; +		goto out_destroy_commit_cache;  	/*  	 * NFS congestion size, scale with available memory. @@ -1849,11 +1849,20 @@ int __init nfs_init_writepagecache(void)  		nfs_congestion_kb = 256*1024;  	return 0; + +out_destroy_commit_cache: +	kmem_cache_destroy(nfs_cdata_cachep); +out_destroy_write_mempool: +	mempool_destroy(nfs_wdata_mempool); +out_destroy_write_cache: +	kmem_cache_destroy(nfs_wdata_cachep); +	return -ENOMEM;  }  void nfs_destroy_writepagecache(void)  {  	mempool_destroy(nfs_commit_mempool); +	kmem_cache_destroy(nfs_cdata_cachep);  	mempool_destroy(nfs_wdata_mempool);  	kmem_cache_destroy(nfs_wdata_cachep);  } diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index cbaf4f8bb7b..4c7bd35b187 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -651,12 +651,12 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c  	if (clp->cl_minorversion == 0) {  		if (!clp->cl_cred.cr_principal && -				(clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) +				(clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))  			return -EINVAL;  		args.client_name = clp->cl_cred.cr_principal;  		args.prognumber	= conn->cb_prog,  		args.protocol = XPRT_TRANSPORT_TCP; -		args.authflavor = clp->cl_flavor; +		args.authflavor = clp->cl_cred.cr_flavor;  		clp->cl_cb_ident = conn->cb_ident;  	} else {  		if (!conn->cb_xprt) diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h 
index e6173147f98..22bd0a66c35 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -231,7 +231,6 @@ struct nfs4_client {  	nfs4_verifier		cl_verifier; 	/* generated by client */  	time_t                  cl_time;        /* time of last lease renewal */  	struct sockaddr_storage	cl_addr; 	/* client ipaddress */ -	u32			cl_flavor;	/* setclientid pseudoflavor */  	struct svc_cred		cl_cred; 	/* setclientid principal */  	clientid_t		cl_clientid;	/* generated by server */  	nfs4_verifier		cl_confirm;	/* generated by server */ diff --git a/fs/open.c b/fs/open.c index bc132e167d2..e1f2cdb91a4 100644 --- a/fs/open.c +++ b/fs/open.c @@ -852,9 +852,10 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o  	int lookup_flags = 0;  	int acc_mode; -	if (!(flags & O_CREAT)) -		mode = 0; -	op->mode = mode; +	if (flags & O_CREAT) +		op->mode = (mode & S_IALLUGO) | S_IFREG; +	else +		op->mode = 0;  	/* Must never be set by userspace */  	flags &= ~FMODE_NONOTIFY; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 36a29b753c7..c495a3055e2 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -1589,10 +1589,10 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)  		goto out;  	} -	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);  	for (cnt = 0; cnt < MAXQUOTAS; cnt++)  		warn[cnt].w_type = QUOTA_NL_NOWARN; +	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);  	spin_lock(&dq_data_lock);  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {  		if (!dquots[cnt]) diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c index 4c0c7d163d1..a98b7740a0f 100644 --- a/fs/reiserfs/bitmap.c +++ b/fs/reiserfs/bitmap.c @@ -1334,9 +1334,7 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,  	else if (bitmap == 0)  		block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1; -	reiserfs_write_unlock(sb);  	bh = sb_bread(sb, block); -	reiserfs_write_lock(sb);  	if (bh == NULL)  		reiserfs_warning(sb, "sh-2029: %s: bitmap block 
(#%u) "  		                 "reading failed", __func__, block); diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index a6d4268fb6c..855da58db14 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -76,10 +76,10 @@ void reiserfs_evict_inode(struct inode *inode)  		;  	}        out: +	reiserfs_write_unlock_once(inode->i_sb, depth);  	clear_inode(inode);	/* note this must go after the journal_end to prevent deadlock */  	dquot_drop(inode);  	inode->i_blocks = 0; -	reiserfs_write_unlock_once(inode->i_sb, depth);  	return;  no_delete: diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 8b8cc4e945f..760de723dad 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -167,7 +167,7 @@ struct ubifs_global_debug_info {  #define ubifs_dbg_msg(type, fmt, ...) \  	pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__) -#define DBG_KEY_BUF_LEN 32 +#define DBG_KEY_BUF_LEN 48  #define ubifs_dbg_msg_key(type, key, fmt, ...) do {                            \  	char __tmp_key_buf[DBG_KEY_BUF_LEN];                                   \  	pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__,             \ diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index ce33b2beb15..8640920766e 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -1749,7 +1749,10 @@ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)  	return 0;  out_err: -	ubifs_lpt_free(c, 0); +	if (wr) +		ubifs_lpt_free(c, 1); +	if (rd) +		ubifs_lpt_free(c, 0);  	return err;  } diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index c30d976b4be..edeec499c04 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -788,7 +788,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,  corrupted_rescan:  	/* Re-scan the corrupted data with verbose messages */ -	ubifs_err("corruptio %d", ret); +	ubifs_err("corruption %d", ret);  	ubifs_scan_a_node(c, buf, len, lnum, offs, 1);  corrupted:  	ubifs_scanned_corruption(c, lnum, offs, buf); diff --git a/fs/ubifs/replay.c 
b/fs/ubifs/replay.c index eba46d4a761..94d78fc5d4e 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -1026,7 +1026,6 @@ int ubifs_replay_journal(struct ubifs_info *c)  	c->replaying = 1;  	lnum = c->ltail_lnum = c->lhead_lnum; -	lnum = UBIFS_LOG_LNUM;  	do {  		err = replay_log_leb(c, lnum, 0, c->sbuf);  		if (err == 1) @@ -1035,7 +1034,7 @@ int ubifs_replay_journal(struct ubifs_info *c)  		if (err)  			goto out;  		lnum = ubifs_next_log_lnum(c, lnum); -	} while (lnum != UBIFS_LOG_LNUM); +	} while (lnum != c->ltail_lnum);  	err = replay_buds(c);  	if (err) diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index c3fa6c5327a..71a197f0f93 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1157,9 +1157,6 @@ static int check_free_space(struct ubifs_info *c)   *   * This function mounts UBIFS file system. Returns zero in case of success and   * a negative error code in case of failure. - * - * Note, the function does not de-allocate resources it it fails half way - * through, and the caller has to do this instead.   
*/  static int mount_ubifs(struct ubifs_info *c)  { diff --git a/fs/udf/inode.c b/fs/udf/inode.c index fafaad795cd..aa233469b3c 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1124,14 +1124,17 @@ int udf_setsize(struct inode *inode, loff_t newsize)  				if (err)  					return err;  				down_write(&iinfo->i_data_sem); -			} else +			} else {  				iinfo->i_lenAlloc = newsize; +				goto set_size; +			}  		}  		err = udf_extend_file(inode, newsize);  		if (err) {  			up_write(&iinfo->i_data_sem);  			return err;  		} +set_size:  		truncate_setsize(inode, newsize);  		up_write(&iinfo->i_data_sem);  	} else { diff --git a/fs/udf/super.c b/fs/udf/super.c index dcbf98722af..18fc038a438 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1344,6 +1344,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,  		udf_err(sb, "error loading logical volume descriptor: "  			"Partition table too long (%u > %lu)\n", table_len,  			sb->s_blocksize - sizeof(*lvd)); +		ret = 1;  		goto out_bh;  	} @@ -1388,8 +1389,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,  						UDF_ID_SPARABLE,  						strlen(UDF_ID_SPARABLE))) {  				if (udf_load_sparable_map(sb, map, -				    (struct sparablePartitionMap *)gpm) < 0) +				    (struct sparablePartitionMap *)gpm) < 0) { +					ret = 1;  					goto out_bh; +				}  			} else if (!strncmp(upm2->partIdent.ident,  						UDF_ID_METADATA,  						strlen(UDF_ID_METADATA))) { @@ -2000,6 +2003,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)  			if (!silent)  				pr_notice("Rescanning with blocksize %d\n",  					  UDF_DEFAULT_BLOCKSIZE); +			brelse(sbi->s_lvid_bh); +			sbi->s_lvid_bh = NULL;  			uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;  			ret = udf_load_vrs(sb, &uopt, silent, &fileset);  		} diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index f9c3fe304a1..69cf4fcde03 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -179,12 +179,14 @@ xfs_ioc_trim(  	
 * used by the fstrim application.  In the end it really doesn't  	 * matter as trimming blocks is an advisory interface.  	 */ +	if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || +	    range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp))) +		return -XFS_ERROR(EINVAL); +  	start = BTOBB(range.start);  	end = start + BTOBBT(range.len) - 1;  	minlen = BTOBB(max_t(u64, granularity, range.minlen)); -	if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks) -		return -XFS_ERROR(EINVAL);  	if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)  		end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1; diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 21e37b55f7e..5aceb3f8ecd 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -962,23 +962,22 @@ xfs_dialloc(  		if (!pag->pagi_freecount && !okalloc)  			goto nextag; +		/* +		 * Then read in the AGI buffer and recheck with the AGI buffer +		 * lock held. +		 */  		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);  		if (error)  			goto out_error; -		/* -		 * Once the AGI has been read in we have to recheck -		 * pagi_freecount with the AGI buffer lock held. -		 */  		if (pag->pagi_freecount) {  			xfs_perag_put(pag);  			goto out_alloc;  		} -		if (!okalloc) { -			xfs_trans_brelse(tp, agbp); -			goto nextag; -		} +		if (!okalloc) +			goto nextag_relse_buffer; +  		error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);  		if (error) { @@ -1007,6 +1006,8 @@ xfs_dialloc(  			return 0;  		} +nextag_relse_buffer: +		xfs_trans_brelse(tp, agbp);  nextag:  		xfs_perag_put(pag);  		if (++agno == mp->m_sb.sb_agcount) diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 92d4331cd4f..ca28a4ba4b5 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -857,7 +857,7 @@ xfs_rtbuf_get(  	xfs_buf_t	*bp;		/* block buffer, result */  	xfs_inode_t	*ip;		/* bitmap or summary inode */  	xfs_bmbt_irec_t	map; -	int		nmap; +	int		nmap = 1;  	int		error;		/* error value */  	ip = issum ? 
mp->m_rsumip : mp->m_rbmip; diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h index 580a6d35c70..c04e0db8a2d 100644 --- a/include/asm-generic/mutex-xchg.h +++ b/include/asm-generic/mutex-xchg.h @@ -26,7 +26,13 @@ static inline void  __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))  {  	if (unlikely(atomic_xchg(count, 0) != 1)) -		fail_fn(count); +		/* +		 * We failed to acquire the lock, so mark it contended +		 * to ensure that any waiting tasks are woken up by the +		 * unlock slow path. +		 */ +		if (likely(atomic_xchg(count, -1) != 1)) +			fail_fn(count);  }  /** @@ -43,7 +49,8 @@ static inline int  __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))  {  	if (unlikely(atomic_xchg(count, 0) != 1)) -		return fail_fn(count); +		if (likely(atomic_xchg(count, -1) != 1)) +			return fail_fn(count);  	return 0;  } diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index a1a0386e016..617d87ae2b1 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -166,8 +166,6 @@ struct drm_display_mode {  	int crtc_vsync_start;  	int crtc_vsync_end;  	int crtc_vtotal; -	int crtc_hadjusted; -	int crtc_vadjusted;  	/* Driver private mode info */  	int private_size; @@ -216,8 +214,6 @@ struct drm_display_info {  	u32 color_formats;  	u8 cea_rev; - -	char *raw_edid; /* if any */  };  struct drm_framebuffer_funcs { diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h index ee5389d22c6..1d1a858a203 100644 --- a/include/drm/drm_sarea.h +++ b/include/drm/drm_sarea.h @@ -37,6 +37,8 @@  /* SAREA area needs to be at least a page */  #if defined(__alpha__)  #define SAREA_MAX                       0x2000U +#elif defined(__mips__) +#define SAREA_MAX                       0x4000U  #elif defined(__ia64__)  #define SAREA_MAX                       0x10000U	/* 64kB */  #else diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4e72a9d4823..4a2ab7c8539 100644 --- 
a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -601,7 +601,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)   * it already be started by driver.   */  #define RQ_NOMERGE_FLAGS	\ -	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) +	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)  #define rq_mergeable(rq)	\  	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \  	 (((rq)->cmd_flags & REQ_DISCARD) || \ @@ -894,6 +894,8 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);  extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);  extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); +extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio, +			  struct scatterlist *sglist);  extern void blk_dump_rq_flags(struct request *, char *);  extern long nr_blockdev_pages(void); @@ -1139,6 +1141,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector  		& (lim->discard_granularity - 1);  } +static inline int bdev_discard_alignment(struct block_device *bdev) +{ +	struct request_queue *q = bdev_get_queue(bdev); + +	if (bdev != bdev->bd_contains) +		return bdev->bd_part->discard_alignment; + +	return q->limits.discard_alignment; +} +  static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)  {  	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 133ddcf8339..ef658147e4e 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,  extern int fragmentation_index(struct zone *zone, unsigned int order);  extern unsigned long try_to_compact_pages(struct zonelist *zonelist,  			int order, gfp_t gfp_mask, nodemask_t *mask, -			bool sync); +			bool sync, bool 
*contended);  extern int compact_pgdat(pg_data_t *pgdat, int order);  extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -64,7 +64,7 @@ static inline bool compaction_deferred(struct zone *zone, int order)  #else  static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,  			int order, gfp_t gfp_mask, nodemask_t *nodemask, -			bool sync) +			bool sync, bool *contended)  {  	return COMPACT_CONTINUE;  } diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 040b13b5c14..279b1eaa8b7 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -194,6 +194,10 @@ static inline int cpuidle_play_dead(void) {return -ENODEV; }  #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED  void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a); +#else +static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a) +{ +}  #endif  /****************************** diff --git a/include/linux/efi.h b/include/linux/efi.h index 103adc6d7e3..ec45ccd8708 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -503,6 +503,8 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);  extern int __init efi_uart_console_only (void);  extern void efi_initialize_iomem_resources(struct resource *code_resource,  		struct resource *data_resource, struct resource *bss_resource); +extern unsigned long efi_get_time(void); +extern int efi_set_rtc_mmss(unsigned long nowtime);  extern void efi_reserve_boot_services(void);  extern struct efi_memory_map memmap; diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 6960fc1841a..aa2e167e1ef 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -96,21 +96,6 @@ static inline void team_netpoll_send_skb(struct team_port *port,  }  #endif -static inline int team_dev_queue_xmit(struct team *team, struct team_port *port, -				      struct sk_buff *skb) -{ -	BUILD_BUG_ON(sizeof(skb->queue_mapping) != -		     
sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); -	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); - -	skb->dev = port->dev; -	if (unlikely(netpoll_tx_running(port->dev))) { -		team_netpoll_send_skb(port, skb); -		return 0; -	} -	return dev_queue_xmit(skb); -} -  struct team_mode_ops {  	int (*init)(struct team *team);  	void (*exit)(struct team *team); @@ -200,6 +185,21 @@ struct team {  	long mode_priv[TEAM_MODE_PRIV_LONGS];  }; +static inline int team_dev_queue_xmit(struct team *team, struct team_port *port, +				      struct sk_buff *skb) +{ +	BUILD_BUG_ON(sizeof(skb->queue_mapping) != +		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); +	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); + +	skb->dev = port->dev; +	if (unlikely(netpoll_tx_running(team->dev))) { +		team_netpoll_send_skb(port, skb); +		return 0; +	} +	return dev_queue_xmit(skb); +} +  static inline struct hlist_head *team_port_index_hash(struct team *team,  						      int port_index)  { diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h index b76b4a87065..be91f344d5f 100644 --- a/include/linux/iio/frequency/adf4350.h +++ b/include/linux/iio/frequency/adf4350.h @@ -87,6 +87,8 @@  #define ADF4350_MAX_BANDSEL_CLK		125000 /* Hz */  #define ADF4350_MAX_FREQ_REFIN		250000000 /* Hz */  #define ADF4350_MAX_MODULUS		4095 +#define ADF4350_MAX_R_CNT		1023 +  /**   * struct adf4350_platform_data - platform specific information diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index f334c7fab96..3efc43f3f16 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1125,6 +1125,7 @@ extern int	   jbd2_journal_destroy    (journal_t *);  extern int	   jbd2_journal_recover    (journal_t *journal);  extern int	   jbd2_journal_wipe       (journal_t *, int);  extern int	   jbd2_journal_skip_recovery	(journal_t *); +extern void	   jbd2_journal_update_sb_errno(journal_t *);  extern void	   
jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,  				unsigned long, int);  extern void	   __jbd2_journal_abort_hard	(journal_t *); diff --git a/include/linux/kref.h b/include/linux/kref.h index 9c07dcebded..65af6887872 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -18,6 +18,7 @@  #include <linux/bug.h>  #include <linux/atomic.h>  #include <linux/kernel.h> +#include <linux/mutex.h>  struct kref {  	atomic_t refcount; @@ -93,4 +94,21 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)  {  	return kref_sub(kref, 1, release);  } + +static inline int kref_put_mutex(struct kref *kref, +				 void (*release)(struct kref *kref), +				 struct mutex *lock) +{ +	WARN_ON(release == NULL); +        if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) { +		mutex_lock(lock); +		if (unlikely(!atomic_dec_and_test(&kref->refcount))) { +			mutex_unlock(lock); +			return 0; +		} +		release(kref); +		return 1; +	} +	return 0; +}  #endif /* _KREF_H_ */ diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 603bec2913b..06177ba10a1 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -58,13 +58,6 @@ union ktime {  typedef union ktime ktime_t;		/* Kill this */ -#define KTIME_MAX			((s64)~((u64)1 << 63)) -#if (BITS_PER_LONG == 64) -# define KTIME_SEC_MAX			(KTIME_MAX / NSEC_PER_SEC) -#else -# define KTIME_SEC_MAX			LONG_MAX -#endif -  /*   * ktime_t definitions when using the 64-bit scalar representation:   */ diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index 51bf8ada6dc..49258e0ed1c 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -15,6 +15,8 @@  #define MV643XX_ETH_SIZE_REG_4		0x2224  #define MV643XX_ETH_BASE_ADDR_ENABLE_REG	0x2290 +#define MV643XX_TX_CSUM_DEFAULT_LIMIT	0 +  struct mv643xx_eth_shared_platform_data {  	struct mbus_dram_target_info	*dram;  	struct platform_device	*shared_smi; diff --git a/include/linux/netdevice.h 
b/include/linux/netdevice.h index a9db4f33407..59dc05f3824 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -953,7 +953,8 @@ struct net_device_ops {  #ifdef CONFIG_NET_POLL_CONTROLLER  	void                    (*ndo_poll_controller)(struct net_device *dev);  	int			(*ndo_netpoll_setup)(struct net_device *dev, -						     struct netpoll_info *info); +						     struct netpoll_info *info, +						     gfp_t gfp);  	void			(*ndo_netpoll_cleanup)(struct net_device *dev);  #endif  	int			(*ndo_set_vf_mac)(struct net_device *dev, @@ -1521,6 +1522,8 @@ struct packet_type {  	struct sk_buff		**(*gro_receive)(struct sk_buff **head,  					       struct sk_buff *skb);  	int			(*gro_complete)(struct sk_buff *skb); +	bool			(*id_match)(struct packet_type *ptype, +					    struct sock *sk);  	void			*af_packet_priv;  	struct list_head	list;  }; diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index 0dfc8b7210a..89f2a627f3f 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h @@ -164,7 +164,7 @@ extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr  				      unsigned int dataoff, unsigned int datalen,  				      const char *name,  				      unsigned int *matchoff, unsigned int *matchlen, -				      union nf_inet_addr *addr); +				      union nf_inet_addr *addr, bool delim);  extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,  					unsigned int off, unsigned int datalen,  					const char *name, diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 28f5389c924..66d5379c305 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -23,6 +23,7 @@ struct netpoll {  	u8 remote_mac[ETH_ALEN];  	struct list_head rx; /* rx_np list element */ +	struct rcu_head rcu;  };  struct netpoll_info { @@ -38,28 +39,40 @@ struct netpoll_info {  	struct delayed_work tx_work;  	struct 
netpoll *netpoll; +	struct rcu_head rcu;  };  void netpoll_send_udp(struct netpoll *np, const char *msg, int len);  void netpoll_print_options(struct netpoll *np);  int netpoll_parse_options(struct netpoll *np, char *opt); -int __netpoll_setup(struct netpoll *np, struct net_device *ndev); +int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);  int netpoll_setup(struct netpoll *np);  int netpoll_trap(void);  void netpoll_set_trap(int trap);  void __netpoll_cleanup(struct netpoll *np); +void __netpoll_free_rcu(struct netpoll *np);  void netpoll_cleanup(struct netpoll *np); -int __netpoll_rx(struct sk_buff *skb); +int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);  void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,  			     struct net_device *dev);  static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)  { +	unsigned long flags; +	local_irq_save(flags);  	netpoll_send_skb_on_dev(np, skb, np->dev); +	local_irq_restore(flags);  }  #ifdef CONFIG_NETPOLL +static inline bool netpoll_rx_on(struct sk_buff *skb) +{ +	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); + +	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); +} +  static inline bool netpoll_rx(struct sk_buff *skb)  {  	struct netpoll_info *npinfo; @@ -67,14 +80,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)  	bool ret = false;  	local_irq_save(flags); -	npinfo = rcu_dereference_bh(skb->dev->npinfo); -	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) +	if (!netpoll_rx_on(skb))  		goto out; +	npinfo = rcu_dereference_bh(skb->dev->npinfo);  	spin_lock(&npinfo->rx_lock);  	/* check rx_flags again with the lock held */ -	if (npinfo->rx_flags && __netpoll_rx(skb)) +	if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))  		ret = true;  	spin_unlock(&npinfo->rx_lock); @@ -83,13 +96,6 @@ out:  	return ret;  } -static inline int netpoll_rx_on(struct sk_buff *skb) -{ -	struct 
netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); - -	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); -} -  static inline int netpoll_receive_skb(struct sk_buff *skb)  {  	if (!list_empty(&skb->dev->napi_list)) @@ -119,7 +125,7 @@ static inline void netpoll_poll_unlock(void *have)  	}  } -static inline int netpoll_tx_running(struct net_device *dev) +static inline bool netpoll_tx_running(struct net_device *dev)  {  	return irqs_disabled();  } @@ -127,11 +133,11 @@ static inline int netpoll_tx_running(struct net_device *dev)  #else  static inline bool netpoll_rx(struct sk_buff *skb)  { -	return 0; +	return false;  } -static inline int netpoll_rx_on(struct sk_buff *skb) +static inline bool netpoll_rx_on(struct sk_buff *skb)  { -	return 0; +	return false;  }  static inline int netpoll_receive_skb(struct sk_buff *skb)  { @@ -147,9 +153,9 @@ static inline void netpoll_poll_unlock(void *have)  static inline void netpoll_netdev_init(struct net_device *dev)  {  } -static inline int netpoll_tx_running(struct net_device *dev) +static inline bool netpoll_tx_running(struct net_device *dev)  { -	return 0; +	return false;  }  #endif diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 880805774f9..92ce5783b70 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -69,6 +69,7 @@ struct nfs_pageio_descriptor {  	const struct nfs_pgio_completion_ops *pg_completion_ops;  	struct pnfs_layout_segment *pg_lseg;  	struct nfs_direct_req	*pg_dreq; +	void			*pg_layout_private;  };  #define NFS_WBACK_BUSY(req)	(test_bit(PG_BUSY,&(req)->wb_flags)) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 00485e08439..ac7c8ae254f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1248,6 +1248,7 @@ struct nfs_pgio_header {  	void (*release) (struct nfs_pgio_header *hdr);  	const struct nfs_pgio_completion_ops *completion_ops;  	struct nfs_direct_req	*dreq; +	void			*layout_private;  	spinlock_t		
lock;  	/* fields protected by lock */  	int			pnfs_error; diff --git a/include/linux/of.h b/include/linux/of.h index 5919ee33f2b..1b1163225f3 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -190,10 +190,17 @@ extern struct device_node *of_get_parent(const struct device_node *node);  extern struct device_node *of_get_next_parent(struct device_node *node);  extern struct device_node *of_get_next_child(const struct device_node *node,  					     struct device_node *prev); +extern struct device_node *of_get_next_available_child( +	const struct device_node *node, struct device_node *prev); +  #define for_each_child_of_node(parent, child) \  	for (child = of_get_next_child(parent, NULL); child != NULL; \  	     child = of_get_next_child(parent, child)) +#define for_each_available_child_of_node(parent, child) \ +	for (child = of_get_next_available_child(parent, NULL); child != NULL; \ +	     child = of_get_next_available_child(parent, child)) +  static inline int of_get_child_count(const struct device_node *np)  {  	struct device_node *child; diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 6dd96fb4548..e9b7f435084 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -20,6 +20,7 @@  /* This struct is private to the core and should be regarded as a cookie */  struct pinctrl;  struct pinctrl_state; +struct device;  #ifdef CONFIG_PINCTRL diff --git a/include/linux/string.h b/include/linux/string.h index ffe0442e18d..b9178812d9d 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -144,8 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)  {  	return strncmp(str, prefix, strlen(prefix)) == 0;  } -#endif  extern size_t memweight(const void *ptr, size_t bytes); +#endif /* __KERNEL__ */  #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/time.h b/include/linux/time.h index c81c5e40fcb..b0bbd8f0130 100644 --- a/include/linux/time.h +++ 
b/include/linux/time.h @@ -107,11 +107,29 @@ static inline struct timespec timespec_sub(struct timespec lhs,  	return ts_delta;  } +#define KTIME_MAX			((s64)~((u64)1 << 63)) +#if (BITS_PER_LONG == 64) +# define KTIME_SEC_MAX			(KTIME_MAX / NSEC_PER_SEC) +#else +# define KTIME_SEC_MAX			LONG_MAX +#endif +  /*   * Returns true if the timespec is norm, false if denorm:   */ -#define timespec_valid(ts) \ -	(((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) +static inline bool timespec_valid(const struct timespec *ts) +{ +	/* Dates before 1970 are bogus */ +	if (ts->tv_sec < 0) +		return false; +	/* Can't have more nanoseconds then a second */ +	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) +		return false; +	/* Disallow values that could overflow ktime_t */ +	if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) +		return false; +	return true; +}  extern void read_persistent_clock(struct timespec *ts);  extern void read_boot_clock(struct timespec *ts); diff --git a/include/net/llc.h b/include/net/llc.h index 226c846cab0..f2d0fc57052 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -133,7 +133,7 @@ extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,  extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);  extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_station_init(void); +extern void llc_station_init(void);  extern void llc_station_exit(void);  #ifdef CONFIG_PROC_FS diff --git a/include/net/scm.h b/include/net/scm.h index 079d7887dac..7dc0854f0b3 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -70,9 +70,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)  }  static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, -			       struct scm_cookie *scm) +			       struct scm_cookie *scm, bool forcecreds)  {  	memset(scm, 0, sizeof(*scm)); +	if (forcecreds) +		scm_set_cred(scm, task_tgid(current), current_cred());  	
unix_get_peersec_dgram(sock, scm);  	if (msg->msg_controllen <= 0)  		return 0; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 62b619e82a9..976a81abe1a 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -292,6 +292,8 @@ struct xfrm_policy_afinfo {  						  struct flowi *fl,  						  int reverse);  	int			(*get_tos)(const struct flowi *fl); +	void			(*init_dst)(struct net *net, +					    struct xfrm_dst *dst);  	int			(*init_path)(struct xfrm_dst *path,  					     struct dst_entry *dst,  					     int nfheader_len); diff --git a/include/sound/pcm.h b/include/sound/pcm.h index c75c0d1a85e..cdca2ab1e71 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1075,7 +1075,8 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)  const char *snd_pcm_format_name(snd_pcm_format_t format);  /** - * Get a string naming the direction of a stream + * snd_pcm_stream_str - Get a string naming the direction of a stream + * @substream: the pcm substream instance   */  static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream)  { diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 128ce46fa48..015cea01ae3 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -503,8 +503,6 @@ struct se_cmd {  	u32			se_ordered_id;  	/* Total size in bytes associated with command */  	u32			data_length; -	/* SCSI Presented Data Transfer Length */ -	u32			cmd_spdtl;  	u32			residual_count;  	u32			orig_fe_lun;  	/* Persistent Reservation key */ diff --git a/include/xen/events.h b/include/xen/events.h index 9c641deb65d..04399b28e82 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -58,8 +58,6 @@ void notify_remote_via_irq(int irq);  void xen_irq_resume(void); -void xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn); -  /* Clear an irq's pending state, in preparation for polling on it */  void xen_clear_irq_pending(int irq);  void 
xen_set_irq_pending(int irq); diff --git a/init/main.c b/init/main.c index e60679de61c..b28673087ac 100644 --- a/init/main.c +++ b/init/main.c @@ -461,10 +461,6 @@ static void __init mm_init(void)  	percpu_init_late();  	pgtable_cache_init();  	vmalloc_init(); -#ifdef CONFIG_X86 -	if (efi_enabled) -		efi_enter_virtual_mode(); -#endif  }  asmlinkage void __init start_kernel(void) @@ -606,6 +602,10 @@ asmlinkage void __init start_kernel(void)  	calibrate_delay();  	pidmap_init();  	anon_vma_init(); +#ifdef CONFIG_X86 +	if (efi_enabled) +		efi_enter_virtual_mode(); +#endif  	thread_info_cache_init();  	cred_init();  	fork_init(totalram_pages); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index f8e54f5b908..9a08acc9e64 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -726,7 +726,6 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,  			struct mq_attr *attr)  {  	const struct cred *cred = current_cred(); -	struct file *result;  	int ret;  	if (attr) { @@ -748,21 +747,11 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,  	}  	mode &= ~current_umask(); -	ret = mnt_want_write(path->mnt); -	if (ret) -		return ERR_PTR(ret);  	ret = vfs_create(dir, path->dentry, mode, true);  	path->dentry->d_fsdata = NULL; -	if (!ret) -		result = dentry_open(path, oflag, cred); -	else -		result = ERR_PTR(ret); -	/* -	 * dentry_open() took a persistent mnt_want_write(), -	 * so we can now drop this one. 
-	 */ -	mnt_drop_write(path->mnt); -	return result; +	if (ret) +		return ERR_PTR(ret); +	return dentry_open(path, oflag, cred);  }  /* Opens existing queue */ @@ -788,7 +777,9 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,  	struct mq_attr attr;  	int fd, error;  	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; -	struct dentry *root = ipc_ns->mq_mnt->mnt_root; +	struct vfsmount *mnt = ipc_ns->mq_mnt; +	struct dentry *root = mnt->mnt_root; +	int ro;  	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))  		return -EFAULT; @@ -802,6 +793,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,  	if (fd < 0)  		goto out_putname; +	ro = mnt_want_write(mnt);	/* we'll drop it in any case */  	error = 0;  	mutex_lock(&root->d_inode->i_mutex);  	path.dentry = lookup_one_len(name, root, strlen(name)); @@ -809,7 +801,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,  		error = PTR_ERR(path.dentry);  		goto out_putfd;  	} -	path.mnt = mntget(ipc_ns->mq_mnt); +	path.mnt = mntget(mnt);  	if (oflag & O_CREAT) {  		if (path.dentry->d_inode) {	/* entry already exists */ @@ -820,6 +812,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,  			}  			filp = do_open(&path, oflag);  		} else { +			if (ro) { +				error = ro; +				goto out; +			}  			filp = do_create(ipc_ns, root->d_inode,  						&path, oflag, mode,  						u_attr ? 
&attr : NULL); @@ -845,6 +841,7 @@ out_putfd:  		fd = error;  	}  	mutex_unlock(&root->d_inode->i_mutex); +	mnt_drop_write(mnt);  out_putname:  	putname(name);  	return fd; @@ -857,40 +854,38 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)  	struct dentry *dentry;  	struct inode *inode = NULL;  	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; +	struct vfsmount *mnt = ipc_ns->mq_mnt;  	name = getname(u_name);  	if (IS_ERR(name))  		return PTR_ERR(name); -	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex, -			I_MUTEX_PARENT); -	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); +	err = mnt_want_write(mnt); +	if (err) +		goto out_name; +	mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); +	dentry = lookup_one_len(name, mnt->mnt_root, strlen(name));  	if (IS_ERR(dentry)) {  		err = PTR_ERR(dentry);  		goto out_unlock;  	} -	if (!dentry->d_inode) { -		err = -ENOENT; -		goto out_err; -	} -  	inode = dentry->d_inode; -	if (inode) +	if (!inode) { +		err = -ENOENT; +	} else {  		ihold(inode); -	err = mnt_want_write(ipc_ns->mq_mnt); -	if (err) -		goto out_err; -	err = vfs_unlink(dentry->d_parent->d_inode, dentry); -	mnt_drop_write(ipc_ns->mq_mnt); -out_err: +		err = vfs_unlink(dentry->d_parent->d_inode, dentry); +	}  	dput(dentry);  out_unlock: -	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); -	putname(name); +	mutex_unlock(&mnt->mnt_root->d_inode->i_mutex);  	if (inode)  		iput(inode); +	mnt_drop_write(mnt); +out_name: +	putname(name);  	return err;  } diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 3a5ca582ba1..ed206fd88cc 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -250,7 +250,6 @@ static void untag_chunk(struct node *p)  		spin_unlock(&hash_lock);  		spin_unlock(&entry->lock);  		fsnotify_destroy_mark(entry); -		fsnotify_put_mark(entry);  		goto out;  	} @@ -259,7 +258,7 @@ static void untag_chunk(struct node *p)  	fsnotify_duplicate_mark(&new->mark, entry);  
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) { -		free_chunk(new); +		fsnotify_put_mark(&new->mark);  		goto Fallback;  	} @@ -293,7 +292,7 @@ static void untag_chunk(struct node *p)  	spin_unlock(&hash_lock);  	spin_unlock(&entry->lock);  	fsnotify_destroy_mark(entry); -	fsnotify_put_mark(entry); +	fsnotify_put_mark(&new->mark);	/* drop initial reference */  	goto out;  Fallback: @@ -322,7 +321,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)  	entry = &chunk->mark;  	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) { -		free_chunk(chunk); +		fsnotify_put_mark(entry);  		return -ENOSPC;  	} @@ -347,6 +346,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)  	insert_hash(chunk);  	spin_unlock(&hash_lock);  	spin_unlock(&entry->lock); +	fsnotify_put_mark(entry);	/* drop initial reference */  	return 0;  } @@ -396,7 +396,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)  	fsnotify_duplicate_mark(chunk_entry, old_entry);  	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {  		spin_unlock(&old_entry->lock); -		free_chunk(chunk); +		fsnotify_put_mark(chunk_entry);  		fsnotify_put_mark(old_entry);  		return -ENOSPC;  	} @@ -444,8 +444,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)  	spin_unlock(&chunk_entry->lock);  	spin_unlock(&old_entry->lock);  	fsnotify_destroy_mark(old_entry); +	fsnotify_put_mark(chunk_entry);	/* drop initial reference */  	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ -	fsnotify_put_mark(old_entry); /* and kill it */  	return 0;  } @@ -916,7 +916,12 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify  	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);  	evict_chunk(chunk); -	fsnotify_put_mark(entry); + +	/* +	 * We are guaranteed to have at least one reference to the mark from +	 * 
either the inode or the caller of fsnotify_destroy_mark(). +	 */ +	BUG_ON(atomic_read(&entry->refcnt) < 1);  }  static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, diff --git a/kernel/fork.c b/kernel/fork.c index 3bd2280d79f..2c8857e1285 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -455,8 +455,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)  		if (retval)  			goto out; -		if (file && uprobe_mmap(tmp)) -			goto out; +		if (file) +			uprobe_mmap(tmp);  	}  	/* a new mm has just been created */  	arch_dup_mmap(oldmm, mm); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 82ad284f823..fbf1fd098dc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3142,6 +3142,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)  # define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)  #endif +static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total) +{ +	u64 temp = (__force u64) rtime; + +	temp *= (__force u64) utime; + +	if (sizeof(cputime_t) == 4) +		temp = div_u64(temp, (__force u32) total); +	else +		temp = div64_u64(temp, (__force u64) total); + +	return (__force cputime_t) temp; +} +  void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)  {  	cputime_t rtime, utime = p->utime, total = utime + p->stime; @@ -3151,13 +3165,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)  	 */  	rtime = nsecs_to_cputime(p->se.sum_exec_runtime); -	if (total) { -		u64 temp = (__force u64) rtime; - -		temp *= (__force u64) utime; -		do_div(temp, (__force u32) total); -		utime = (__force cputime_t) temp; -	} else +	if (total) +		utime = scale_utime(utime, rtime, total); +	else  		utime = rtime;  	/* @@ -3184,13 +3194,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)  	total = cputime.utime + cputime.stime;  	rtime = nsecs_to_cputime(cputime.sum_exec_runtime); -	if (total) { -		u64 temp = 
(__force u64) rtime; - -		temp *= (__force u64) cputime.utime; -		do_div(temp, (__force u32) total); -		utime = (__force cputime_t) temp; -	} else +	if (total) +		utime = scale_utime(cputime.utime, rtime, total); +	else  		utime = rtime;  	sig->prev_utime = max(sig->prev_utime, utime); @@ -7246,6 +7252,7 @@ int in_sched_functions(unsigned long addr)  #ifdef CONFIG_CGROUP_SCHED  struct task_group root_task_group; +LIST_HEAD(task_groups);  #endif  DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d0cc03b3e70..c219bf8d704 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3387,6 +3387,14 @@ static int tg_load_down(struct task_group *tg, void *data)  static void update_h_load(long cpu)  { +	struct rq *rq = cpu_rq(cpu); +	unsigned long now = jiffies; + +	if (rq->h_load_throttle == now) +		return; + +	rq->h_load_throttle = now; +  	rcu_read_lock();  	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);  	rcu_read_unlock(); @@ -4293,11 +4301,10 @@ redo:  		env.src_rq    = busiest;  		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running); +		update_h_load(env.src_cpu);  more_balance:  		local_irq_save(flags);  		double_rq_lock(this_rq, busiest); -		if (!env.loop) -			update_h_load(env.src_cpu);  		/*  		 * cur_ld_moved - load moved in current iteration diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 573e1ca0110..944cb68420e 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -788,6 +788,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)  	const struct cpumask *span;  	span = sched_rt_period_mask(); +#ifdef CONFIG_RT_GROUP_SCHED +	/* +	 * FIXME: isolated CPUs should really leave the root task group, +	 * whether they are isolcpus or were isolated via cpusets, lest +	 * the timer run on a CPU which does not service all runqueues, +	 * potentially leaving other CPUs indefinitely throttled.  
If +	 * isolation is really required, the user will turn the throttle +	 * off to kill the perturbations it causes anyway.  Meanwhile, +	 * this maintains functionality for boot and/or troubleshooting. +	 */ +	if (rt_b == &root_task_group.rt_bandwidth) +		span = cpu_online_mask; +#endif  	for_each_cpu(i, span) {  		int enqueue = 0;  		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c35a1a7dd4d..f6714d009e7 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex;  struct cfs_rq;  struct rt_rq; -static LIST_HEAD(task_groups); +extern struct list_head task_groups;  struct cfs_bandwidth {  #ifdef CONFIG_CFS_BANDWIDTH @@ -374,7 +374,11 @@ struct rq {  #ifdef CONFIG_FAIR_GROUP_SCHED  	/* list of leaf cfs_rq on this cpu: */  	struct list_head leaf_cfs_rq_list; -#endif +#ifdef CONFIG_SMP +	unsigned long h_load_throttle; +#endif /* CONFIG_SMP */ +#endif /* CONFIG_FAIR_GROUP_SCHED */ +  #ifdef CONFIG_RT_GROUP_SCHED  	struct list_head leaf_rt_rq_list;  #endif diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 7b386e86fd2..da5eb5bed84 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)  {  	struct task_struct *stop = rq->stop; -	if (stop && stop->on_rq) +	if (stop && stop->on_rq) { +		stop->se.exec_start = rq->clock_task;  		return stop; +	}  	return NULL;  } @@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq)  static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)  { +	struct task_struct *curr = rq->curr; +	u64 delta_exec; + +	delta_exec = rq->clock_task - curr->se.exec_start; +	if (unlikely((s64)delta_exec < 0)) +		delta_exec = 0; + +	schedstat_set(curr->se.statistics.exec_max, +			max(curr->se.statistics.exec_max, delta_exec)); + +	curr->se.sum_exec_runtime += delta_exec; +	
account_group_exec_runtime(curr, delta_exec); + +	curr->se.exec_start = rq->clock_task; +	cpuacct_charge(curr, delta_exec);  }  static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) @@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)  static void set_curr_task_stop(struct rq *rq)  { +	struct task_struct *stop = rq->stop; + +	stop->se.exec_start = rq->clock_task;  }  static void switched_to_stop(struct rq *rq, struct task_struct *p) diff --git a/kernel/task_work.c b/kernel/task_work.c index 91d4e1742a0..d320d44903b 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -75,6 +75,7 @@ void task_work_run(void)  			p = q->next;  			q->func(q);  			q = p; +			cond_resched();  		}  	}  } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index e16af197a2b..0c1485e42be 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -115,6 +115,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)  {  	tk->xtime_sec += ts->tv_sec;  	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; +	tk_normalize_xtime(tk);  }  static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) @@ -276,7 +277,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)  	tk->xtime_nsec += cycle_delta * tk->mult;  	/* If arch requires, add in gettimeoffset() */ -	tk->xtime_nsec += arch_gettimeoffset() << tk->shift; +	tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;  	tk_normalize_xtime(tk); @@ -427,7 +428,7 @@ int do_settimeofday(const struct timespec *tv)  	struct timespec ts_delta, xt;  	unsigned long flags; -	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) +	if (!timespec_valid(tv))  		return -EINVAL;  	write_seqlock_irqsave(&tk->lock, flags); @@ -463,6 +464,8 @@ int timekeeping_inject_offset(struct timespec *ts)  {  	struct timekeeper *tk = &timekeeper;  	unsigned long flags; +	struct timespec tmp; +	int ret = 0;  	if ((unsigned 
long)ts->tv_nsec >= NSEC_PER_SEC)  		return -EINVAL; @@ -471,10 +474,17 @@ int timekeeping_inject_offset(struct timespec *ts)  	timekeeping_forward_now(tk); +	/* Make sure the proposed value is valid */ +	tmp = timespec_add(tk_xtime(tk),  *ts); +	if (!timespec_valid(&tmp)) { +		ret = -EINVAL; +		goto error; +	}  	tk_xtime_add(tk, ts);  	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); +error: /* even if we error out, we forwarded the time, so call update */  	timekeeping_update(tk, true);  	write_sequnlock_irqrestore(&tk->lock, flags); @@ -482,7 +492,7 @@ int timekeeping_inject_offset(struct timespec *ts)  	/* signal hrtimers about time change */  	clock_was_set(); -	return 0; +	return ret;  }  EXPORT_SYMBOL(timekeeping_inject_offset); @@ -649,7 +659,20 @@ void __init timekeeping_init(void)  	struct timespec now, boot, tmp;  	read_persistent_clock(&now); +	if (!timespec_valid(&now)) { +		pr_warn("WARNING: Persistent clock returned invalid value!\n" +			"         Check your CMOS/BIOS settings.\n"); +		now.tv_sec = 0; +		now.tv_nsec = 0; +	} +  	read_boot_clock(&boot); +	if (!timespec_valid(&boot)) { +		pr_warn("WARNING: Boot clock returned invalid value!\n" +			"         Check your CMOS/BIOS settings.\n"); +		boot.tv_sec = 0; +		boot.tv_nsec = 0; +	}  	seqlock_init(&tk->lock); @@ -1129,6 +1152,10 @@ static void update_wall_time(void)  	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;  #endif +	/* Check if there's really nothing to do */ +	if (offset < tk->cycle_interval) +		goto out; +  	/*  	 * With NO_HZ we may have to accumulate many cycle_intervals  	 * (think "ticks") worth of time at once. To do this efficiently, @@ -1161,9 +1188,9 @@ static void update_wall_time(void)  	* the vsyscall implementations are converted to use xtime_nsec  	* (shifted nanoseconds), this can be killed.  	
*/ -	remainder = tk->xtime_nsec & ((1 << tk->shift) - 1); +	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);  	tk->xtime_nsec -= remainder; -	tk->xtime_nsec += 1 << tk->shift; +	tk->xtime_nsec += 1ULL << tk->shift;  	tk->ntp_error += remainder << tk->ntp_error_shift;  	/* diff --git a/kernel/timer.c b/kernel/timer.c index a61c09374eb..8c5e7b908c6 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1407,13 +1407,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)  #endif -#ifndef __alpha__ - -/* - * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this - * should be moved into arch/i386 instead? - */ -  /**   * sys_getpid - return the thread group id of the current process   * @@ -1469,8 +1462,6 @@ SYSCALL_DEFINE0(getegid)  	return from_kgid_munged(current_user_ns(), current_egid());  } -#endif -  static void process_timeout(unsigned long __data)  {  	wake_up_process((struct task_struct *)__data); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 60e4d787567..6b245f64c8d 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -506,6 +506,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)  	int size;  	syscall_nr = syscall_get_nr(current, regs); +	if (syscall_nr < 0) +		return;  	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))  		return; @@ -580,6 +582,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)  	int size;  	syscall_nr = syscall_get_nr(current, regs); +	if (syscall_nr < 0) +		return;  	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))  		return; diff --git a/mm/compaction.c b/mm/compaction.c index e78cb968842..7fcd3a52e68 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -51,6 +51,47 @@ static inline bool migrate_async_suitable(int migratetype)  }  /* + * Compaction requires the taking of some coarse locks that are potentially + * very heavily contended. 
Check if the process needs to be scheduled or + * if the lock is contended. For async compaction, back out in the event + * if contention is severe. For sync compaction, schedule. + * + * Returns true if the lock is held. + * Returns false if the lock is released and compaction should abort + */ +static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, +				      bool locked, struct compact_control *cc) +{ +	if (need_resched() || spin_is_contended(lock)) { +		if (locked) { +			spin_unlock_irqrestore(lock, *flags); +			locked = false; +		} + +		/* async aborts if taking too long or contended */ +		if (!cc->sync) { +			if (cc->contended) +				*cc->contended = true; +			return false; +		} + +		cond_resched(); +		if (fatal_signal_pending(current)) +			return false; +	} + +	if (!locked) +		spin_lock_irqsave(lock, *flags); +	return true; +} + +static inline bool compact_trylock_irqsave(spinlock_t *lock, +			unsigned long *flags, struct compact_control *cc) +{ +	return compact_checklock_irqsave(lock, flags, false, cc); +} + +/*   * Isolate free pages onto a private freelist. Caller must hold zone->lock.   
* If @strict is true, will abort returning 0 on any invalid PFNs or non-free   * pages inside of the pageblock (even though it may still end up isolating @@ -173,7 +214,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)  }  /* Update the number of anon and file isolated pages in the zone */ -static void acct_isolated(struct zone *zone, struct compact_control *cc) +static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)  {  	struct page *page;  	unsigned int count[2] = { 0, }; @@ -181,8 +222,14 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)  	list_for_each_entry(page, &cc->migratepages, lru)  		count[!!page_is_file_cache(page)]++; -	__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); -	__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); +	/* If locked we can use the interrupt unsafe versions */ +	if (locked) { +		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); +		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); +	} else { +		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); +		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); +	}  }  /* Similar to reclaim, but different enough that they don't share logic */ @@ -228,6 +275,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,  	struct list_head *migratelist = &cc->migratepages;  	isolate_mode_t mode = 0;  	struct lruvec *lruvec; +	unsigned long flags; +	bool locked;  	/*  	 * Ensure that there are not too many pages isolated from the LRU @@ -247,25 +296,22 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,  	/* Time to isolate some pages for migration */  	cond_resched(); -	spin_lock_irq(&zone->lru_lock); +	spin_lock_irqsave(&zone->lru_lock, flags); +	locked = true;  	for (; low_pfn < end_pfn; low_pfn++) {  		struct page *page; -		bool locked = true;  		/* give a chance to irqs before checking need_resched() */  		if (!((low_pfn+1) % 
SWAP_CLUSTER_MAX)) { -			spin_unlock_irq(&zone->lru_lock); +			spin_unlock_irqrestore(&zone->lru_lock, flags);  			locked = false;  		} -		if (need_resched() || spin_is_contended(&zone->lru_lock)) { -			if (locked) -				spin_unlock_irq(&zone->lru_lock); -			cond_resched(); -			spin_lock_irq(&zone->lru_lock); -			if (fatal_signal_pending(current)) -				break; -		} else if (!locked) -			spin_lock_irq(&zone->lru_lock); + +		/* Check if it is ok to still hold the lock */ +		locked = compact_checklock_irqsave(&zone->lru_lock, &flags, +								locked, cc); +		if (!locked) +			break;  		/*  		 * migrate_pfn does not necessarily start aligned to a @@ -349,9 +395,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,  		}  	} -	acct_isolated(zone, cc); +	acct_isolated(zone, locked, cc); -	spin_unlock_irq(&zone->lru_lock); +	if (locked) +		spin_unlock_irqrestore(&zone->lru_lock, flags);  	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); @@ -384,6 +431,20 @@ static bool suitable_migration_target(struct page *page)  }  /* + * Returns the start pfn of the last page block in a zone.  This is the starting + * point for full compaction of a zone.  Compaction searches for free pages from + * the end of each zone, while isolate_freepages_block scans forward inside each + * page block. + */ +static unsigned long start_free_pfn(struct zone *zone) +{ +	unsigned long free_pfn; +	free_pfn = zone->zone_start_pfn + zone->spanned_pages; +	free_pfn &= ~(pageblock_nr_pages-1); +	return free_pfn; +} + +/*   * Based on information in the current compact_control, find blocks   * suitable for isolating free pages from and then isolate them.   */ @@ -422,17 +483,6 @@ static void isolate_freepages(struct zone *zone,  					pfn -= pageblock_nr_pages) {  		unsigned long isolated; -		/* -		 * Skip ahead if another thread is compacting in the area -		 * simultaneously. 
If we wrapped around, we can only skip -		 * ahead if zone->compact_cached_free_pfn also wrapped to -		 * above our starting point. -		 */ -		if (cc->order > 0 && (!cc->wrapped || -				      zone->compact_cached_free_pfn > -				      cc->start_free_pfn)) -			pfn = min(pfn, zone->compact_cached_free_pfn); -  		if (!pfn_valid(pfn))  			continue; @@ -458,7 +508,16 @@ static void isolate_freepages(struct zone *zone,  		 * are disabled  		 */  		isolated = 0; -		spin_lock_irqsave(&zone->lock, flags); + +		/* +		 * The zone lock must be held to isolate freepages. This +		 * unfortunately this is a very coarse lock and can be +		 * heavily contended if there are parallel allocations +		 * or parallel compactions. For async compaction do not +		 * spin on the lock +		 */ +		if (!compact_trylock_irqsave(&zone->lock, &flags, cc)) +			break;  		if (suitable_migration_target(page)) {  			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);  			isolated = isolate_freepages_block(pfn, end_pfn, @@ -474,7 +533,15 @@ static void isolate_freepages(struct zone *zone,  		 */  		if (isolated) {  			high_pfn = max(high_pfn, pfn); -			if (cc->order > 0) + +			/* +			 * If the free scanner has wrapped, update +			 * compact_cached_free_pfn to point to the highest +			 * pageblock with free pages. 
This reduces excessive +			 * scanning of full pageblocks near the end of the +			 * zone +			 */ +			if (cc->order > 0 && cc->wrapped)  				zone->compact_cached_free_pfn = high_pfn;  		}  	} @@ -484,6 +551,11 @@ static void isolate_freepages(struct zone *zone,  	cc->free_pfn = high_pfn;  	cc->nr_freepages = nr_freepages; + +	/* If compact_cached_free_pfn is reset then set it now */ +	if (cc->order > 0 && !cc->wrapped && +			zone->compact_cached_free_pfn == start_free_pfn(zone)) +		zone->compact_cached_free_pfn = high_pfn;  }  /* @@ -570,20 +642,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,  	return ISOLATE_SUCCESS;  } -/* - * Returns the start pfn of the last page block in a zone.  This is the starting - * point for full compaction of a zone.  Compaction searches for free pages from - * the end of each zone, while isolate_freepages_block scans forward inside each - * page block. - */ -static unsigned long start_free_pfn(struct zone *zone) -{ -	unsigned long free_pfn; -	free_pfn = zone->zone_start_pfn + zone->spanned_pages; -	free_pfn &= ~(pageblock_nr_pages-1); -	return free_pfn; -} -  static int compact_finished(struct zone *zone,  			    struct compact_control *cc)  { @@ -771,7 +829,7 @@ out:  static unsigned long compact_zone_order(struct zone *zone,  				 int order, gfp_t gfp_mask, -				 bool sync) +				 bool sync, bool *contended)  {  	struct compact_control cc = {  		.nr_freepages = 0, @@ -780,6 +838,7 @@ static unsigned long compact_zone_order(struct zone *zone,  		.migratetype = allocflags_to_migratetype(gfp_mask),  		.zone = zone,  		.sync = sync, +		.contended = contended,  	};  	INIT_LIST_HEAD(&cc.freepages);  	INIT_LIST_HEAD(&cc.migratepages); @@ -801,7 +860,7 @@ int sysctl_extfrag_threshold = 500;   */  unsigned long try_to_compact_pages(struct zonelist *zonelist,  			int order, gfp_t gfp_mask, nodemask_t *nodemask, -			bool sync) +			bool sync, bool *contended)  {  	enum zone_type high_zoneidx = gfp_zone(gfp_mask);  	int 
may_enter_fs = gfp_mask & __GFP_FS; @@ -825,7 +884,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,  								nodemask) {  		int status; -		status = compact_zone_order(zone, order, gfp_mask, sync); +		status = compact_zone_order(zone, order, gfp_mask, sync, +						contended);  		rc = max(status, rc);  		/* If a normal allocation would succeed, stop compacting */ @@ -861,7 +921,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)  		if (cc->order > 0) {  			int ok = zone_watermark_ok(zone, cc->order,  						low_wmark_pages(zone), 0, 0); -			if (ok && cc->order > zone->compact_order_failed) +			if (ok && cc->order >= zone->compact_order_failed)  				zone->compact_order_failed = cc->order + 1;  			/* Currently async compaction is never deferred. */  			else if (!ok && cc->sync) diff --git a/mm/filemap.c b/mm/filemap.c index fa5ca304148..384344575c3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1412,12 +1412,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,  			retval = filemap_write_and_wait_range(mapping, pos,  					pos + iov_length(iov, nr_segs) - 1);  			if (!retval) { -				struct blk_plug plug; - -				blk_start_plug(&plug);  				retval = mapping->a_ops->direct_IO(READ, iocb,  							iov, pos, nr_segs); -				blk_finish_plug(&plug);  			}  			if (retval > 0) {  				*ppos = pos + retval; @@ -2527,14 +2523,12 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,  {  	struct file *file = iocb->ki_filp;  	struct inode *inode = file->f_mapping->host; -	struct blk_plug plug;  	ssize_t ret;  	BUG_ON(iocb->ki_pos != pos);  	sb_start_write(inode->i_sb);  	mutex_lock(&inode->i_mutex); -	blk_start_plug(&plug);  	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);  	mutex_unlock(&inode->i_mutex); @@ -2545,7 +2539,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,  		if (err < 0 && ret > 0)  			ret = err;  	} -	blk_finish_plug(&plug);  	
sb_end_write(inode->i_sb);  	return ret;  } diff --git a/mm/internal.h b/mm/internal.h index 3314f79d775..b8c91b342e2 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -130,6 +130,7 @@ struct compact_control {  	int order;			/* order a direct compactor needs */  	int migratetype;		/* MOVABLE, RECLAIMABLE etc */  	struct zone *zone; +	bool *contended;		/* True if a lock was contended */  };  unsigned long diff --git a/mm/mmap.c b/mm/mmap.c index e3e86914f11..ae18a48e7e4 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1356,9 +1356,8 @@ out:  	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))  		make_pages_present(addr, addr + len); -	if (file && uprobe_mmap(vma)) -		/* matching probes but cannot insert */ -		goto unmap_and_free_vma; +	if (file) +		uprobe_mmap(vma);  	return addr; @@ -2309,7 +2308,7 @@ void exit_mmap(struct mm_struct *mm)  	}  	vm_unacct_memory(nr_accounted); -	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); +	WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);  }  /* Insert vm structure into process list sorted by address diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 009ac285fea..c66fb875104 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1928,6 +1928,17 @@ this_zone_full:  		zlc_active = 0;  		goto zonelist_scan;  	} + +	if (page) +		/* +		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was +		 * necessary to allocate the page. The expectation is +		 * that the caller is taking steps that will free more +		 * memory. The caller should avoid the page being used +		 * for !PFMEMALLOC purposes. 
+		 */ +		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); +  	return page;  } @@ -2091,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,  	struct zonelist *zonelist, enum zone_type high_zoneidx,  	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,  	int migratetype, bool sync_migration, -	bool *deferred_compaction, +	bool *contended_compaction, bool *deferred_compaction,  	unsigned long *did_some_progress)  {  	struct page *page; @@ -2106,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,  	current->flags |= PF_MEMALLOC;  	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, -						nodemask, sync_migration); +						nodemask, sync_migration, +						contended_compaction);  	current->flags &= ~PF_MEMALLOC;  	if (*did_some_progress != COMPACT_SKIPPED) { @@ -2152,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,  	struct zonelist *zonelist, enum zone_type high_zoneidx,  	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,  	int migratetype, bool sync_migration, -	bool *deferred_compaction, +	bool *contended_compaction, bool *deferred_compaction,  	unsigned long *did_some_progress)  {  	return NULL; @@ -2325,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,  	unsigned long did_some_progress;  	bool sync_migration = false;  	bool deferred_compaction = false; +	bool contended_compaction = false;  	/*  	 * In the slowpath, we sanity check order to avoid ever trying to @@ -2389,14 +2402,6 @@ rebalance:  				zonelist, high_zoneidx, nodemask,  				preferred_zone, migratetype);  		if (page) { -			/* -			 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was -			 * necessary to allocate the page. The expectation is -			 * that the caller is taking steps that will free more -			 * memory. The caller should avoid the page being used -			 * for !PFMEMALLOC purposes. 
-			 */ -			page->pfmemalloc = true;  			goto got_pg;  		}  	} @@ -2422,6 +2427,7 @@ rebalance:  					nodemask,  					alloc_flags, preferred_zone,  					migratetype, sync_migration, +					&contended_compaction,  					&deferred_compaction,  					&did_some_progress);  	if (page) @@ -2431,10 +2437,11 @@ rebalance:  	/*  	 * If compaction is deferred for high-order allocations, it is because  	 * sync compaction recently failed. In this is the case and the caller -	 * has requested the system not be heavily disrupted, fail the -	 * allocation now instead of entering direct reclaim +	 * requested a movable allocation that does not heavily disrupt the +	 * system then fail the allocation instead of entering direct reclaim.  	 */ -	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD)) +	if ((deferred_compaction || contended_compaction) && +						(gfp_mask & __GFP_NO_KSWAPD))  		goto nopage;  	/* Try direct reclaim and then allocating */ @@ -2505,6 +2512,7 @@ rebalance:  					nodemask,  					alloc_flags, preferred_zone,  					migratetype, sync_migration, +					&contended_compaction,  					&deferred_compaction,  					&did_some_progress);  		if (page) @@ -2569,8 +2577,6 @@ retry_cpuset:  		page = __alloc_pages_slowpath(gfp_mask, order,  				zonelist, high_zoneidx, nodemask,  				preferred_zone, migratetype); -	else -		page->pfmemalloc = false;  	trace_mm_page_alloc(page, order, gfp_mask, migratetype); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 73a2a83ee2d..402442402af 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -137,9 +137,21 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,  	return rc;  } +static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb) +{ +#ifdef CONFIG_NET_POLL_CONTROLLER +	if (vlan->netpoll) +		netpoll_send_skb(vlan->netpoll, skb); +#else +	BUG(); +#endif +	return NETDEV_TX_OK; +} +  static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,  		
			    struct net_device *dev)  { +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);  	unsigned int len;  	int ret; @@ -150,29 +162,30 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,  	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...  	 */  	if (veth->h_vlan_proto != htons(ETH_P_8021Q) || -	    vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) { +	    vlan->flags & VLAN_FLAG_REORDER_HDR) {  		u16 vlan_tci; -		vlan_tci = vlan_dev_priv(dev)->vlan_id; +		vlan_tci = vlan->vlan_id;  		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);  		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);  	} -	skb->dev = vlan_dev_priv(dev)->real_dev; +	skb->dev = vlan->real_dev;  	len = skb->len; -	if (netpoll_tx_running(dev)) -		return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); +	if (unlikely(netpoll_tx_running(dev))) +		return vlan_netpoll_send_skb(vlan, skb); +  	ret = dev_queue_xmit(skb);  	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {  		struct vlan_pcpu_stats *stats; -		stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats); +		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);  		u64_stats_update_begin(&stats->syncp);  		stats->tx_packets++;  		stats->tx_bytes += len;  		u64_stats_update_end(&stats->syncp);  	} else { -		this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped); +		this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);  	}  	return ret; @@ -669,25 +682,26 @@ static void vlan_dev_poll_controller(struct net_device *dev)  	return;  } -static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) +static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo, +				  gfp_t gfp)  { -	struct vlan_dev_priv *info = vlan_dev_priv(dev); -	struct net_device *real_dev = info->real_dev; +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev); +	struct net_device *real_dev = vlan->real_dev;  	struct netpoll *netpoll;  	int err = 0; -	
netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); +	netpoll = kzalloc(sizeof(*netpoll), gfp);  	err = -ENOMEM;  	if (!netpoll)  		goto out; -	err = __netpoll_setup(netpoll, real_dev); +	err = __netpoll_setup(netpoll, real_dev, gfp);  	if (err) {  		kfree(netpoll);  		goto out;  	} -	info->netpoll = netpoll; +	vlan->netpoll = netpoll;  out:  	return err; @@ -695,19 +709,15 @@ out:  static void vlan_dev_netpoll_cleanup(struct net_device *dev)  { -	struct vlan_dev_priv *info = vlan_dev_priv(dev); -	struct netpoll *netpoll = info->netpoll; +	struct vlan_dev_priv *vlan= vlan_dev_priv(dev); +	struct netpoll *netpoll = vlan->netpoll;  	if (!netpoll)  		return; -	info->netpoll = NULL; - -        /* Wait for transmitting packets to finish before freeing. */ -        synchronize_rcu_bh(); +	vlan->netpoll = NULL; -        __netpoll_cleanup(netpoll); -        kfree(netpoll); +	__netpoll_free_rcu(netpoll);  }  #endif /* CONFIG_NET_POLL_CONTROLLER */ diff --git a/net/atm/common.c b/net/atm/common.c index b4b44dbed64..0c0ad930a63 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,  		if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))  			return -ENOTCONN; +		memset(&pvc, 0, sizeof(pvc));  		pvc.sap_family = AF_ATMPVC;  		pvc.sap_addr.itf = vcc->dev->number;  		pvc.sap_addr.vpi = vcc->vpi; diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 3a734919c36..ae032402140 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c @@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,  		return -ENOTCONN;  	*sockaddr_len = sizeof(struct sockaddr_atmpvc);  	addr = (struct sockaddr_atmpvc *)sockaddr; +	memset(addr, 0, sizeof(*addr));  	addr->sap_family = AF_ATMPVC;  	addr->sap_addr.itf = vcc->dev->number;  	addr->sap_addr.vpi = vcc->vpi; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 41ff978a33f..715d7e33fba 100644 --- a/net/bluetooth/hci_event.c +++ 
b/net/bluetooth/hci_event.c @@ -1365,6 +1365,9 @@ static bool hci_resolve_next_name(struct hci_dev *hdev)  		return false;  	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); +	if (!e) +		return false; +  	if (hci_resolve_name(hdev, e) == 0) {  		e->name_state = NAME_PENDING;  		return true; @@ -1393,12 +1396,20 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,  		return;  	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); -	if (e) { +	/* If the device was not found in a list of found devices names of which +	 * are pending. there is no need to continue resolving a next name as it +	 * will be done upon receiving another Remote Name Request Complete +	 * Event */ +	if (!e) +		return; + +	list_del(&e->list); +	if (name) {  		e->name_state = NAME_KNOWN; -		list_del(&e->list); -		if (name) -			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, -					 e->data.rssi, name, name_len); +		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, +				 e->data.rssi, name, name_len); +	} else { +		e->name_state = NAME_NOT_KNOWN;  	}  	if (hci_resolve_next_name(hdev)) @@ -1762,7 +1773,12 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  		if (conn->type == ACL_LINK) {  			conn->state = BT_CONFIG;  			hci_conn_hold(conn); -			conn->disc_timeout = HCI_DISCONN_TIMEOUT; + +			if (!conn->out && !hci_conn_ssp_enabled(conn) && +			    !hci_find_link_key(hdev, &ev->bdaddr)) +				conn->disc_timeout = HCI_PAIRING_TIMEOUT; +			else +				conn->disc_timeout = HCI_DISCONN_TIMEOUT;  		} else  			conn->state = BT_CONNECTED; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index a7f04de03d7..19fdac78e55 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -694,6 +694,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,  	*addr_len = sizeof(*haddr);  	haddr->hci_family = AF_BLUETOOTH;  	haddr->hci_dev    = hdev->id; +	haddr->hci_channel= 0;  	
release_sock(sk);  	return 0; @@ -1009,6 +1010,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,  		{  			struct hci_filter *f = &hci_pi(sk)->filter; +			memset(&uf, 0, sizeof(uf));  			uf.type_mask = f->type_mask;  			uf.opcode    = f->opcode;  			uf.event_mask[0] = *((u32 *) f->event_mask + 0); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index a8964db04bf..daa149b7003 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1181,6 +1181,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)  	sk = chan->sk;  	hci_conn_hold(conn->hcon); +	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;  	bacpy(&bt_sk(sk)->src, conn->src);  	bacpy(&bt_sk(sk)->dst, conn->dst); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a4bb27e8427..1497edd191a 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -245,6 +245,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l  	BT_DBG("sock %p, sk %p", sock, sk); +	memset(la, 0, sizeof(struct sockaddr_l2));  	addr->sa_family = AF_BLUETOOTH;  	*len = sizeof(struct sockaddr_l2); @@ -1174,7 +1175,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p  	chan = l2cap_chan_create();  	if (!chan) { -		l2cap_sock_kill(sk); +		sk_free(sk);  		return NULL;  	} diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7e1e59645c0..1a17850d093 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -528,6 +528,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *  	BT_DBG("sock %p, sk %p", sock, sk); +	memset(sa, 0, sizeof(*sa));  	sa->rc_family  = AF_BLUETOOTH;  	sa->rc_channel = rfcomm_pi(sk)->channel;  	if (peer) @@ -822,6 +823,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c  		}  		sec.level = rfcomm_pi(sk)->sec_level; +		sec.key_size = 0;  		len = 
min_t(unsigned int, len, sizeof(sec));  		if (copy_to_user(optval, (char *) &sec, len)) diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index cb960773c00..56f182393c4 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -456,7 +456,7 @@ static int rfcomm_get_dev_list(void __user *arg)  	size = sizeof(*dl) + dev_num * sizeof(*di); -	dl = kmalloc(size, GFP_KERNEL); +	dl = kzalloc(size, GFP_KERNEL);  	if (!dl)  		return -ENOMEM; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 40bbe25dcff..3589e21edb0 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -131,6 +131,15 @@ static int sco_conn_del(struct hci_conn *hcon, int err)  		sco_sock_clear_timer(sk);  		sco_chan_del(sk, err);  		bh_unlock_sock(sk); + +		sco_conn_lock(conn); +		conn->sk = NULL; +		sco_pi(sk)->conn = NULL; +		sco_conn_unlock(conn); + +		if (conn->hcon) +			hci_conn_put(conn->hcon); +  		sco_sock_kill(sk);  	} @@ -821,16 +830,6 @@ static void sco_chan_del(struct sock *sk, int err)  	BT_DBG("sk %p, conn %p, err %d", sk, conn, err); -	if (conn) { -		sco_conn_lock(conn); -		conn->sk = NULL; -		sco_pi(sk)->conn = NULL; -		sco_conn_unlock(conn); - -		if (conn->hcon) -			hci_conn_put(conn->hcon); -	} -  	sk->sk_state = BT_CLOSED;  	sk->sk_err   = err;  	sk->sk_state_change(sk); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 16ef0dc85a0..901a616c808 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -579,8 +579,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)  	if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))  		smp = smp_chan_create(conn); +	else +		smp = conn->smp_chan; -	smp = conn->smp_chan; +	if (!smp) +		return SMP_UNSPECIFIED;  	smp->preq[0] = SMP_CMD_PAIRING_REQ;  	memcpy(&smp->preq[1], req, sizeof(*req)); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 33348453760..070e8a68cfc 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ 
-31,9 +31,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)  	struct net_bridge_mdb_entry *mdst;  	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); +	rcu_read_lock();  #ifdef CONFIG_BRIDGE_NETFILTER  	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {  		br_nf_pre_routing_finish_bridge_slow(skb); +		rcu_read_unlock();  		return NETDEV_TX_OK;  	}  #endif @@ -48,7 +50,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)  	skb_reset_mac_header(skb);  	skb_pull(skb, ETH_HLEN); -	rcu_read_lock();  	if (is_broadcast_ether_addr(dest))  		br_flood_deliver(br, skb);  	else if (is_multicast_ether_addr(dest)) { @@ -206,24 +207,23 @@ static void br_poll_controller(struct net_device *br_dev)  static void br_netpoll_cleanup(struct net_device *dev)  {  	struct net_bridge *br = netdev_priv(dev); -	struct net_bridge_port *p, *n; +	struct net_bridge_port *p; -	list_for_each_entry_safe(p, n, &br->port_list, list) { +	list_for_each_entry(p, &br->port_list, list)  		br_netpoll_disable(p); -	}  } -static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, +			    gfp_t gfp)  {  	struct net_bridge *br = netdev_priv(dev); -	struct net_bridge_port *p, *n; +	struct net_bridge_port *p;  	int err = 0; -	list_for_each_entry_safe(p, n, &br->port_list, list) { +	list_for_each_entry(p, &br->port_list, list) {  		if (!p->dev)  			continue; - -		err = br_netpoll_enable(p); +		err = br_netpoll_enable(p, gfp);  		if (err)  			goto fail;  	} @@ -236,17 +236,17 @@ fail:  	goto out;  } -int br_netpoll_enable(struct net_bridge_port *p) +int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)  {  	struct netpoll *np;  	int err = 0; -	np = kzalloc(sizeof(*p->np), GFP_KERNEL); +	np = kzalloc(sizeof(*p->np), gfp);  	err = -ENOMEM;  	if (!np)  		goto out; -	err = __netpoll_setup(np, p->dev); +	err = __netpoll_setup(np, p->dev, gfp);  	if 
(err) {  		kfree(np);  		goto out; @@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p)  	p->np = NULL; -	/* Wait for transmitting packets to finish before freeing. */ -	synchronize_rcu_bh(); - -	__netpoll_cleanup(np); -	kfree(np); +	__netpoll_free_rcu(np);  }  #endif diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index e9466d41270..02015a505d2 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -65,7 +65,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)  {  	skb->dev = to->dev; -	if (unlikely(netpoll_tx_running(to->dev))) { +	if (unlikely(netpoll_tx_running(to->br->dev))) {  		if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))  			kfree_skb(skb);  		else { diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index e1144e1617b..1c8fdc3558c 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -361,7 +361,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)  	if (err)  		goto err2; -	if (br_netpoll_info(br) && ((err = br_netpoll_enable(p)))) +	if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))  		goto err3;  	err = netdev_set_master(dev, br->dev); @@ -427,6 +427,10 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)  	if (!p || p->br != br)  		return -EINVAL; +	/* Since more than one interface can be attached to a bridge, +	 * there still maybe an alternate path for netconsole to use; +	 * therefore there is no reason for a NETDEV_RELEASE event. 
+	 */  	del_nbp(p);  	spin_lock_bh(&br->lock); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index a768b2408ed..f507d2af964 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -316,7 +316,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,  		netpoll_send_skb(np, skb);  } -extern int br_netpoll_enable(struct net_bridge_port *p); +extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);  extern void br_netpoll_disable(struct net_bridge_port *p);  #else  static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) @@ -329,7 +329,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,  {  } -static inline int br_netpoll_enable(struct net_bridge_port *p) +static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)  {  	return 0;  } diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 69771c04ba8..e597733affb 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -94,6 +94,10 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)  	/* check the version of IP */  	ip_version = skb_header_pointer(skb, 0, 1, &buf); +	if (!ip_version) { +		kfree_skb(skb); +		return -EINVAL; +	}  	switch (*ip_version >> 4) {  	case 4: diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 69e38db28e5..a8020293f34 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -84,7 +84,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)  			return -1;  		}  	} else { -		pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);  		memcpy(&client->fsid, fsid, sizeof(*fsid));  	}  	return 0; diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 54b531a0112..38b5dc1823d 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -189,6 +189,9 @@ int ceph_debugfs_client_init(struct ceph_client *client)  	snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,  		 client->monc.auth->global_id); +	
dout("ceph_debugfs_client_init %p %s\n", client, name); + +	BUG_ON(client->debugfs_dir);  	client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);  	if (!client->debugfs_dir)  		goto out; @@ -234,6 +237,7 @@ out:  void ceph_debugfs_client_cleanup(struct ceph_client *client)  { +	dout("ceph_debugfs_client_cleanup %p\n", client);  	debugfs_remove(client->debugfs_osdmap);  	debugfs_remove(client->debugfs_monmap);  	debugfs_remove(client->osdc.debugfs_file); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index b9796750034..24c5eea8c45 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -915,7 +915,6 @@ static int prepare_write_connect(struct ceph_connection *con)  	con->out_connect.authorizer_len = auth ?  		cpu_to_le32(auth->authorizer_buf_len) : 0; -	con_out_kvec_reset(con);  	con_out_kvec_add(con, sizeof (con->out_connect),  					&con->out_connect);  	if (auth && auth->authorizer_buf_len) @@ -1557,6 +1556,7 @@ static int process_connect(struct ceph_connection *con)  			return -1;  		}  		con->auth_retry = 1; +		con_out_kvec_reset(con);  		ret = prepare_write_connect(con);  		if (ret < 0)  			return ret; @@ -1577,6 +1577,7 @@ static int process_connect(struct ceph_connection *con)  		       ENTITY_NAME(con->peer_name),  		       ceph_pr_addr(&con->peer_addr.in_addr));  		reset_connection(con); +		con_out_kvec_reset(con);  		ret = prepare_write_connect(con);  		if (ret < 0)  			return ret; @@ -1601,6 +1602,7 @@ static int process_connect(struct ceph_connection *con)  		     le32_to_cpu(con->out_connect.connect_seq),  		     le32_to_cpu(con->in_reply.connect_seq));  		con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); +		con_out_kvec_reset(con);  		ret = prepare_write_connect(con);  		if (ret < 0)  			return ret; @@ -1617,6 +1619,7 @@ static int process_connect(struct ceph_connection *con)  		     le32_to_cpu(con->in_reply.global_seq));  		get_global_seq(con->msgr,  			       le32_to_cpu(con->in_reply.global_seq)); +		
con_out_kvec_reset(con);  		ret = prepare_write_connect(con);  		if (ret < 0)  			return ret; @@ -2135,7 +2138,11 @@ more:  		BUG_ON(con->state != CON_STATE_CONNECTING);  		con->state = CON_STATE_NEGOTIATING; -		/* Banner is good, exchange connection info */ +		/* +		 * Received banner is good, exchange connection info. +		 * Do not reset out_kvec, as sending our banner raced +		 * with receiving peer banner after connect completed. +		 */  		ret = prepare_write_connect(con);  		if (ret < 0)  			goto out; diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 105d533b55f..900ea0f043f 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -311,6 +311,17 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)  EXPORT_SYMBOL(ceph_monc_open_session);  /* + * We require the fsid and global_id in order to initialize our + * debugfs dir. + */ +static bool have_debugfs_info(struct ceph_mon_client *monc) +{ +	dout("have_debugfs_info fsid %d globalid %lld\n", +	     (int)monc->client->have_fsid, monc->auth->global_id); +	return monc->client->have_fsid && monc->auth->global_id > 0; +} + +/*   * The monitor responds with mount ack indicate mount success.  The   * included client ticket allows the client to talk to MDSs and OSDs.   
*/ @@ -320,9 +331,12 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,  	struct ceph_client *client = monc->client;  	struct ceph_monmap *monmap = NULL, *old = monc->monmap;  	void *p, *end; +	int had_debugfs_info, init_debugfs = 0;  	mutex_lock(&monc->mutex); +	had_debugfs_info = have_debugfs_info(monc); +  	dout("handle_monmap\n");  	p = msg->front.iov_base;  	end = p + msg->front.iov_len; @@ -344,12 +358,22 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,  	if (!client->have_fsid) {  		client->have_fsid = true; +		if (!had_debugfs_info && have_debugfs_info(monc)) { +			pr_info("client%lld fsid %pU\n", +				ceph_client_id(monc->client), +				&monc->client->fsid); +			init_debugfs = 1; +		}  		mutex_unlock(&monc->mutex); -		/* -		 * do debugfs initialization without mutex to avoid -		 * creating a locking dependency -		 */ -		ceph_debugfs_client_init(client); + +		if (init_debugfs) { +			/* +			 * do debugfs initialization without mutex to avoid +			 * creating a locking dependency +			 */ +			ceph_debugfs_client_init(monc->client); +		} +  		goto out_unlocked;  	}  out: @@ -865,8 +889,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc,  {  	int ret;  	int was_auth = 0; +	int had_debugfs_info, init_debugfs = 0;  	mutex_lock(&monc->mutex); +	had_debugfs_info = have_debugfs_info(monc);  	if (monc->auth->ops)  		was_auth = monc->auth->ops->is_authenticated(monc->auth);  	monc->pending_auth = 0; @@ -889,7 +915,22 @@ static void handle_auth_reply(struct ceph_mon_client *monc,  		__send_subscribe(monc);  		__resend_generic_request(monc);  	} + +	if (!had_debugfs_info && have_debugfs_info(monc)) { +		pr_info("client%lld fsid %pU\n", +			ceph_client_id(monc->client), +			&monc->client->fsid); +		init_debugfs = 1; +	}  	mutex_unlock(&monc->mutex); + +	if (init_debugfs) { +		/* +		 * do debugfs initialization without mutex to avoid +		 * creating a locking dependency +		 */ +		ceph_debugfs_client_init(monc->client); +	}  }  
static int __validate_auth(struct ceph_mon_client *monc) diff --git a/net/core/dev.c b/net/core/dev.c index a39354ee143..83988362805 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1642,6 +1642,19 @@ static inline int deliver_skb(struct sk_buff *skb,  	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);  } +static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) +{ +	if (ptype->af_packet_priv == NULL) +		return false; + +	if (ptype->id_match) +		return ptype->id_match(ptype, skb->sk); +	else if ((struct sock *)ptype->af_packet_priv == skb->sk) +		return true; + +	return false; +} +  /*   *	Support routine. Sends outgoing frames to any network   *	taps currently in use. @@ -1659,8 +1672,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)  		 * they originated from - MvS (miquels@drinkel.ow.org)  		 */  		if ((ptype->dev == dev || !ptype->dev) && -		    (ptype->af_packet_priv == NULL || -		     (struct sock *)ptype->af_packet_priv != skb->sk)) { +		    (!skb_loop_sk(ptype, skb))) {  			if (pt_prev) {  				deliver_skb(skb2, pt_prev, skb->dev);  				pt_prev = ptype; @@ -5732,6 +5744,7 @@ EXPORT_SYMBOL(netdev_refcnt_read);  /**   * netdev_wait_allrefs - wait until all references are gone. + * @dev: target net_device   *   * This is called when unregistering network devices.   
* diff --git a/net/core/netpoll.c b/net/core/netpoll.c index b4c90e42b44..346b1eb83a1 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -26,6 +26,7 @@  #include <linux/workqueue.h>  #include <linux/slab.h>  #include <linux/export.h> +#include <linux/if_vlan.h>  #include <net/tcp.h>  #include <net/udp.h>  #include <asm/unaligned.h> @@ -54,7 +55,7 @@ static atomic_t trapped;  	 MAX_UDP_CHUNK)  static void zap_completion_queue(void); -static void arp_reply(struct sk_buff *skb); +static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);  static unsigned int carrier_timeout = 4;  module_param(carrier_timeout, uint, 0644); @@ -167,15 +168,24 @@ static void poll_napi(struct net_device *dev)  	struct napi_struct *napi;  	int budget = 16; +	WARN_ON_ONCE(!irqs_disabled()); +  	list_for_each_entry(napi, &dev->napi_list, dev_list) { +		local_irq_enable();  		if (napi->poll_owner != smp_processor_id() &&  		    spin_trylock(&napi->poll_lock)) { -			budget = poll_one_napi(dev->npinfo, napi, budget); +			rcu_read_lock_bh(); +			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), +					       napi, budget); +			rcu_read_unlock_bh();  			spin_unlock(&napi->poll_lock); -			if (!budget) +			if (!budget) { +				local_irq_disable();  				break; +			}  		} +		local_irq_disable();  	}  } @@ -185,13 +195,14 @@ static void service_arp_queue(struct netpoll_info *npi)  		struct sk_buff *skb;  		while ((skb = skb_dequeue(&npi->arp_tx))) -			arp_reply(skb); +			netpoll_arp_reply(skb, npi);  	}  }  static void netpoll_poll_dev(struct net_device *dev)  {  	const struct net_device_ops *ops; +	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);  	if (!dev || !netif_running(dev))  		return; @@ -206,17 +217,18 @@ static void netpoll_poll_dev(struct net_device *dev)  	poll_napi(dev);  	if (dev->flags & IFF_SLAVE) { -		if (dev->npinfo) { +		if (ni) {  			struct net_device *bond_dev = dev->master;  			struct sk_buff *skb; -			while ((skb = 
skb_dequeue(&dev->npinfo->arp_tx))) { +			struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo); +			while ((skb = skb_dequeue(&ni->arp_tx))) {  				skb->dev = bond_dev; -				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb); +				skb_queue_tail(&bond_ni->arp_tx, skb);  			}  		}  	} -	service_arp_queue(dev->npinfo); +	service_arp_queue(ni);  	zap_completion_queue();  } @@ -302,6 +314,7 @@ static int netpoll_owner_active(struct net_device *dev)  	return 0;  } +/* call with IRQ disabled */  void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,  			     struct net_device *dev)  { @@ -309,8 +322,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,  	unsigned long tries;  	const struct net_device_ops *ops = dev->netdev_ops;  	/* It is up to the caller to keep npinfo alive. */ -	struct netpoll_info *npinfo = np->dev->npinfo; +	struct netpoll_info *npinfo; + +	WARN_ON_ONCE(!irqs_disabled()); +	npinfo = rcu_dereference_bh(np->dev->npinfo);  	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {  		__kfree_skb(skb);  		return; @@ -319,16 +335,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,  	/* don't get messages out of order, and no recursion */  	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {  		struct netdev_queue *txq; -		unsigned long flags;  		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); -		local_irq_save(flags);  		/* try until next clock tick */  		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;  		     tries > 0; --tries) {  			if (__netif_tx_trylock(txq)) {  				if (!netif_xmit_stopped(txq)) { +					if (vlan_tx_tag_present(skb) && +					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) { +						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); +						if (unlikely(!skb)) +							break; +						skb->vlan_tci = 0; +					} +  					status = ops->ndo_start_xmit(skb, dev);  					if (status == NETDEV_TX_OK)  						txq_trans_update(txq); @@ 
-347,10 +369,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,  		}  		WARN_ONCE(!irqs_disabled(), -			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n", +			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",  			dev->name, ops->ndo_start_xmit); -		local_irq_restore(flags);  	}  	if (status != NETDEV_TX_OK) { @@ -423,9 +444,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)  }  EXPORT_SYMBOL(netpoll_send_udp); -static void arp_reply(struct sk_buff *skb) +static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)  { -	struct netpoll_info *npinfo = skb->dev->npinfo;  	struct arphdr *arp;  	unsigned char *arp_ptr;  	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; @@ -543,13 +563,12 @@ static void arp_reply(struct sk_buff *skb)  	spin_unlock_irqrestore(&npinfo->rx_lock, flags);  } -int __netpoll_rx(struct sk_buff *skb) +int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)  {  	int proto, len, ulen;  	int hits = 0;  	const struct iphdr *iph;  	struct udphdr *uh; -	struct netpoll_info *npinfo = skb->dev->npinfo;  	struct netpoll *np, *tmp;  	if (list_empty(&npinfo->rx_np)) @@ -565,6 +584,12 @@ int __netpoll_rx(struct sk_buff *skb)  		return 1;  	} +	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { +		skb = vlan_untag(skb); +		if (unlikely(!skb)) +			goto out; +	} +  	proto = ntohs(eth_hdr(skb)->h_proto);  	if (proto != ETH_P_IP)  		goto out; @@ -715,7 +740,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)  }  EXPORT_SYMBOL(netpoll_parse_options); -int __netpoll_setup(struct netpoll *np, struct net_device *ndev) +int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)  {  	struct netpoll_info *npinfo;  	const struct net_device_ops *ops; @@ -734,7 +759,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)  	}  	if (!ndev->npinfo) { -		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); +		npinfo = kmalloc(sizeof(*npinfo), 
gfp);  		if (!npinfo) {  			err = -ENOMEM;  			goto out; @@ -752,7 +777,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)  		ops = np->dev->netdev_ops;  		if (ops->ndo_netpoll_setup) { -			err = ops->ndo_netpoll_setup(ndev, npinfo); +			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);  			if (err)  				goto free_npinfo;  		} @@ -857,7 +882,7 @@ int netpoll_setup(struct netpoll *np)  	refill_skbs();  	rtnl_lock(); -	err = __netpoll_setup(np, ndev); +	err = __netpoll_setup(np, ndev, GFP_KERNEL);  	rtnl_unlock();  	if (err) @@ -878,6 +903,24 @@ static int __init netpoll_init(void)  }  core_initcall(netpoll_init); +static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) +{ +	struct netpoll_info *npinfo = +			container_of(rcu_head, struct netpoll_info, rcu); + +	skb_queue_purge(&npinfo->arp_tx); +	skb_queue_purge(&npinfo->txq); + +	/* we can't call cancel_delayed_work_sync here, as we are in softirq */ +	cancel_delayed_work(&npinfo->tx_work); + +	/* clean after last, unfinished work */ +	__skb_queue_purge(&npinfo->txq); +	/* now cancel it again */ +	cancel_delayed_work(&npinfo->tx_work); +	kfree(npinfo); +} +  void __netpoll_cleanup(struct netpoll *np)  {  	struct netpoll_info *npinfo; @@ -903,20 +946,24 @@ void __netpoll_cleanup(struct netpoll *np)  			ops->ndo_netpoll_cleanup(np->dev);  		RCU_INIT_POINTER(np->dev->npinfo, NULL); +		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); +	} +} +EXPORT_SYMBOL_GPL(__netpoll_cleanup); -		/* avoid racing with NAPI reading npinfo */ -		synchronize_rcu_bh(); +static void rcu_cleanup_netpoll(struct rcu_head *rcu_head) +{ +	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu); -		skb_queue_purge(&npinfo->arp_tx); -		skb_queue_purge(&npinfo->txq); -		cancel_delayed_work_sync(&npinfo->tx_work); +	__netpoll_cleanup(np); +	kfree(np); +} -		/* clean after last, unfinished work */ -		__skb_queue_purge(&npinfo->txq); -		kfree(npinfo); -	} +void __netpoll_free_rcu(struct netpoll *np) +{ +	
call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);  } -EXPORT_SYMBOL_GPL(__netpoll_cleanup); +EXPORT_SYMBOL_GPL(__netpoll_free_rcu);  void netpoll_cleanup(struct netpoll *np)  { diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index ed0c0431fcd..c75e3f9d060 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -101,12 +101,10 @@ static int write_update_netdev_table(struct net_device *dev)  	u32 max_len;  	struct netprio_map *map; -	rtnl_lock();  	max_len = atomic_read(&max_prioidx) + 1;  	map = rtnl_dereference(dev->priomap);  	if (!map || map->priomap_len < max_len)  		ret = extend_netdev_table(dev, max_len); -	rtnl_unlock();  	return ret;  } @@ -256,17 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,  	if (!dev)  		goto out_free_devname; +	rtnl_lock();  	ret = write_update_netdev_table(dev);  	if (ret < 0)  		goto out_put_dev; -	rcu_read_lock(); -	map = rcu_dereference(dev->priomap); +	map = rtnl_dereference(dev->priomap);  	if (map)  		map->priomap[prioidx] = priority; -	rcu_read_unlock();  out_put_dev: +	rtnl_unlock();  	dev_put(dev);  out_free_devname: @@ -277,12 +275,6 @@ out_free_devname:  void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)  {  	struct task_struct *p; -	char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL); - -	if (!tmp) { -		pr_warn("Unable to attach cgrp due to alloc failure!\n"); -		return; -	}  	cgroup_taskset_for_each(p, cgrp, tset) {  		unsigned int fd; @@ -296,32 +288,24 @@ void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)  			continue;  		} -		rcu_read_lock(); +		spin_lock(&files->file_lock);  		fdt = files_fdtable(files);  		for (fd = 0; fd < fdt->max_fds; fd++) { -			char *path;  			struct file *file;  			struct socket *sock; -			unsigned long s; -			int rv, err = 0; +			int err;  			file = fcheck_files(files, fd);  			if (!file)  				continue; -			path = d_path(&file->f_path, tmp, PAGE_SIZE); -			rv = sscanf(path, "socket:[%lu]", 
&s); -			if (rv <= 0) -				continue; -  			sock = sock_from_file(file, &err); -			if (!err) +			if (sock)  				sock_update_netprioidx(sock->sk, p);  		} -		rcu_read_unlock(); +		spin_unlock(&files->file_lock);  		task_unlock(p);  	} -	kfree(tmp);  }  static struct cftype ss_files[] = { diff --git a/net/core/scm.c b/net/core/scm.c index 8f6ccfd68ef..040cebeed45 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -265,6 +265,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)  	for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;  	     i++, cmfptr++)  	{ +		struct socket *sock;  		int new_fd;  		err = security_file_receive(fp[i]);  		if (err) @@ -281,6 +282,9 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)  		}  		/* Bump the usage count and install the file. */  		get_file(fp[i]); +		sock = sock_from_file(fp[i], &err); +		if (sock) +			sock_update_netprioidx(sock->sk, current);  		fd_install(new_fd, fp[i]);  	} diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 75c3582a767..fb85d371a8d 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,  					u32 __user *optval, int __user *optlen)  {  	int rc = -ENOPROTOOPT; -	if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) +	if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)  		rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,  						 optval, optlen);  	return rc; @@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,  					u32 __user *optval, int __user *optlen)  {  	int rc = -ENOPROTOOPT; -	if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) +	if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)  		rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,  						 optval, optlen);  	return rc; diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index d65e98798ec..119c04317d4 100644 --- 
a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -535,6 +535,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,  	case DCCP_SOCKOPT_CCID_TX_INFO:  		if (len < sizeof(tfrc))  			return -EINVAL; +		memset(&tfrc, 0, sizeof(tfrc));  		tfrc.tfrctx_x	   = hc->tx_x;  		tfrc.tfrctx_x_recv = hc->tx_x_recv;  		tfrc.tfrctx_x_calc = hc->tx_x_calc; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index db0cf17c00f..7f75f21d7b8 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -404,12 +404,15 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,  {  	const struct inet_request_sock *ireq = inet_rsk(req);  	struct inet_sock *newinet = inet_sk(newsk); -	struct ip_options_rcu *opt = ireq->opt; +	struct ip_options_rcu *opt;  	struct net *net = sock_net(sk);  	struct flowi4 *fl4;  	struct rtable *rt;  	fl4 = &newinet->cork.fl.u.ip4; + +	rcu_read_lock(); +	opt = rcu_dereference(newinet->inet_opt);  	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,  			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,  			   sk->sk_protocol, inet_sk_flowi_flags(sk), @@ -421,11 +424,13 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,  		goto no_route;  	if (opt && opt->opt.is_strictroute && rt->rt_gateway)  		goto route_err; +	rcu_read_unlock();  	return &rt->dst;  route_err:  	ip_rt_put(rt);  no_route: +	rcu_read_unlock();  	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);  	return NULL;  } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 147ccc3e93d..c196d749daf 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1338,10 +1338,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,  	iph->ihl = 5;  	iph->tos = inet->tos;  	iph->frag_off = df; -	ip_select_ident(iph, &rt->dst, sk);  	iph->ttl = ttl;  	iph->protocol = sk->sk_protocol;  	ip_copy_addrs(iph, fl4); +	ip_select_ident(iph, &rt->dst, sk);  	if (opt) {  		iph->ihl += opt->optlen>>2; diff --git 
a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index ea4a23813d2..4ad9cf17399 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,  	if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,  				    hdr, NULL, &matchoff, &matchlen,  				    &addr, &port) > 0) { -		unsigned int matchend, poff, plen, buflen, n; +		unsigned int olen, matchend, poff, plen, buflen, n;  		char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];  		/* We're only interested in headers related to this @@ -163,17 +163,18 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,  				goto next;  		} +		olen = *datalen;  		if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,  			      &addr, port))  			return NF_DROP; -		matchend = matchoff + matchlen; +		matchend = matchoff + matchlen + *datalen - olen;  		/* The maddr= parameter (RFC 2361) specifies where to send  		 * the reply. */  		if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,  					       "maddr=", &poff, &plen, -					       &addr) > 0 && +					       &addr, true) > 0 &&  		    addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&  		    addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {  			buflen = sprintf(buffer, "%pI4", @@ -187,7 +188,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,  		 * from which the server received the request. 
*/  		if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,  					       "received=", &poff, &plen, -					       &addr) > 0 && +					       &addr, false) > 0 &&  		    addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&  		    addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {  			buflen = sprintf(buffer, "%pI4", diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e4ba974f143..fd9ecb52c66 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2028,7 +2028,6 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)  		}  		dev_out = net->loopback_dev;  		fl4->flowi4_oif = dev_out->ifindex; -		res.fi = NULL;  		flags |= RTCF_LOCAL;  		goto make_route;  	} diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 76782376401..00a748d1406 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -417,10 +417,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)  		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */  			tp->mtu_info = info; -			if (!sock_owned_by_user(sk)) +			if (!sock_owned_by_user(sk)) {  				tcp_v4_mtu_reduced(sk); -			else -				set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags); +			} else { +				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags)) +					sock_hold(sk); +			}  			goto out;  		} @@ -1462,6 +1464,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,  		goto exit_nonewsk;  	newsk->sk_gso_type = SKB_GSO_TCPV4; +	inet_sk_rx_dst_set(newsk, skb);  	newtp		      = tcp_sk(newsk);  	newinet		      = inet_sk(newsk); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index d9c9dcef2de..6ff7f10dce9 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -387,8 +387,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,  		struct tcp_sock *oldtp = tcp_sk(sk);  		struct tcp_cookie_values *oldcvp = oldtp->cookie_values; -		newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb); -  		/* TCP Cookie Transactions 
require space for the cookie pair,  		 * as it differs for each connection.  There is no need to  		 * copy any s_data_payload stored at the original socket. diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 20dfd892c86..d04632673a9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -910,14 +910,18 @@ void tcp_release_cb(struct sock *sk)  	if (flags & (1UL << TCP_TSQ_DEFERRED))  		tcp_tsq_handler(sk); -	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) +	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {  		tcp_write_timer_handler(sk); - -	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) +		__sock_put(sk); +	} +	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {  		tcp_delack_timer_handler(sk); - -	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) +		__sock_put(sk); +	} +	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {  		sk->sk_prot->mtu_reduced(sk); +		__sock_put(sk); +	}  }  EXPORT_SYMBOL(tcp_release_cb); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 6df36ad55a3..b774a03bd1d 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -252,7 +252,8 @@ static void tcp_delack_timer(unsigned long data)  		inet_csk(sk)->icsk_ack.blocked = 1;  		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);  		/* deleguate our work to tcp_release_cb() */ -		set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); +		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) +			sock_hold(sk);  	}  	bh_unlock_sock(sk);  	sock_put(sk); @@ -481,7 +482,8 @@ static void tcp_write_timer(unsigned long data)  		tcp_write_timer_handler(sk);  	} else {  		/* deleguate our work to tcp_release_cb() */ -		set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); +		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) +			sock_hold(sk);  	}  	bh_unlock_sock(sk);  	sock_put(sk); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 79181819a24..6bc85f7c31e 100644 --- a/net/ipv6/addrconf.c +++ 
b/net/ipv6/addrconf.c @@ -494,8 +494,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf)  	struct net_device *dev;  	struct inet6_dev *idev; -	rcu_read_lock(); -	for_each_netdev_rcu(net, dev) { +	for_each_netdev(net, dev) {  		idev = __in6_dev_get(dev);  		if (idev) {  			int changed = (!idev->cnf.forwarding) ^ (!newf); @@ -504,7 +503,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf)  				dev_forward_change(idev);  		}  	} -	rcu_read_unlock();  }  static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index da2e92d05c1..745a3204295 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -307,10 +307,10 @@ static int __net_init ipv6_proc_init_net(struct net *net)  		goto proc_dev_snmp6_fail;  	return 0; +proc_dev_snmp6_fail: +	proc_net_remove(net, "snmp6");  proc_snmp6_fail:  	proc_net_remove(net, "sockstat6"); -proc_dev_snmp6_fail: -	proc_net_remove(net, "dev_snmp6");  	return -ENOMEM;  } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index bb9ce2b2f37..a3e60cc04a8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -94,6 +94,18 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,  }  #endif +static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) +{ +	struct dst_entry *dst = skb_dst(skb); +	const struct rt6_info *rt = (const struct rt6_info *)dst; + +	dst_hold(dst); +	sk->sk_rx_dst = dst; +	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; +	if (rt->rt6i_node) +		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; +} +  static void tcp_v6_hash(struct sock *sk)  {  	if (sk->sk_state != TCP_CLOSE) { @@ -1270,6 +1282,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,  	newsk->sk_gso_type = SKB_GSO_TCPV6;  	__ip6_dst_store(newsk, dst, NULL, NULL); +	inet6_sk_rx_dst_set(newsk, skb);  	newtcp6sk = (struct tcp6_sock *)newsk;  	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; @@ 
-1729,18 +1742,6 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {  	.twsk_destructor= tcp_twsk_destructor,  }; -static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) -{ -	struct dst_entry *dst = skb_dst(skb); -	const struct rt6_info *rt = (const struct rt6_info *)dst; - -	dst_hold(dst); -	sk->sk_rx_dst = dst; -	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; -	if (rt->rt6i_node) -		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; -} -  static const struct inet_connection_sock_af_ops ipv6_specific = {  	.queue_xmit	   = inet6_csk_xmit,  	.send_check	   = tcp_v6_send_check, diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ef39812107b..f8c4c08ffb6 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -73,6 +73,13 @@ static int xfrm6_get_tos(const struct flowi *fl)  	return 0;  } +static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst) +{ +	struct rt6_info *rt = (struct rt6_info *)xdst; + +	rt6_init_peer(rt, net->ipv6.peers); +} +  static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,  			   int nfheader_len)  { @@ -286,6 +293,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {  	.get_saddr = 		xfrm6_get_saddr,  	.decode_session =	_decode_session6,  	.get_tos =		xfrm6_get_tos, +	.init_dst =		xfrm6_init_dst,  	.init_path =		xfrm6_init_path,  	.fill_dst =		xfrm6_fill_dst,  	.blackhole_route =	ip6_blackhole_route, diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 35e1e4bde58..927547171bc 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -410,6 +410,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,  	lsa->l2tp_family = AF_INET6;  	lsa->l2tp_flowinfo = 0;  	lsa->l2tp_scope_id = 0; +	lsa->l2tp_unused = 0;  	if (peer) {  		if (!lsk->peer_conn_id)  			return -ENOTCONN; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index f6fe4d40050..c2190005a11 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -969,14 
+969,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,  	struct sockaddr_llc sllc;  	struct sock *sk = sock->sk;  	struct llc_sock *llc = llc_sk(sk); -	int rc = 0; +	int rc = -EBADF;  	memset(&sllc, 0, sizeof(sllc));  	lock_sock(sk);  	if (sock_flag(sk, SOCK_ZAPPED))  		goto out;  	*uaddrlen = sizeof(sllc); -	memset(uaddr, 0, *uaddrlen);  	if (peer) {  		rc = -ENOTCONN;  		if (sk->sk_state != TCP_ESTABLISHED) @@ -1206,7 +1205,7 @@ static int __init llc2_init(void)  	rc = llc_proc_init();  	if (rc != 0) {  		printk(llc_proc_err_msg); -		goto out_unregister_llc_proto; +		goto out_station;  	}  	rc = llc_sysctl_init();  	if (rc) { @@ -1226,7 +1225,8 @@ out_sysctl:  	llc_sysctl_exit();  out_proc:  	llc_proc_exit(); -out_unregister_llc_proto: +out_station: +	llc_station_exit();  	proto_unregister(&llc_proto);  	goto out;  } diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index e32cab44ea9..dd3e83328ad 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c @@ -42,6 +42,7 @@ static void (*llc_type_handlers[2])(struct llc_sap *sap,  void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,  					    struct sk_buff *skb))  { +	smp_wmb(); /* ensure initialisation is complete before it's called */  	if (type == LLC_DEST_SAP || type == LLC_DEST_CONN)  		llc_type_handlers[type - 1] = handler;  } @@ -50,11 +51,19 @@ void llc_remove_pack(int type)  {  	if (type == LLC_DEST_SAP || type == LLC_DEST_CONN)  		llc_type_handlers[type - 1] = NULL; +	synchronize_net();  }  void llc_set_station_handler(void (*handler)(struct sk_buff *skb))  { +	/* Ensure initialisation is complete before it's called */ +	if (handler) +		smp_wmb(); +  	llc_station_handler = handler; + +	if (!handler) +		synchronize_net();  }  /** @@ -150,6 +159,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,  	int dest;  	int (*rcv)(struct sk_buff *, struct net_device *,  		   struct packet_type *, struct net_device *); +	void (*sta_handler)(struct sk_buff *skb); +	
void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb);  	if (!net_eq(dev_net(dev), &init_net))  		goto drop; @@ -182,7 +193,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,  	 */  	rcv = rcu_dereference(sap->rcv_func);  	dest = llc_pdu_type(skb); -	if (unlikely(!dest || !llc_type_handlers[dest - 1])) { +	sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL; +	if (unlikely(!sap_handler)) {  		if (rcv)  			rcv(skb, dev, pt, orig_dev);  		else @@ -193,7 +205,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,  			if (cskb)  				rcv(cskb, dev, pt, orig_dev);  		} -		llc_type_handlers[dest - 1](sap, skb); +		sap_handler(sap, skb);  	}  	llc_sap_put(sap);  out: @@ -202,9 +214,10 @@ drop:  	kfree_skb(skb);  	goto out;  handle_station: -	if (!llc_station_handler) +	sta_handler = ACCESS_ONCE(llc_station_handler); +	if (!sta_handler)  		goto drop; -	llc_station_handler(skb); +	sta_handler(skb);  	goto out;  } diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 6828e39ec2e..b2f2bac2c2a 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c @@ -687,12 +687,8 @@ static void llc_station_rcv(struct sk_buff *skb)  	llc_station_state_process(skb);  } -int __init llc_station_init(void) +void __init llc_station_init(void)  { -	int rc = -ENOBUFS; -	struct sk_buff *skb; -	struct llc_station_state_ev *ev; -  	skb_queue_head_init(&llc_main_station.mac_pdu_q);  	skb_queue_head_init(&llc_main_station.ev_q.list);  	spin_lock_init(&llc_main_station.ev_q.lock); @@ -700,23 +696,12 @@ int __init llc_station_init(void)  			(unsigned long)&llc_main_station);  	llc_main_station.ack_timer.expires  = jiffies +  						sysctl_llc_station_ack_timeout; -	skb = alloc_skb(0, GFP_ATOMIC); -	if (!skb) -		goto out; -	rc = 0; -	llc_set_station_handler(llc_station_rcv); -	ev = llc_station_ev(skb); -	memset(ev, 0, sizeof(*ev));  	llc_main_station.maximum_retry	= 1; -	llc_main_station.state		= LLC_STATION_STATE_DOWN; -	ev->type	= 
LLC_STATION_EV_TYPE_SIMPLE; -	ev->prim_type	= LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK; -	rc = llc_station_next_state(skb); -out: -	return rc; +	llc_main_station.state		= LLC_STATION_STATE_UP; +	llc_set_station_handler(llc_station_rcv);  } -void __exit llc_station_exit(void) +void llc_station_exit(void)  {  	llc_set_station_handler(NULL);  } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 84444dda194..72bf32a8487 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2759,6 +2759,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)  	{  		struct ip_vs_timeout_user t; +		memset(&t, 0, sizeof(t));  		__ip_vs_get_timeouts(net, &t);  		if (copy_to_user(user, &t, sizeof(t)) != 0)  			ret = -EFAULT; diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 45cf602a76b..527651a53a4 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -361,23 +361,6 @@ static void evict_oldest_expect(struct nf_conn *master,  	}  } -static inline int refresh_timer(struct nf_conntrack_expect *i) -{ -	struct nf_conn_help *master_help = nfct_help(i->master); -	const struct nf_conntrack_expect_policy *p; - -	if (!del_timer(&i->timeout)) -		return 0; - -	p = &rcu_dereference_protected( -		master_help->helper, -		lockdep_is_held(&nf_conntrack_lock) -		)->expect_policy[i->class]; -	i->timeout.expires = jiffies + p->timeout * HZ; -	add_timer(&i->timeout); -	return 1; -} -  static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)  {  	const struct nf_conntrack_expect_policy *p; @@ -386,7 +369,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)  	struct nf_conn_help *master_help = nfct_help(master);  	struct nf_conntrack_helper *helper;  	struct net *net = nf_ct_exp_net(expect); -	struct hlist_node *n; +	struct hlist_node *n, *next;  	unsigned int h;  	int ret = 1; @@ -395,12 +378,12 @@ 
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)  		goto out;  	}  	h = nf_ct_expect_dst_hash(&expect->tuple); -	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { +	hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {  		if (expect_matches(i, expect)) { -			/* Refresh timer: if it's dying, ignore.. */ -			if (refresh_timer(i)) { -				ret = 0; -				goto out; +			if (del_timer(&i->timeout)) { +				nf_ct_unlink_expect(i); +				nf_ct_expect_put(i); +				break;  			}  		} else if (expect_clash(i, expect)) {  			ret = -EBUSY; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 14f67a2cbcb..da4fc37a857 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1896,10 +1896,15 @@ static int  ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)  {  	struct nlattr *cda[CTA_MAX+1]; +	int ret;  	nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy); -	return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct); +	spin_lock_bh(&nf_conntrack_lock); +	ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct); +	spin_unlock_bh(&nf_conntrack_lock); + +	return ret;  }  static struct nfq_ct_hook ctnetlink_nfqueue_hook = { diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 758a1bacc12..5c0a112aeee 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -183,12 +183,12 @@ static int media_len(const struct nf_conn *ct, const char *dptr,  	return len + digits_len(ct, dptr, limit, shift);  } -static int parse_addr(const struct nf_conn *ct, const char *cp, -                      const char **endp, union nf_inet_addr *addr, -                      const char *limit) +static int sip_parse_addr(const struct nf_conn *ct, const char *cp, +			  const char **endp, union nf_inet_addr *addr, +			  const char *limit, bool delim)  {  	const char *end; -	int ret = 0; +	
int ret;  	if (!ct)  		return 0; @@ -197,16 +197,28 @@ static int parse_addr(const struct nf_conn *ct, const char *cp,  	switch (nf_ct_l3num(ct)) {  	case AF_INET:  		ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); +		if (ret == 0) +			return 0;  		break;  	case AF_INET6: +		if (cp < limit && *cp == '[') +			cp++; +		else if (delim) +			return 0; +  		ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); +		if (ret == 0) +			return 0; + +		if (end < limit && *end == ']') +			end++; +		else if (delim) +			return 0;  		break;  	default:  		BUG();  	} -	if (ret == 0 || end == cp) -		return 0;  	if (endp)  		*endp = end;  	return 1; @@ -219,7 +231,7 @@ static int epaddr_len(const struct nf_conn *ct, const char *dptr,  	union nf_inet_addr addr;  	const char *aux = dptr; -	if (!parse_addr(ct, dptr, &dptr, &addr, limit)) { +	if (!sip_parse_addr(ct, dptr, &dptr, &addr, limit, true)) {  		pr_debug("ip: %s parse failed.!\n", dptr);  		return 0;  	} @@ -296,7 +308,7 @@ int ct_sip_parse_request(const struct nf_conn *ct,  		return 0;  	dptr += shift; -	if (!parse_addr(ct, dptr, &end, addr, limit)) +	if (!sip_parse_addr(ct, dptr, &end, addr, limit, true))  		return -1;  	if (end < limit && *end == ':') {  		end++; @@ -550,7 +562,7 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,  	if (ret == 0)  		return ret; -	if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit)) +	if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true))  		return -1;  	if (*c == ':') {  		c++; @@ -599,7 +611,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,  			       unsigned int dataoff, unsigned int datalen,  			       const char *name,  			       unsigned int *matchoff, unsigned int *matchlen, -			       union nf_inet_addr *addr) +			       union nf_inet_addr *addr, bool delim)  {  	const char *limit = dptr + datalen;  	const char *start, *end; @@ -613,7 +625,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, 
const char *dptr,  		return 0;  	start += strlen(name); -	if (!parse_addr(ct, start, &end, addr, limit)) +	if (!sip_parse_addr(ct, start, &end, addr, limit, delim))  		return 0;  	*matchoff = start - dptr;  	*matchlen = end - start; @@ -675,6 +687,47 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,  	return 1;  } +static int sdp_parse_addr(const struct nf_conn *ct, const char *cp, +			  const char **endp, union nf_inet_addr *addr, +			  const char *limit) +{ +	const char *end; +	int ret; + +	memset(addr, 0, sizeof(*addr)); +	switch (nf_ct_l3num(ct)) { +	case AF_INET: +		ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); +		break; +	case AF_INET6: +		ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); +		break; +	default: +		BUG(); +	} + +	if (ret == 0) +		return 0; +	if (endp) +		*endp = end; +	return 1; +} + +/* skip ip address. returns its length. */ +static int sdp_addr_len(const struct nf_conn *ct, const char *dptr, +			const char *limit, int *shift) +{ +	union nf_inet_addr addr; +	const char *aux = dptr; + +	if (!sdp_parse_addr(ct, dptr, &dptr, &addr, limit)) { +		pr_debug("ip: %s parse failed.!\n", dptr); +		return 0; +	} + +	return dptr - aux; +} +  /* SDP header parsing: a SDP session description contains an ordered set of   * headers, starting with a section containing general session parameters,   * optionally followed by multiple media descriptions. 
@@ -686,10 +739,10 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,   */  static const struct sip_header ct_sdp_hdrs[] = {  	[SDP_HDR_VERSION]		= SDP_HDR("v=", NULL, digits_len), -	[SDP_HDR_OWNER_IP4]		= SDP_HDR("o=", "IN IP4 ", epaddr_len), -	[SDP_HDR_CONNECTION_IP4]	= SDP_HDR("c=", "IN IP4 ", epaddr_len), -	[SDP_HDR_OWNER_IP6]		= SDP_HDR("o=", "IN IP6 ", epaddr_len), -	[SDP_HDR_CONNECTION_IP6]	= SDP_HDR("c=", "IN IP6 ", epaddr_len), +	[SDP_HDR_OWNER_IP4]		= SDP_HDR("o=", "IN IP4 ", sdp_addr_len), +	[SDP_HDR_CONNECTION_IP4]	= SDP_HDR("c=", "IN IP4 ", sdp_addr_len), +	[SDP_HDR_OWNER_IP6]		= SDP_HDR("o=", "IN IP6 ", sdp_addr_len), +	[SDP_HDR_CONNECTION_IP6]	= SDP_HDR("c=", "IN IP6 ", sdp_addr_len),  	[SDP_HDR_MEDIA]			= SDP_HDR("m=", NULL, media_len),  }; @@ -775,8 +828,8 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,  	if (ret <= 0)  		return ret; -	if (!parse_addr(ct, dptr + *matchoff, NULL, addr, -			dptr + *matchoff + *matchlen)) +	if (!sdp_parse_addr(ct, dptr + *matchoff, NULL, addr, +			    dptr + *matchoff + *matchlen))  		return -1;  	return 1;  } @@ -1515,7 +1568,6 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,  }  static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly; -static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;  static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {  	[SIP_EXPECT_SIGNALLING] = { @@ -1585,9 +1637,9 @@ static int __init nf_conntrack_sip_init(void)  			sip[i][j].me = THIS_MODULE;  			if (ports[i] == SIP_PORT) -				sprintf(sip_names[i][j], "sip"); +				sprintf(sip[i][j].name, "sip");  			else -				sprintf(sip_names[i][j], "sip-%u", i); +				sprintf(sip[i][j].name, "sip-%u", i);  			pr_debug("port #%u: %u\n", i, ports[i]); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 5463969da45..1445d73533e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ 
-1362,7 +1362,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,  	if (NULL == siocb->scm)  		siocb->scm = &scm; -	err = scm_send(sock, msg, siocb->scm); +	err = scm_send(sock, msg, siocb->scm, true);  	if (err < 0)  		return err; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8ac890a1a4c..aee7196aac3 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1273,6 +1273,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)  	spin_unlock(&f->lock);  } +bool match_fanout_group(struct packet_type *ptype, struct sock * sk) +{ +	if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) +		return true; + +	return false; +} +  static int fanout_add(struct sock *sk, u16 id, u16 type_flags)  {  	struct packet_sock *po = pkt_sk(sk); @@ -1325,6 +1333,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)  		match->prot_hook.dev = po->prot_hook.dev;  		match->prot_hook.func = packet_rcv_fanout;  		match->prot_hook.af_packet_priv = match; +		match->prot_hook.id_match = match_fanout_group;  		dev_add_pack(&match->prot_hook);  		list_add(&match->list, &fanout_list);  	} diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index fe81cc18e9e..9c0fd0c7881 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -200,13 +200,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,  out:  	if (err) {  		m->tcf_qstats.overlimits++; -		/* should we be asking for packet to be dropped? 
-		 * may make sense for redirect case only -		 */ -		retval = TC_ACT_SHOT; -	} else { +		if (m->tcfm_eaction != TCA_EGRESS_MIRROR) +			retval = TC_ACT_SHOT; +		else +			retval = m->tcf_action; +	} else  		retval = m->tcf_action; -	}  	spin_unlock(&m->tcf_lock);  	return retval; diff --git a/net/socket.c b/net/socket.c index dfe5b66c97e..a5471f804d9 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2657,6 +2657,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)  	if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))  		return -EFAULT; +	memset(&ifc, 0, sizeof(ifc));  	if (ifc32.ifcbuf == 0) {  		ifc32.ifc_len = 0;  		ifc.ifc_len = 0; diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 88f2bf67196..bac973a3136 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -316,7 +316,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)   */  void svc_xprt_enqueue(struct svc_xprt *xprt)  { -	struct svc_serv	*serv = xprt->xpt_server;  	struct svc_pool *pool;  	struct svc_rqst	*rqstp;  	int cpu; @@ -362,8 +361,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)  				rqstp, rqstp->rq_xprt);  		rqstp->rq_xprt = xprt;  		svc_xprt_get(xprt); -		rqstp->rq_reserved = serv->sv_max_mesg; -		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);  		pool->sp_stats.threads_woken++;  		wake_up(&rqstp->rq_wait);  	} else { @@ -640,8 +637,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)  	if (xprt) {  		rqstp->rq_xprt = xprt;  		svc_xprt_get(xprt); -		rqstp->rq_reserved = serv->sv_max_mesg; -		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);  		/* As there is a shortage of threads and this request  		 * had to be queued, don't allow the thread to wait so @@ -738,6 +733,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)  		else  			len = xprt->xpt_ops->xpo_recvfrom(rqstp);  		dprintk("svc: got len=%d\n", len); +		rqstp->rq_reserved = serv->sv_max_mesg; +		atomic_add(rqstp->rq_reserved, 
&xprt->xpt_reserved);  	}  	svc_xprt_received(xprt); @@ -794,7 +791,8 @@ int svc_send(struct svc_rqst *rqstp)  	/* Grab mutex to serialize outgoing data. */  	mutex_lock(&xprt->xpt_mutex); -	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) +	if (test_bit(XPT_DEAD, &xprt->xpt_flags) +			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))  		len = -ENOTCONN;  	else  		len = xprt->xpt_ops->xpo_sendto(rqstp); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 18bc130255a..998aa8c1807 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1129,9 +1129,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)  	if (len >= 0)  		svsk->sk_tcplen += len;  	if (len != want) { +		svc_tcp_save_pages(svsk, rqstp);  		if (len < 0 && len != -EAGAIN)  			goto err_other; -		svc_tcp_save_pages(svsk, rqstp);  		dprintk("svc: incomplete TCP record (%d of %d)\n",  			svsk->sk_tcplen, svsk->sk_reclen);  		goto err_noclose; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e4768c180da..c5ee4ff6136 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1450,7 +1450,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,  	if (NULL == siocb->scm)  		siocb->scm = &tmp_scm;  	wait_for_unix_gc(); -	err = scm_send(sock, msg, siocb->scm); +	err = scm_send(sock, msg, siocb->scm, false);  	if (err < 0)  		return err; @@ -1619,7 +1619,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,  	if (NULL == siocb->scm)  		siocb->scm = &tmp_scm;  	wait_for_unix_gc(); -	err = scm_send(sock, msg, siocb->scm); +	err = scm_send(sock, msg, siocb->scm, false);  	if (err < 0)  		return err; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index c5a5165a592..5a2aa17e4d3 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1357,6 +1357,8 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)  		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));  		xdst->flo.ops = &xfrm_bundle_fc_ops; +		if 
(afinfo->init_dst) +			afinfo->init_dst(net, xdst);  	} else  		xdst = ERR_PTR(-ENOBUFS); diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 913d6bdfdda..ca05ba217f5 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3016,7 +3016,8 @@ sub process {  					$herectx .= raw_line($linenr, $n) . "\n";  				} -				if (($stmts =~ tr/;/;/) == 1) { +				if (($stmts =~ tr/;/;/) == 1 && +				    $stmts !~ /^\s*(if|while|for|switch)\b/) {  					WARN("SINGLE_STATEMENT_DO_WHILE_MACRO",  					     "Single statement macros should not use a do {} while (0) loop\n" . "$herectx");  				} diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 9b0c0b8b4ab..8fd107a3fac 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1786,6 +1786,7 @@ sub dump_function($$) {      $prototype =~ s/__init +//;      $prototype =~ s/__init_or_module +//;      $prototype =~ s/__must_check +//; +    $prototype =~ s/__weak +//;      $prototype =~ s/^#\s*define\s+//; #ak added      $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//; diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index d51b7c76c37..0cc99a3ea42 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -279,12 +279,9 @@ static int yama_ptrace_access_check(struct task_struct *child,  	}  	if (rc) { -		char name[sizeof(current->comm)];  		printk_ratelimited(KERN_NOTICE  			"ptrace of pid %d was attempted by: %s (pid %d)\n", -			child->pid, -			get_task_comm(name, current), -			current->pid); +			child->pid, current->comm, current->pid);  	}  	return rc; @@ -319,12 +316,9 @@ static int yama_ptrace_traceme(struct task_struct *parent)  	}  	if (rc) { -		char name[sizeof(current->comm)];  		printk_ratelimited(KERN_NOTICE  			"ptraceme of pid %d was attempted by: %s (pid %d)\n", -			current->pid, -			get_task_comm(name, parent), -			parent->pid); +			current->pid, parent->comm, parent->pid);  	}  	return rc; diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c index 
0d7b25e8164..4e1fda75c1c 100644 --- a/sound/arm/pxa2xx-ac97.c +++ b/sound/arm/pxa2xx-ac97.c @@ -106,7 +106,7 @@ static struct pxa2xx_pcm_client pxa2xx_ac97_pcm_client = {  	.prepare		= pxa2xx_ac97_pcm_prepare,  }; -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int pxa2xx_ac97_do_suspend(struct snd_card *card)  { @@ -243,7 +243,7 @@ static struct platform_driver pxa2xx_ac97_driver = {  	.driver		= {  		.name	= "pxa2xx-ac97",  		.owner	= THIS_MODULE, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  		.pm	= &pxa2xx_ac97_pm_ops,  #endif  	}, diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c index eb4ceb71123..277ebce23a4 100644 --- a/sound/atmel/abdac.c +++ b/sound/atmel/abdac.c @@ -452,6 +452,7 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev)  	dac->regs = ioremap(regs->start, resource_size(regs));  	if (!dac->regs) {  		dev_dbg(&pdev->dev, "could not remap register memory\n"); +		retval = -ENOMEM;  		goto out_free_card;  	} @@ -534,7 +535,7 @@ out_put_pclk:  	return retval;  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int atmel_abdac_suspend(struct device *pdev)  {  	struct snd_card *card = dev_get_drvdata(pdev); diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c index bf47025bdf4..9052aff37f6 100644 --- a/sound/atmel/ac97c.c +++ b/sound/atmel/ac97c.c @@ -278,14 +278,9 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream,  	if (retval < 0)  		return retval;  	/* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ -	if (cpu_is_at32ap7000()) { -		if (retval < 0) -			return retval; -		/* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ -		if (retval == 1) -			if (test_and_clear_bit(DMA_RX_READY, &chip->flags)) -				dw_dma_cyclic_free(chip->dma.rx_chan); -	} +	if (cpu_is_at32ap7000() && retval == 1) +		if (test_and_clear_bit(DMA_RX_READY, &chip->flags)) +			dw_dma_cyclic_free(chip->dma.rx_chan);  	/* Set restrictions to params. 
*/  	mutex_lock(&opened_mutex); @@ -980,6 +975,7 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)  	if (!chip->regs) {  		dev_dbg(&pdev->dev, "could not remap register memory\n"); +		retval = -ENOMEM;  		goto err_ioremap;  	} @@ -1134,7 +1130,7 @@ err_snd_card_new:  	return retval;  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int atmel_ac97c_suspend(struct device *pdev)  {  	struct snd_card *card = dev_get_drvdata(pdev); diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 1128b35b2b0..5a34355e78e 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -1176,7 +1176,7 @@ static int __devexit loopback_remove(struct platform_device *devptr)  	return 0;  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int loopback_suspend(struct device *pdev)  {  	struct snd_card *card = dev_get_drvdata(pdev); diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c index f7d3bfc6bca..54bb6644a59 100644 --- a/sound/drivers/dummy.c +++ b/sound/drivers/dummy.c @@ -1064,7 +1064,7 @@ static int __devexit snd_dummy_remove(struct platform_device *devptr)  	return 0;  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int snd_dummy_suspend(struct device *pdev)  {  	struct snd_card *card = dev_get_drvdata(pdev); diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c index 6ca59fc6dcb..ef171295f6d 100644 --- a/sound/drivers/pcsp/pcsp.c +++ b/sound/drivers/pcsp/pcsp.c @@ -199,7 +199,7 @@ static void pcsp_stop_beep(struct snd_pcsp *chip)  	pcspkr_stop_sound();  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int pcsp_suspend(struct device *dev)  {  	struct snd_pcsp *chip = dev_get_drvdata(dev); @@ -212,7 +212,7 @@ static SIMPLE_DEV_PM_OPS(pcsp_pm, pcsp_suspend, NULL);  #define PCSP_PM_OPS	&pcsp_pm  #else  #define PCSP_PM_OPS	NULL -#endif	/* CONFIG_PM */ +#endif	/* CONFIG_PM_SLEEP */  static void pcsp_shutdown(struct platform_device *dev)  { diff --git a/sound/isa/als100.c b/sound/isa/als100.c index 
2d67c78c9f4..f7cdaf51512 100644 --- a/sound/isa/als100.c +++ b/sound/isa/als100.c @@ -233,7 +233,7 @@ static int __devinit snd_card_als100_probe(int dev,  			irq[dev], dma8[dev], dma16[dev]);  	} -	if ((error = snd_sb16dsp_pcm(chip, 0, NULL)) < 0) { +	if ((error = snd_sb16dsp_pcm(chip, 0, &chip->pcm)) < 0) {  		snd_card_free(card);  		return error;  	} diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c index 733b014ec7d..b2b3c014221 100644 --- a/sound/oss/sb_audio.c +++ b/sound/oss/sb_audio.c @@ -575,13 +575,15 @@ static int jazz16_audio_set_speed(int dev, int speed)  	if (speed > 0)  	{  		int tmp; -		int s = speed * devc->channels; +		int s;  		if (speed < 5000)  			speed = 5000;  		if (speed > 44100)  			speed = 44100; +		s = speed * devc->channels; +  		devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff;  		tmp = 256 - devc->tconst; diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c index f75f5ffdfdf..a71d1c14a0f 100644 --- a/sound/pci/cs46xx/cs46xx_lib.c +++ b/sound/pci/cs46xx/cs46xx_lib.c @@ -94,7 +94,7 @@ static unsigned short snd_cs46xx_codec_read(struct snd_cs46xx *chip,  	if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX &&  		       codec_index != CS46XX_SECONDARY_CODEC_INDEX)) -		return -EINVAL; +		return 0xffff;  	chip->active_ctrl(chip, 1); diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c index 8e40262d411..2f6e9c762d3 100644 --- a/sound/pci/ctxfi/ctatc.c +++ b/sound/pci/ctxfi/ctatc.c @@ -1725,8 +1725,10 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,  	atc_connect_resources(atc);  	atc->timer = ct_timer_new(atc); -	if (!atc->timer) +	if (!atc->timer) { +		err = -ENOMEM;  		goto error1; +	}  	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, atc, &ops);  	if (err < 0) diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c index 0bc2315b181..0849aac449f 100644 --- a/sound/pci/hda/hda_beep.c +++ b/sound/pci/hda/hda_beep.c @@ -231,16 +231,22 @@ void 
snd_hda_detach_beep_device(struct hda_codec *codec)  }  EXPORT_SYMBOL_HDA(snd_hda_detach_beep_device); +static bool ctl_has_mute(struct snd_kcontrol *kcontrol) +{ +	struct hda_codec *codec = snd_kcontrol_chip(kcontrol); +	return query_amp_caps(codec, get_amp_nid(kcontrol), +			      get_amp_direction(kcontrol)) & AC_AMPCAP_MUTE; +} +  /* get/put callbacks for beep mute mixer switches */  int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,  				      struct snd_ctl_elem_value *ucontrol)  {  	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);  	struct hda_beep *beep = codec->beep; -	if (beep) { +	if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) {  		ucontrol->value.integer.value[0] = -			ucontrol->value.integer.value[1] = -			beep->enabled; +			ucontrol->value.integer.value[1] = beep->enabled;  		return 0;  	}  	return snd_hda_mixer_amp_switch_get(kcontrol, ucontrol); @@ -252,9 +258,20 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,  {  	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);  	struct hda_beep *beep = codec->beep; -	if (beep) -		snd_hda_enable_beep_device(codec, -					   *ucontrol->value.integer.value); +	if (beep) { +		u8 chs = get_amp_channels(kcontrol); +		int enable = 0; +		long *valp = ucontrol->value.integer.value; +		if (chs & 1) { +			enable |= *valp; +			valp++; +		} +		if (chs & 2) +			enable |= *valp; +		snd_hda_enable_beep_device(codec, enable); +	} +	if (!ctl_has_mute(kcontrol)) +		return 0;  	return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);  }  EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put_beep); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 88a9c20eb7a..f560051a949 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1386,6 +1386,44 @@ int snd_hda_codec_configure(struct hda_codec *codec)  }  EXPORT_SYMBOL_HDA(snd_hda_codec_configure); +/* update the stream-id if changed */ +static void update_pcm_stream_id(struct hda_codec 
*codec, +				 struct hda_cvt_setup *p, hda_nid_t nid, +				 u32 stream_tag, int channel_id) +{ +	unsigned int oldval, newval; + +	if (p->stream_tag != stream_tag || p->channel_id != channel_id) { +		oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); +		newval = (stream_tag << 4) | channel_id; +		if (oldval != newval) +			snd_hda_codec_write(codec, nid, 0, +					    AC_VERB_SET_CHANNEL_STREAMID, +					    newval); +		p->stream_tag = stream_tag; +		p->channel_id = channel_id; +	} +} + +/* update the format-id if changed */ +static void update_pcm_format(struct hda_codec *codec, struct hda_cvt_setup *p, +			      hda_nid_t nid, int format) +{ +	unsigned int oldval; + +	if (p->format_id != format) { +		oldval = snd_hda_codec_read(codec, nid, 0, +					    AC_VERB_GET_STREAM_FORMAT, 0); +		if (oldval != format) { +			msleep(1); +			snd_hda_codec_write(codec, nid, 0, +					    AC_VERB_SET_STREAM_FORMAT, +					    format); +		} +		p->format_id = format; +	} +} +  /**   * snd_hda_codec_setup_stream - set up the codec for streaming   * @codec: the CODEC to set up @@ -1400,7 +1438,6 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,  {  	struct hda_codec *c;  	struct hda_cvt_setup *p; -	unsigned int oldval, newval;  	int type;  	int i; @@ -1413,29 +1450,13 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,  	p = get_hda_cvt_setup(codec, nid);  	if (!p)  		return; -	/* update the stream-id if changed */ -	if (p->stream_tag != stream_tag || p->channel_id != channel_id) { -		oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); -		newval = (stream_tag << 4) | channel_id; -		if (oldval != newval) -			snd_hda_codec_write(codec, nid, 0, -					    AC_VERB_SET_CHANNEL_STREAMID, -					    newval); -		p->stream_tag = stream_tag; -		p->channel_id = channel_id; -	} -	/* update the format-id if changed */ -	if (p->format_id != format) { -		oldval = snd_hda_codec_read(codec, nid, 0, -					    
AC_VERB_GET_STREAM_FORMAT, 0); -		if (oldval != format) { -			msleep(1); -			snd_hda_codec_write(codec, nid, 0, -					    AC_VERB_SET_STREAM_FORMAT, -					    format); -		} -		p->format_id = format; -	} + +	if (codec->pcm_format_first) +		update_pcm_format(codec, p, nid, format); +	update_pcm_stream_id(codec, p, nid, stream_tag, channel_id); +	if (!codec->pcm_format_first) +		update_pcm_format(codec, p, nid, format); +  	p->active = 1;  	p->dirty = 0; @@ -3497,7 +3518,7 @@ static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, hda_nid_t fg  {  	int sup = snd_hda_param_read(codec, fg, AC_PAR_POWER_STATE); -	if (sup < 0) +	if (sup == -1)  		return false;  	if (sup & power_state)  		return true; @@ -4433,6 +4454,8 @@ static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)  	 * then there is no need to go through power up here.  	 */  	if (codec->power_on) { +		if (codec->power_transition < 0) +			codec->power_transition = 0;  		spin_unlock(&codec->power_lock);  		return;  	} diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index c422d330ca5..7fbc1bcaf1a 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h @@ -861,6 +861,7 @@ struct hda_codec {  	unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */  	unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */  	unsigned int no_jack_detect:1;	/* Machine has no jack-detection */ +	unsigned int pcm_format_first:1; /* PCM format must be set first */  #ifdef CONFIG_SND_HDA_POWER_SAVE  	unsigned int power_on :1;	/* current (global) power-state */  	int power_transition;	/* power-state in transition */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index c8aced182fd..60882c62f18 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -151,6 +151,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"  			 "{Intel, CPT},"  			 "{Intel, PPT},"  			 "{Intel, LPT}," +			 "{Intel, LPT_LP},"  			 "{Intel, HPT},"  
			 "{Intel, PBG},"  			 "{Intel, SCH}," @@ -3270,6 +3271,14 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {  	{ PCI_DEVICE(0x8086, 0x8c20),  	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |  	  AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO }, +	/* Lynx Point-LP */ +	{ PCI_DEVICE(0x8086, 0x9c20), +	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | +	  AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO }, +	/* Lynx Point-LP */ +	{ PCI_DEVICE(0x8086, 0x9c21), +	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | +	  AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },  	/* Haswell */  	{ PCI_DEVICE(0x8086, 0x0c0c),  	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c index 7e46258fc70..6894ec66258 100644 --- a/sound/pci/hda/hda_proc.c +++ b/sound/pci/hda/hda_proc.c @@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer,  	if (digi1 & AC_DIG1_EMPHASIS)  		snd_iprintf(buffer, " Preemphasis");  	if (digi1 & AC_DIG1_COPYRIGHT) -		snd_iprintf(buffer, " Copyright"); +		snd_iprintf(buffer, " Non-Copyright");  	if (digi1 & AC_DIG1_NONAUDIO)  		snd_iprintf(buffer, " Non-Audio");  	if (digi1 & AC_DIG1_PROFESSIONAL) diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index d0d3540e39e..49750a96d64 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -246,7 +246,7 @@ static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac)  					    AC_VERB_SET_AMP_GAIN_MUTE,  					    AMP_OUT_UNMUTE);  	} -	if (dac) +	if (dac && (get_wcaps(codec, dac) & AC_WCAP_OUT_AMP))  		snd_hda_codec_write(codec, dac, 0,  				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO);  } @@ -261,7 +261,7 @@ static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc)  					    AC_VERB_SET_AMP_GAIN_MUTE,  					    AMP_IN_UNMUTE(0));  	} -	if (adc) +	if (adc && (get_wcaps(codec, adc) & AC_WCAP_IN_AMP))  		snd_hda_codec_write(codec, adc, 0, 
AC_VERB_SET_AMP_GAIN_MUTE,  				    AMP_IN_UNMUTE(0));  } @@ -275,6 +275,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx,  	int type = dir ? HDA_INPUT : HDA_OUTPUT;  	struct snd_kcontrol_new knew =  		HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type); +	if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) { +		snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid); +		return 0; +	}  	sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);  	return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));  } @@ -286,6 +290,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx,  	int type = dir ? HDA_INPUT : HDA_OUTPUT;  	struct snd_kcontrol_new knew =  		HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type); +	if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) { +		snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid); +		return 0; +	}  	sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]);  	return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));  } @@ -464,50 +472,17 @@ exit:  }  /* - * PCM stuffs + * PCM callbacks   */ -static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid, -				 u32 stream_tag, -				 int channel_id, int format) -{ -	unsigned int oldval, newval; - -	if (!nid) -		return; - -	snd_printdd("ca0132_setup_stream: " -		"NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n", -		nid, stream_tag, channel_id, format); - -	/* update the format-id if changed */ -	oldval = snd_hda_codec_read(codec, nid, 0, -				    AC_VERB_GET_STREAM_FORMAT, -				    0); -	if (oldval != format) { -		msleep(20); -		snd_hda_codec_write(codec, nid, 0, -				    AC_VERB_SET_STREAM_FORMAT, -				    format); -	} - -	oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); -	newval = (stream_tag << 4) | channel_id; -	if (oldval != newval) { -		snd_hda_codec_write(codec, nid, 0, -				    
AC_VERB_SET_CHANNEL_STREAMID, -				    newval); -	} -} - -static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid) +static int ca0132_playback_pcm_open(struct hda_pcm_stream *hinfo, +				    struct hda_codec *codec, +				    struct snd_pcm_substream *substream)  { -	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0); -	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0); +	struct ca0132_spec *spec = codec->spec; +	return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream, +					     hinfo);  } -/* - * PCM callbacks - */  static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,  			struct hda_codec *codec,  			unsigned int stream_tag, @@ -515,10 +490,8 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,  			struct snd_pcm_substream *substream)  {  	struct ca0132_spec *spec = codec->spec; - -	ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); - -	return 0; +	return snd_hda_multi_out_analog_prepare(codec, &spec->multiout, +						stream_tag, format, substream);  }  static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, @@ -526,92 +499,45 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,  			struct snd_pcm_substream *substream)  {  	struct ca0132_spec *spec = codec->spec; - -	ca0132_cleanup_stream(codec, spec->dacs[0]); - -	return 0; +	return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);  }  /*   * Digital out   */ -static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, -			struct hda_codec *codec, -			unsigned int stream_tag, -			unsigned int format, -			struct snd_pcm_substream *substream) -{ -	struct ca0132_spec *spec = codec->spec; - -	ca0132_setup_stream(codec, spec->dig_out, stream_tag, 0, format); - -	return 0; -} - -static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, -			struct hda_codec *codec, -			struct snd_pcm_substream *substream) +static int 
ca0132_dig_playback_pcm_open(struct hda_pcm_stream *hinfo, +					struct hda_codec *codec, +					struct snd_pcm_substream *substream)  {  	struct ca0132_spec *spec = codec->spec; - -	ca0132_cleanup_stream(codec, spec->dig_out); - -	return 0; +	return snd_hda_multi_out_dig_open(codec, &spec->multiout);  } -/* - * Analog capture - */ -static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo, +static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,  			struct hda_codec *codec,  			unsigned int stream_tag,  			unsigned int format,  			struct snd_pcm_substream *substream)  {  	struct ca0132_spec *spec = codec->spec; - -	ca0132_setup_stream(codec, spec->adcs[substream->number], -			     stream_tag, 0, format); - -	return 0; -} - -static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, -			struct hda_codec *codec, -			struct snd_pcm_substream *substream) -{ -	struct ca0132_spec *spec = codec->spec; - -	ca0132_cleanup_stream(codec, spec->adcs[substream->number]); - -	return 0; +	return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, +					     stream_tag, format, substream);  } -/* - * Digital capture - */ -static int ca0132_dig_capture_pcm_prepare(struct hda_pcm_stream *hinfo, +static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,  			struct hda_codec *codec, -			unsigned int stream_tag, -			unsigned int format,  			struct snd_pcm_substream *substream)  {  	struct ca0132_spec *spec = codec->spec; - -	ca0132_setup_stream(codec, spec->dig_in, stream_tag, 0, format); - -	return 0; +	return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);  } -static int ca0132_dig_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, -			struct hda_codec *codec, -			struct snd_pcm_substream *substream) +static int ca0132_dig_playback_pcm_close(struct hda_pcm_stream *hinfo, +					 struct hda_codec *codec, +					 struct snd_pcm_substream *substream)  {  	struct ca0132_spec *spec = codec->spec; - -	ca0132_cleanup_stream(codec, 
spec->dig_in); - -	return 0; +	return snd_hda_multi_out_dig_close(codec, &spec->multiout);  }  /* @@ -621,6 +547,7 @@ static struct hda_pcm_stream ca0132_pcm_analog_playback = {  	.channels_min = 2,  	.channels_max = 2,  	.ops = { +		.open = ca0132_playback_pcm_open,  		.prepare = ca0132_playback_pcm_prepare,  		.cleanup = ca0132_playback_pcm_cleanup  	}, @@ -630,10 +557,6 @@ static struct hda_pcm_stream ca0132_pcm_analog_capture = {  	.substreams = 1,  	.channels_min = 2,  	.channels_max = 2, -	.ops = { -		.prepare = ca0132_capture_pcm_prepare, -		.cleanup = ca0132_capture_pcm_cleanup -	},  };  static struct hda_pcm_stream ca0132_pcm_digital_playback = { @@ -641,6 +564,8 @@ static struct hda_pcm_stream ca0132_pcm_digital_playback = {  	.channels_min = 2,  	.channels_max = 2,  	.ops = { +		.open = ca0132_dig_playback_pcm_open, +		.close = ca0132_dig_playback_pcm_close,  		.prepare = ca0132_dig_playback_pcm_prepare,  		.cleanup = ca0132_dig_playback_pcm_cleanup  	}, @@ -650,10 +575,6 @@ static struct hda_pcm_stream ca0132_pcm_digital_capture = {  	.substreams = 1,  	.channels_min = 2,  	.channels_max = 2, -	.ops = { -		.prepare = ca0132_dig_capture_pcm_prepare, -		.cleanup = ca0132_dig_capture_pcm_cleanup -	},  };  static int ca0132_build_pcms(struct hda_codec *codec) @@ -928,18 +849,16 @@ static int ca0132_build_controls(struct hda_codec *codec)  						    spec->dig_out);  		if (err < 0)  			return err; -		err = add_out_volume(codec, spec->dig_out, "IEC958"); +		err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);  		if (err < 0)  			return err; +		/* spec->multiout.share_spdif = 1; */  	}  	if (spec->dig_in) {  		err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in);  		if (err < 0)  			return err; -		err = add_in_volume(codec, spec->dig_in, "IEC958"); -		if (err < 0) -			return err;  	}  	return 0;  } @@ -961,6 +880,9 @@ static void ca0132_config(struct hda_codec *codec)  	struct ca0132_spec *spec = codec->spec;  	struct auto_pin_cfg *cfg = 
&spec->autocfg; +	codec->pcm_format_first = 1; +	codec->no_sticky_stream = 1; +  	/* line-outs */  	cfg->line_outs = 1;  	cfg->line_out_pins[0] = 0x0b; /* front */ @@ -988,14 +910,24 @@ static void ca0132_config(struct hda_codec *codec)  	/* Mic-in */  	spec->input_pins[0] = 0x12; -	spec->input_labels[0] = "Mic-In"; +	spec->input_labels[0] = "Mic";  	spec->adcs[0] = 0x07;  	/* Line-In */  	spec->input_pins[1] = 0x11; -	spec->input_labels[1] = "Line-In"; +	spec->input_labels[1] = "Line";  	spec->adcs[1] = 0x08;  	spec->num_inputs = 2; + +	/* SPDIF I/O */ +	spec->dig_out = 0x05; +	spec->multiout.dig_out_nid = spec->dig_out; +	cfg->dig_out_pins[0] = 0x0c; +	cfg->dig_outs = 1; +	cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF; +	spec->dig_in = 0x09; +	cfg->dig_in_pin = 0x0e; +	cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;  }  static void ca0132_init_chip(struct hda_codec *codec) diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 94040ccf8e8..ea5775a1a7d 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -4272,7 +4272,8 @@ static int stac92xx_init(struct hda_codec *codec)  	unsigned int gpio;  	int i; -	snd_hda_sequence_write(codec, spec->init); +	if (spec->init) +		snd_hda_sequence_write(codec, spec->init);  	/* power down adcs initially */  	if (spec->powerdown_adcs) @@ -5748,7 +5749,6 @@ again:  		/* fallthru */  	case 0x111d76b4: /* 6 Port without Analog Mixer */  	case 0x111d76b5: -		spec->init = stac92hd71bxx_core_init;  		codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;  		spec->num_dmics = stac92xx_connected_ports(codec,  					stac92hd71bxx_dmic_nids, @@ -5773,7 +5773,6 @@ again:  			spec->stream_delay = 40; /* 40 milliseconds */  		/* disable VSW */ -		spec->init = stac92hd71bxx_core_init;  		unmute_init++;  		snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0);  		snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3); @@ -5788,7 +5787,6 @@ again:  		/* fallthru */  	default: -		spec->init = 
stac92hd71bxx_core_init;  		codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;  		spec->num_dmics = stac92xx_connected_ports(codec,  					stac92hd71bxx_dmic_nids, @@ -5796,6 +5794,9 @@ again:  		break;  	} +	if (get_wcaps_type(get_wcaps(codec, 0x28)) == AC_WID_VOL_KNB) +		spec->init = stac92hd71bxx_core_init; +  	if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP)  		snd_hda_sequence_write_cache(codec, unmute_init); diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 80d90cb4285..43077177691 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -1752,6 +1752,14 @@ static int via_suspend(struct hda_codec *codec)  {  	struct via_spec *spec = codec->spec;  	vt1708_stop_hp_work(spec); + +	if (spec->codec_type == VT1802) { +		/* Fix pop noise on headphones */ +		int i; +		for (i = 0; i < spec->autocfg.hp_outs; i++) +			snd_hda_set_pin_ctl(codec, spec->autocfg.hp_pins[i], 0); +	} +  	return 0;  }  #endif diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c index d1ab4370673..5579b08bb35 100644 --- a/sound/pci/lx6464es/lx6464es.c +++ b/sound/pci/lx6464es/lx6464es.c @@ -851,6 +851,8 @@ static int __devinit lx_pcm_create(struct lx6464es *chip)  	/* hardcoded device name & channel count */  	err = snd_pcm_new(chip->card, (char *)card_name, 0,  			  1, 1, &pcm); +	if (err < 0) +		return err;  	pcm->private_data = chip; diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index b8ac8710f47..b12308b5ba2 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -6585,7 +6585,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,  		snd_printk(KERN_ERR "HDSPM: "  				"unable to kmalloc Mixer memory of %d Bytes\n",  				(int)sizeof(struct hdspm_mixer)); -		return err; +		return -ENOMEM;  	}  	hdspm->port_names_in = NULL; diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c index 512434efcc3..805ab6e9a78 100644 --- a/sound/pci/sis7019.c +++ b/sound/pci/sis7019.c @@ -1377,8 +1377,9 
@@ static int __devinit sis_chip_create(struct snd_card *card,  	if (rc)  		goto error_out_cleanup; -	if (request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME, -			sis)) { +	rc = request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME, +			 sis); +	if (rc) {  		dev_err(&pci->dev, "unable to allocate irq %d\n", sis->irq);  		goto error_out_cleanup;  	} diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c index f5ceb6f282d..210cafe0489 100644 --- a/sound/ppc/powermac.c +++ b/sound/ppc/powermac.c @@ -143,7 +143,7 @@ static int __devexit snd_pmac_remove(struct platform_device *devptr)  	return 0;  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int snd_pmac_driver_suspend(struct device *dev)  {  	struct snd_card *card = dev_get_drvdata(dev); diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c index 1aa52eff526..9b18b5243a5 100644 --- a/sound/ppc/snd_ps3.c +++ b/sound/ppc/snd_ps3.c @@ -1040,6 +1040,7 @@ static int __devinit snd_ps3_driver_probe(struct ps3_system_bus_device *dev)  				   GFP_KERNEL);  	if (!the_card.null_buffer_start_vaddr) {  		pr_info("%s: nullbuffer alloc failed\n", __func__); +		ret = -ENOMEM;  		goto clean_preallocate;  	}  	pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__, diff --git a/sound/soc/blackfin/bf6xx-sport.c b/sound/soc/blackfin/bf6xx-sport.c index 318c5ba5360..dfb744381c4 100644 --- a/sound/soc/blackfin/bf6xx-sport.c +++ b/sound/soc/blackfin/bf6xx-sport.c @@ -413,7 +413,14 @@ EXPORT_SYMBOL(sport_create);  void sport_delete(struct sport_device *sport)  { +	if (sport->tx_desc) +		dma_free_coherent(NULL, sport->tx_desc_size, +				sport->tx_desc, 0); +	if (sport->rx_desc) +		dma_free_coherent(NULL, sport->rx_desc_size, +				sport->rx_desc, 0);  	sport_free_resource(sport); +	kfree(sport);  }  EXPORT_SYMBOL(sport_delete); diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index 6537f16d383..e33d327396a 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c @@ -128,13 +128,9 @@ 
SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT,  ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE),  ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE), -ARIZONA_MIXER_CONTROLS("DRC2L", ARIZONA_DRC2LMIX_INPUT_1_SOURCE), -ARIZONA_MIXER_CONTROLS("DRC2R", ARIZONA_DRC2RMIX_INPUT_1_SOURCE),  SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5,  		   ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA), -SND_SOC_BYTES_MASK("DRC2", ARIZONA_DRC2_CTRL1, 5, -		   ARIZONA_DRC2R_ENA | ARIZONA_DRC2L_ENA),  ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE),  ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE), @@ -236,8 +232,6 @@ ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE);  ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE);  ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE); -ARIZONA_MIXER_ENUMS(DRC2L, ARIZONA_DRC2LMIX_INPUT_1_SOURCE); -ARIZONA_MIXER_ENUMS(DRC2R, ARIZONA_DRC2RMIX_INPUT_1_SOURCE);  ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE);  ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE); @@ -349,10 +343,6 @@ SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0,  		 NULL, 0),  SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0,  		 NULL, 0), -SND_SOC_DAPM_PGA("DRC2L", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2L_ENA_SHIFT, 0, -		 NULL, 0), -SND_SOC_DAPM_PGA("DRC2R", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2R_ENA_SHIFT, 0, -		 NULL, 0),  SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0,  		 NULL, 0), @@ -466,8 +456,6 @@ ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"),  ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"),  ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"), -ARIZONA_MIXER_WIDGETS(DRC2L, "DRC2L"), -ARIZONA_MIXER_WIDGETS(DRC2R, "DRC2R"),  ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"),  ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"), @@ -553,8 +541,6 @@ SND_SOC_DAPM_OUTPUT("SPKDAT1R"),  	{ name, "EQ4", "EQ4" }, \  	{ name, "DRC1L", "DRC1L" }, \  	{ name, 
"DRC1R", "DRC1R" }, \ -	{ name, "DRC2L", "DRC2L" }, \ -	{ name, "DRC2R", "DRC2R" }, \  	{ name, "LHPF1", "LHPF1" }, \  	{ name, "LHPF2", "LHPF2" }, \  	{ name, "LHPF3", "LHPF3" }, \ @@ -639,6 +625,15 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {  	{ "AIF2 Capture", NULL, "SYSCLK" },  	{ "AIF3 Capture", NULL, "SYSCLK" }, +	{ "IN1L PGA", NULL, "IN1L" }, +	{ "IN1R PGA", NULL, "IN1R" }, + +	{ "IN2L PGA", NULL, "IN2L" }, +	{ "IN2R PGA", NULL, "IN2R" }, + +	{ "IN3L PGA", NULL, "IN3L" }, +	{ "IN3R PGA", NULL, "IN3R" }, +  	ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),  	ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),  	ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"), @@ -675,8 +670,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {  	ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"),  	ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"), -	ARIZONA_MIXER_ROUTES("DRC2L", "DRC2L"), -	ARIZONA_MIXER_ROUTES("DRC2R", "DRC2R"),  	ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"),  	ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"), diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 8033f706518..01ebbcc5c6a 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -681,6 +681,18 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {  	{ "AIF2 Capture", NULL, "SYSCLK" },  	{ "AIF3 Capture", NULL, "SYSCLK" }, +	{ "IN1L PGA", NULL, "IN1L" }, +	{ "IN1R PGA", NULL, "IN1R" }, + +	{ "IN2L PGA", NULL, "IN2L" }, +	{ "IN2R PGA", NULL, "IN2R" }, + +	{ "IN3L PGA", NULL, "IN3L" }, +	{ "IN3R PGA", NULL, "IN3R" }, + +	{ "IN4L PGA", NULL, "IN4L" }, +	{ "IN4R PGA", NULL, "IN4R" }, +  	ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),  	ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),  	ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"), diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index aa9ce9dd7d8..ce672007379 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -3733,21 +3733,6 @@ static int wm8962_runtime_resume(struct device *dev)  	
regcache_sync(wm8962->regmap); -	regmap_update_bits(wm8962->regmap, WM8962_ANTI_POP, -			   WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA, -			   WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA); - -	/* Bias enable at 2*50k for ramp */ -	regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1, -			   WM8962_VMID_SEL_MASK | WM8962_BIAS_ENA, -			   WM8962_BIAS_ENA | 0x180); - -	msleep(5); - -	/* VMID back to 2x250k for standby */ -	regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1, -			   WM8962_VMID_SEL_MASK, 0x100); -  	return 0;  } diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 04ef03175c5..6c9eeca85b9 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -4038,6 +4038,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)  		break;  	case WM8958:  		if (wm8994->revision < 1) { +			snd_soc_dapm_add_routes(dapm, wm8994_intercon, +						ARRAY_SIZE(wm8994_intercon));  			snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,  						ARRAY_SIZE(wm8994_revd_intercon));  			snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon, diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index f16fb361a4e..c6d2076a796 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -148,7 +148,7 @@ SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1),  SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1),  SOC_ENUM("Capture Volume Steps", wm9712_enum[6]), -SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1), +SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 0),  SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),  SOC_SINGLE_TLV("Mic 1 Volume", AC97_MIC, 8, 31, 1, main_tlv), @@ -272,7 +272,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]);  /* Mic select */  static const struct snd_kcontrol_new wm9712_mic_src_controls = -SOC_DAPM_ENUM("Route", wm9712_enum[7]); +SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]);  /* diff select */  static const struct snd_kcontrol_new 
wm9712_diff_sel_controls = @@ -291,7 +291,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0,  	&wm9712_capture_selectl_controls),  SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0,  	&wm9712_capture_selectr_controls), -SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0, +SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0, +	&wm9712_mic_src_controls), +SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,  	&wm9712_mic_src_controls),  SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,  	&wm9712_diff_sel_controls), @@ -319,6 +321,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0),  SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0),  SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0),  SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0), +SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0),  SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1),  SND_SOC_DAPM_OUTPUT("MONOOUT"),  SND_SOC_DAPM_OUTPUT("HPOUTL"), @@ -379,6 +382,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {  	{"Mic PGA", NULL, "MIC1"},  	{"Mic PGA", NULL, "MIC2"}, +	/* microphones */ +	{"Differential Mic", NULL, "MIC1"}, +	{"Differential Mic", NULL, "MIC2"}, +	{"Left Mic Select Source", "Mic 1", "MIC1"}, +	{"Left Mic Select Source", "Mic 2", "MIC2"}, +	{"Left Mic Select Source", "Stereo", "MIC1"}, +	{"Left Mic Select Source", "Differential", "Differential Mic"}, +	{"Right Mic Select Source", "Mic 1", "MIC1"}, +	{"Right Mic Select Source", "Mic 2", "MIC2"}, +	{"Right Mic Select Source", "Stereo", "MIC2"}, +	{"Right Mic Select Source", "Differential", "Differential Mic"}, +  	/* left capture selector */  	{"Left Capture Select", "Mic", "MIC1"},  	{"Left Capture Select", "Speaker Mixer", "Speaker Mixer"}, diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 95441bfc819..ce5e5cd254d 100644 --- a/sound/soc/davinci/davinci-mcasp.c 
+++ b/sound/soc/davinci/davinci-mcasp.c @@ -380,14 +380,20 @@ static void mcasp_start_tx(struct davinci_audio_dev *dev)  static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)  {  	if (stream == SNDRV_PCM_STREAM_PLAYBACK) { -		if (dev->txnumevt)	/* enable FIFO */ +		if (dev->txnumevt) {	/* enable FIFO */ +			mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, +								FIFO_ENABLE);  			mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,  								FIFO_ENABLE); +		}  		mcasp_start_tx(dev);  	} else { -		if (dev->rxnumevt)	/* enable FIFO */ +		if (dev->rxnumevt) {	/* enable FIFO */ +			mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, +								FIFO_ENABLE);  			mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,  								FIFO_ENABLE); +		}  		mcasp_start_rx(dev);  	}  } diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 28dd76c7cb1..81d7728cf67 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c @@ -380,13 +380,14 @@ static int imx_ssi_dai_probe(struct snd_soc_dai *dai)  static struct snd_soc_dai_driver imx_ssi_dai = {  	.probe = imx_ssi_dai_probe,  	.playback = { -		.channels_min = 1, +		/* The SSI does not support monaural audio. 
*/ +		.channels_min = 2,  		.channels_max = 2,  		.rates = SNDRV_PCM_RATE_8000_96000,  		.formats = SNDRV_PCM_FMTBIT_S16_LE,  	},  	.capture = { -		.channels_min = 1, +		.channels_min = 2,  		.channels_max = 2,  		.rates = SNDRV_PCM_RATE_8000_96000,  		.formats = SNDRV_PCM_FMTBIT_S16_LE, diff --git a/sound/soc/mxs/Kconfig b/sound/soc/mxs/Kconfig index 99a997f19bb..b6fa77678d9 100644 --- a/sound/soc/mxs/Kconfig +++ b/sound/soc/mxs/Kconfig @@ -10,7 +10,7 @@ menuconfig SND_MXS_SOC  if SND_MXS_SOC  config SND_SOC_MXS_SGTL5000 -	tristate "SoC Audio support for i.MX boards with sgtl5000" +	tristate "SoC Audio support for MXS boards with sgtl5000"  	depends on I2C  	select SND_SOC_SGTL5000  	help diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c index 34835e8a916..d33c48baaf7 100644 --- a/sound/soc/omap/mcbsp.c +++ b/sound/soc/omap/mcbsp.c @@ -745,7 +745,7 @@ int omap_mcbsp_6pin_src_mux(struct omap_mcbsp *mcbsp, u8 mux)  {  	const char *signal, *src; -	if (mcbsp->pdata->mux_signal) +	if (!mcbsp->pdata->mux_signal)  		return -EINVAL;  	switch (mux) { diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c index b7b2a1f9142..89b064650f1 100644 --- a/sound/soc/samsung/pcm.c +++ b/sound/soc/samsung/pcm.c @@ -20,7 +20,7 @@  #include <sound/pcm_params.h>  #include <plat/audio.h> -#include <plat/dma.h> +#include <mach/dma.h>  #include "dma.h"  #include "pcm.h" diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index f81c5976b96..c501af6d8db 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -826,7 +826,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)  	}  	if (!rtd->cpu_dai) { -		dev_dbg(card->dev, "CPU DAI %s not registered\n", +		dev_err(card->dev, "CPU DAI %s not registered\n",  			dai_link->cpu_dai_name);  		return -EPROBE_DEFER;  	} @@ -857,14 +857,14 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)  		}  		if (!rtd->codec_dai) { -			dev_dbg(card->dev, "CODEC DAI %s not registered\n", +			
dev_err(card->dev, "CODEC DAI %s not registered\n",  				dai_link->codec_dai_name);  			return -EPROBE_DEFER;  		}  	}  	if (!rtd->codec) { -		dev_dbg(card->dev, "CODEC %s not registered\n", +		dev_err(card->dev, "CODEC %s not registered\n",  			dai_link->codec_name);  		return -EPROBE_DEFER;  	} @@ -888,7 +888,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)  		rtd->platform = platform;  	}  	if (!rtd->platform) { -		dev_dbg(card->dev, "platform %s not registered\n", +		dev_err(card->dev, "platform %s not registered\n",  			dai_link->platform_name);  		return -EPROBE_DEFER;  	} @@ -1481,6 +1481,8 @@ static int soc_check_aux_dev(struct snd_soc_card *card, int num)  			return 0;  	} +	dev_err(card->dev, "%s not registered\n", aux_dev->codec_name); +  	return -EPROBE_DEFER;  } diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c index 7f8b3b7428b..0c172938b82 100644 --- a/sound/soc/soc-jack.c +++ b/sound/soc/soc-jack.c @@ -103,7 +103,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)  	}  	/* Report before the DAPM sync to help users updating micbias status */ -	blocking_notifier_call_chain(&jack->notifier, status, jack); +	blocking_notifier_call_chain(&jack->notifier, jack->status, jack);  	snd_soc_dapm_sync(dapm); diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 0f647d22cb4..c4118120268 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -821,10 +821,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)  	if (++ep->use_count != 1)  		return 0; -	/* just to be sure */ -	deactivate_urbs(ep, 0, 1); -	wait_clear_urbs(ep); -  	ep->active_mask = 0;  	ep->unlink_mask = 0;  	ep->phase = 0; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index a1298f37942..62ec808ed79 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -544,6 +544,9 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)  	subs->last_frame_number = 0;  	runtime->delay = 0; +	/* clear the pending 
deactivation on the target EPs */ +	deactivate_endpoints(subs); +  	/* for playback, submit the URBs now; otherwise, the first hwptr_done  	 * updates for all URBs would happen at the same time when starting */  	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index 2884e67ee62..213362850ab 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -10,10 +10,12 @@ util/ctype.c  util/evlist.c  util/evsel.c  util/cpumap.c +util/hweight.c  util/thread_map.c  util/util.c  util/xyarray.c  util/cgroup.c  util/debugfs.c +util/rblist.c  util/strlist.c  ../../lib/rbtree.c  |