Diffstat (limited to 'drivers')
546 files changed, 9181 insertions, 5717 deletions
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h index bc533dde16c..f895a244ca7 100644 --- a/drivers/acpi/acpica/acconfig.h +++ b/drivers/acpi/acpica/acconfig.h @@ -121,7 +121,7 @@  /* Maximum sleep allowed via Sleep() operator */ -#define ACPI_MAX_SLEEP                  20000	/* Two seconds */ +#define ACPI_MAX_SLEEP                  2000	/* Two seconds */  /******************************************************************************   * diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index c34aa51af4e..e3f47872ec2 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -13,6 +13,7 @@ config ACPI_APEI_GHES  	bool "APEI Generic Hardware Error Source"  	depends on ACPI_APEI && X86  	select ACPI_HED +	select IRQ_WORK  	select LLIST  	select GENERIC_ALLOCATOR  	help diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 8041248fce9..61540360d5c 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -618,7 +618,7 @@ int apei_osc_setup(void)  	};  	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; -	capbuf[OSC_SUPPORT_TYPE] = 0; +	capbuf[OSC_SUPPORT_TYPE] = 1;  	capbuf[OSC_CONTROL_TYPE] = 0;  	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index db06f34419c..1c052127548 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3416,27 +3416,28 @@ init_card(struct atm_dev *dev)  	size = sizeof(struct vc_map *) * card->tct_size;  	IPRINTK("%s: allocate %d byte for VC map.\n", card->name, size); -	if (NULL == (card->vcs = vmalloc(size))) { +	card->vcs = vzalloc(size); +	if (!card->vcs) {  		printk("%s: memory allocation failure.\n", card->name);  		deinit_card(card);  		return -1;  	} -	memset(card->vcs, 0, size);  	size = sizeof(struct vc_map *) * card->scd_size;  	IPRINTK("%s: allocate %d byte for SCD to VC mapping.\n",  	        card->name, size); -	if (NULL == (card->scd2vc = vmalloc(size))) { +	card->scd2vc = vzalloc(size); +	if (!card->scd2vc) {  		printk("%s: memory allocation failure.\n", card->name);  		deinit_card(card);  		return -1;  	} -	memset(card->scd2vc, 0, size);  	size = sizeof(struct tst_info) * (card->tst_size - 2);  	IPRINTK("%s: allocate %d byte for TST to VC mapping.\n",  		card->name, size); -	if (NULL == (card->soft_tst = vmalloc(size))) { +	card->soft_tst = vmalloc(size); +	if (!card->soft_tst) {  		printk("%s: memory allocation failure.\n", card->name);  		deinit_card(card);  		return -1; diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index e828c548749..f5569699f31 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -1457,10 +1457,9 @@ static int __devinit vcc_table_allocate(struct lanai_dev *lanai)  	return (lanai->vccs == NULL) ? 
-ENOMEM : 0;  #else  	int bytes = (lanai->num_vci) * sizeof(struct lanai_vcc *); -	lanai->vccs = (struct lanai_vcc **) vmalloc(bytes); +	lanai->vccs = vzalloc(bytes);  	if (unlikely(lanai->vccs == NULL))  		return -ENOMEM; -	memset(lanai->vccs, 0, bytes);  	return 0;  #endif  } diff --git a/drivers/base/devres.c b/drivers/base/devres.c index cf7a0c78805..65cd7483245 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -397,6 +397,7 @@ static int remove_nodes(struct device *dev,  static int release_nodes(struct device *dev, struct list_head *first,  			 struct list_head *end, unsigned long flags) +	__releases(&dev->devres_lock)  {  	LIST_HEAD(todo);  	int cnt; diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 33e1bed68fd..a4760e095ff 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -376,7 +376,7 @@ int devtmpfs_mount(const char *mntdir)  	return err;  } -static __initdata DECLARE_COMPLETION(setup_done); +static DECLARE_COMPLETION(setup_done);  static int handle(const char *name, mode_t mode, struct device *dev)  { diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index bbb03e6f725..06ed6b4e7df 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p,  	if (!firmware_p)  		return -EINVAL; -	if (WARN_ON(usermodehelper_is_disabled())) { -		dev_err(device, "firmware: %s will not be loaded\n", name); -		return -EBUSY; -	} -  	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);  	if (!firmware) {  		dev_err(device, "%s: kmalloc(struct firmware) failed\n", @@ -539,6 +534,12 @@ static int _request_firmware(const struct firmware **firmware_p,  		return 0;  	} +	if (WARN_ON(usermodehelper_is_disabled())) { +		dev_err(device, "firmware: %s will not be loaded\n", name); +		retval = -EBUSY; +		goto out; +	} +  	if (uevent)  		dev_dbg(device, "firmware: requesting %s\n", name); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 4573f5ec936..7a24895543e 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(platform_bus);  /**   * arch_setup_pdev_archdata - Allow manipulation of archdata before its used - * @dev: platform device + * @pdev: platform device   *   * This is called before platform_device_add() such that any pdev_archdata may   * be setup before the platform_notifier is called.  So if a user needs to diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index a846b2f95cf..b97294e2d95 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -19,7 +19,7 @@  struct pm_clk_data {  	struct list_head clock_list; -	struct mutex lock; +	spinlock_t lock;  };  enum pce_status { @@ -42,6 +42,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev)  }  /** + * pm_clk_acquire - Acquire a device clock. + * @dev: Device whose clock is to be acquired. + * @ce: PM clock entry corresponding to the clock. + */ +static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) +{ +	ce->clk = clk_get(dev, ce->con_id); +	if (IS_ERR(ce->clk)) { +		ce->status = PCE_STATUS_ERROR; +	} else { +		ce->status = PCE_STATUS_ACQUIRED; +		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); +	} +} + +/**   * pm_clk_add - Start using a device clock for power management.   * @dev: Device whose clock is going to be used for power management.   * @con_id: Connection ID of the clock. 
@@ -73,26 +89,23 @@ int pm_clk_add(struct device *dev, const char *con_id)  		}  	} -	mutex_lock(&pcd->lock); +	pm_clk_acquire(dev, ce); + +	spin_lock_irq(&pcd->lock);  	list_add_tail(&ce->node, &pcd->clock_list); -	mutex_unlock(&pcd->lock); +	spin_unlock_irq(&pcd->lock);  	return 0;  }  /**   * __pm_clk_remove - Destroy PM clock entry.   * @ce: PM clock entry to destroy. - * - * This routine must be called under the mutex protecting the PM list of clocks - * corresponding the the @ce's device.   */  static void __pm_clk_remove(struct pm_clock_entry *ce)  {  	if (!ce)  		return; -	list_del(&ce->node); -  	if (ce->status < PCE_STATUS_ERROR) {  		if (ce->status == PCE_STATUS_ENABLED)  			clk_disable(ce->clk); @@ -123,21 +136,25 @@ void pm_clk_remove(struct device *dev, const char *con_id)  	if (!pcd)  		return; -	mutex_lock(&pcd->lock); +	spin_lock_irq(&pcd->lock);  	list_for_each_entry(ce, &pcd->clock_list, node) { -		if (!con_id && !ce->con_id) { -			__pm_clk_remove(ce); -			break; -		} else if (!con_id || !ce->con_id) { +		if (!con_id && !ce->con_id) +			goto remove; +		else if (!con_id || !ce->con_id)  			continue; -		} else if (!strcmp(con_id, ce->con_id)) { -			__pm_clk_remove(ce); -			break; -		} +		else if (!strcmp(con_id, ce->con_id)) +			goto remove;  	} -	mutex_unlock(&pcd->lock); +	spin_unlock_irq(&pcd->lock); +	return; + + remove: +	list_del(&ce->node); +	spin_unlock_irq(&pcd->lock); + +	__pm_clk_remove(ce);  }  /** @@ -158,7 +175,7 @@ int pm_clk_init(struct device *dev)  	}  	INIT_LIST_HEAD(&pcd->clock_list); -	mutex_init(&pcd->lock); +	spin_lock_init(&pcd->lock);  	dev->power.subsys_data = pcd;  	return 0;  } @@ -175,20 +192,27 @@ void pm_clk_destroy(struct device *dev)  {  	struct pm_clk_data *pcd = __to_pcd(dev);  	struct pm_clock_entry *ce, *c; +	struct list_head list;  	if (!pcd)  		return;  	dev->power.subsys_data = NULL; +	INIT_LIST_HEAD(&list); -	mutex_lock(&pcd->lock); +	spin_lock_irq(&pcd->lock);  	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) -		__pm_clk_remove(ce); +		list_move(&ce->node, &list); -	mutex_unlock(&pcd->lock); +	spin_unlock_irq(&pcd->lock);  	kfree(pcd); + +	list_for_each_entry_safe_reverse(ce, c, &list, node) { +		list_del(&ce->node); +		__pm_clk_remove(ce); +	}  }  #endif /* CONFIG_PM */ @@ -196,23 +220,6 @@ void pm_clk_destroy(struct device *dev)  #ifdef CONFIG_PM_RUNTIME  /** - * pm_clk_acquire - Acquire a device clock. - * @dev: Device whose clock is to be acquired. - * @con_id: Connection ID of the clock. - */ -static void pm_clk_acquire(struct device *dev, -				    struct pm_clock_entry *ce) -{ -	ce->clk = clk_get(dev, ce->con_id); -	if (IS_ERR(ce->clk)) { -		ce->status = PCE_STATUS_ERROR; -	} else { -		ce->status = PCE_STATUS_ACQUIRED; -		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); -	} -} - -/**   * pm_clk_suspend - Disable clocks in a device's PM clock list.   * @dev: Device to disable the clocks for.   
*/ @@ -220,25 +227,23 @@ int pm_clk_suspend(struct device *dev)  {  	struct pm_clk_data *pcd = __to_pcd(dev);  	struct pm_clock_entry *ce; +	unsigned long flags;  	dev_dbg(dev, "%s()\n", __func__);  	if (!pcd)  		return 0; -	mutex_lock(&pcd->lock); +	spin_lock_irqsave(&pcd->lock, flags);  	list_for_each_entry_reverse(ce, &pcd->clock_list, node) { -		if (ce->status == PCE_STATUS_NONE) -			pm_clk_acquire(dev, ce); -  		if (ce->status < PCE_STATUS_ERROR) {  			clk_disable(ce->clk);  			ce->status = PCE_STATUS_ACQUIRED;  		}  	} -	mutex_unlock(&pcd->lock); +	spin_unlock_irqrestore(&pcd->lock, flags);  	return 0;  } @@ -251,25 +256,23 @@ int pm_clk_resume(struct device *dev)  {  	struct pm_clk_data *pcd = __to_pcd(dev);  	struct pm_clock_entry *ce; +	unsigned long flags;  	dev_dbg(dev, "%s()\n", __func__);  	if (!pcd)  		return 0; -	mutex_lock(&pcd->lock); +	spin_lock_irqsave(&pcd->lock, flags);  	list_for_each_entry(ce, &pcd->clock_list, node) { -		if (ce->status == PCE_STATUS_NONE) -			pm_clk_acquire(dev, ce); -  		if (ce->status < PCE_STATUS_ERROR) {  			clk_enable(ce->clk);  			ce->status = PCE_STATUS_ENABLED;  		}  	} -	mutex_unlock(&pcd->lock); +	spin_unlock_irqrestore(&pcd->lock, flags);  	return 0;  } @@ -344,6 +347,7 @@ int pm_clk_suspend(struct device *dev)  {  	struct pm_clk_data *pcd = __to_pcd(dev);  	struct pm_clock_entry *ce; +	unsigned long flags;  	dev_dbg(dev, "%s()\n", __func__); @@ -351,12 +355,12 @@ int pm_clk_suspend(struct device *dev)  	if (!pcd || !dev->driver)  		return 0; -	mutex_lock(&pcd->lock); +	spin_lock_irqsave(&pcd->lock, flags);  	list_for_each_entry_reverse(ce, &pcd->clock_list, node)  		clk_disable(ce->clk); -	mutex_unlock(&pcd->lock); +	spin_unlock_irqrestore(&pcd->lock, flags);  	return 0;  } @@ -369,6 +373,7 @@ int pm_clk_resume(struct device *dev)  {  	struct pm_clk_data *pcd = __to_pcd(dev);  	struct pm_clock_entry *ce; +	unsigned long flags;  	dev_dbg(dev, "%s()\n", __func__); @@ -376,12 +381,12 @@ int pm_clk_resume(struct device *dev)  	if (!pcd || !dev->driver)  		return 0; -	mutex_lock(&pcd->lock); +	spin_lock_irqsave(&pcd->lock, flags);  	list_for_each_entry(ce, &pcd->clock_list, node)  		clk_enable(ce->clk); -	mutex_unlock(&pcd->lock); +	spin_unlock_irqrestore(&pcd->lock, flags);  	return 0;  } diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0eef4da1ac6..20663f8dae4 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,  	map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);  	if (map->work_buf == NULL) {  		ret = -ENOMEM; -		goto err_bus; +		goto err_map;  	}  	return map; -err_bus: -	module_put(map->bus->owner);  err_map:  	kfree(map);  err: @@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);  void regmap_exit(struct regmap *map)  {  	kfree(map->work_buf); -	module_put(map->bus->owner);  	kfree(map);  }  EXPORT_SYMBOL_GPL(regmap_exit); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 873e2e4ac55..73b7b1a18fa 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -15,6 +15,7 @@ MODULE_LICENSE("GPL");  static int bcma_bus_match(struct device *dev, struct device_driver *drv);  static int bcma_device_probe(struct device *dev);  static int bcma_device_remove(struct device *dev); +static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);  static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)  { @@ -49,6 +50,7 @@ static struct bus_type bcma_bus_type = {  
	.match		= bcma_bus_match,  	.probe		= bcma_device_probe,  	.remove		= bcma_device_remove, +	.uevent		= bcma_device_uevent,  	.dev_attrs	= bcma_device_attrs,  }; @@ -227,6 +229,16 @@ static int bcma_device_remove(struct device *dev)  	return 0;  } +static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env) +{ +	struct bcma_device *core = container_of(dev, struct bcma_device, dev); + +	return add_uevent_var(env, +			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X", +			      core->id.manuf, core->id.id, +			      core->id.rev, core->id.class); +} +  static int __init bcma_modinit(void)  {  	int err; diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 7b976296b56..912f585a760 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -378,15 +378,14 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)  	 * thread.  As we have no disk yet, we are not in the IO path,  	 * not even the IO path of the peer. */  	bytes = sizeof(struct page *)*want; -	new_pages = kmalloc(bytes, GFP_KERNEL); +	new_pages = kzalloc(bytes, GFP_KERNEL);  	if (!new_pages) { -		new_pages = vmalloc(bytes); +		new_pages = vzalloc(bytes);  		if (!new_pages)  			return NULL;  		vmalloced = 1;  	} -	memset(new_pages, 0, bytes);  	if (want >= have) {  		for (i = 0; i < have; i++)  			new_pages[i] = old_pages[i]; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index ef2ceed3be4..1706d60b8c9 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -28,7 +28,6 @@  #include <linux/compiler.h>  #include <linux/types.h> -#include <linux/version.h>  #include <linux/list.h>  #include <linux/sched.h>  #include <linux/bitops.h> @@ -928,7 +927,7 @@ struct drbd_md {  #define NL_INT64(pn,pr,member) __u64 member;  #define NL_BIT(pn,pr,member)   unsigned member:1;  #define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len; -#include "linux/drbd_nl.h" +#include <linux/drbd_nl.h>  struct drbd_backing_dev {  	struct block_device *backing_bdev; diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 0feab261e29..af2a25049bc 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -94,7 +94,7 @@ static int name ## _from_tags(struct drbd_conf *mdev, \  		 arg->member ## _len = dlen; \  		 memcpy(arg->member, tags, min_t(size_t, dlen, len)); \  		 break; -#include "linux/drbd_nl.h" +#include <linux/drbd_nl.h>  /* Generate the struct to tag_list functions */  #define NL_PACKET(name, number, fields) \ @@ -129,7 +129,7 @@ name ## _to_tags(struct drbd_conf *mdev, \  	put_unaligned(arg->member ## _len, tags++);	\  	memcpy(tags, arg->member, arg->member ## _len); \  	tags = (unsigned short *)((char *)tags + arg->member ## _len); -#include "linux/drbd_nl.h" +#include <linux/drbd_nl.h>  void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);  void drbd_nl_send_reply(struct cn_msg *, int); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 98de8f41867..9955a53733b 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4250,7 +4250,7 @@ static int __init floppy_init(void)  	use_virtual_dma = can_use_virtual_dma & 1;  	fdc_state[0].address = FDC1;  	if (fdc_state[0].address == -1) { -		del_timer(&fd_timeout); +		del_timer_sync(&fd_timeout);  		err = -ENODEV;  		goto out_unreg_region;  	} @@ -4261,7 +4261,7 @@ static int __init floppy_init(void)  	fdc = 0;		/* reset fdc in case of 
unexpected interrupt */  	err = floppy_grab_irq_and_dma();  	if (err) { -		del_timer(&fd_timeout); +		del_timer_sync(&fd_timeout);  		err = -EBUSY;  		goto out_unreg_region;  	} @@ -4318,7 +4318,7 @@ static int __init floppy_init(void)  		user_reset_fdc(-1, FD_RESET_ALWAYS, false);  	}  	fdc = 0; -	del_timer(&fd_timeout); +	del_timer_sync(&fd_timeout);  	current_drive = 0;  	initialized = true;  	if (have_no_fdc) { @@ -4368,7 +4368,7 @@ out_unreg_blkdev:  	unregister_blkdev(FLOPPY_MAJOR, "fd");  out_put_disk:  	while (dr--) { -		del_timer(&motor_off_timer[dr]); +		del_timer_sync(&motor_off_timer[dr]);  		if (disks[dr]->queue)  			blk_cleanup_queue(disks[dr]->queue);  		put_disk(disks[dr]); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 15f65b5f3fc..fe3c3249cec 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -192,7 +192,7 @@ static ssize_t rbd_snap_add(struct device *dev,  			    const char *buf,  			    size_t count);  static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev, -				  struct rbd_snap *snap);; +				  struct rbd_snap *snap);  static struct rbd_device *dev_to_rbd(struct device *dev) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 2330a9ad5e9..1540792b1e5 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -396,7 +396,7 @@ static int xen_blkbk_map(struct blkif_request *req,  			continue;  		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr), -			blkbk->pending_page(pending_req, i), false); +			blkbk->pending_page(pending_req, i), NULL);  		if (ret) {  			pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",  				 (unsigned long)map[i].dev_bus_addr, ret); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 9e40b283a46..c4bd34063ec 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -27,7 +27,6 @@  #ifndef __XEN_BLKIF__BACKEND__COMMON_H__  #define __XEN_BLKIF__BACKEND__COMMON_H__ -#include <linux/version.h>  #include <linux/module.h>  #include <linux/interrupt.h>  #include <linux/slab.h> @@ -46,7 +45,7 @@  #define DRV_PFX "xen-blkback:"  #define DPRINTK(fmt, args...)				\ -	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",	\ +	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\  		 __func__, __LINE__, ##args) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 3f129b45451..5fd2010f7d2 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev,  		/*  		 * Enforce precondition before potential leak point. -		 * blkif_disconnect() is idempotent. +		 * xen_blkif_disconnect() is idempotent.  		 
*/  		xen_blkif_disconnect(be->blkif); @@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev,  		break;  	case XenbusStateClosing: -		xen_blkif_disconnect(be->blkif);  		xenbus_switch_state(dev, XenbusStateClosing);  		break;  	case XenbusStateClosed: +		xen_blkif_disconnect(be->blkif);  		xenbus_switch_state(dev, XenbusStateClosed);  		if (xenbus_dev_is_online(dev))  			break;  		/* fall through if not online */  	case XenbusStateUnknown: -		/* implies blkif_disconnect() via blkback_remove() */ +		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */  		device_unregister(&dev->dev);  		break; diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a5854735bb2..db7cb8111fb 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {  	/* Atheros AR3011 with sflash firmware*/  	{ USB_DEVICE(0x0CF3, 0x3002) },  	{ USB_DEVICE(0x13d3, 0x3304) }, +	{ USB_DEVICE(0x0930, 0x0215) },  	/* Atheros AR9285 Malbec with sflash firmware */  	{ USB_DEVICE(0x03F0, 0x311D) }, diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 7f521d4ac65..c827d737cce 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -81,7 +81,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {  	.io_port_2 = 0x7a,  }; -static const struct btmrvl_sdio_device btmrvl_sdio_sd6888 = { +static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {  	.helper		= "sd8688_helper.bin",  	.firmware	= "sd8688.bin",  	.reg		= &btmrvl_reg_8688, @@ -98,7 +98,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {  static const struct sdio_device_id btmrvl_sdio_ids[] = {  	/* Marvell SD8688 Bluetooth device */  	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), -			.driver_data = (unsigned long) &btmrvl_sdio_sd6888 }, +			.driver_data = (unsigned long) &btmrvl_sdio_sd8688 },  	/* Marvell SD8787 Bluetooth device */  	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),  			.driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 91d13a9e8c6..9cbac6b445e 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = {  	/* Apple MacBookAir3,1, MacBookAir3,2 */  	{ USB_DEVICE(0x05ac, 0x821b) }, +	/* Apple MacBookAir4,1 */ +	{ USB_DEVICE(0x05ac, 0x821f) }, +  	/* Apple MacBookPro8,2 */  	{ USB_DEVICE(0x05ac, 0x821a) }, +	/* Apple MacMini5,1 */ +	{ USB_DEVICE(0x05ac, 0x8281) }, +  	/* AVM BlueFRITZ! 
USB v2.0 */  	{ USB_DEVICE(0x057c, 0x3800) }, @@ -106,6 +112,7 @@ static struct usb_device_id blacklist_table[] = {  	/* Atheros 3011 with sflash firmware */  	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },  	{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, +	{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },  	/* Atheros AR9285 Malbec with sflash firmware */  	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, @@ -256,7 +263,9 @@ static void btusb_intr_complete(struct urb *urb)  	err = usb_submit_urb(urb, GFP_ATOMIC);  	if (err < 0) { -		if (err != -EPERM) +		/* -EPERM: urb is being killed; +		 * -ENODEV: device got disconnected */ +		if (err != -EPERM && err != -ENODEV)  			BT_ERR("%s urb %p failed to resubmit (%d)",  						hdev->name, urb, -err);  		usb_unanchor_urb(urb); @@ -341,7 +350,9 @@ static void btusb_bulk_complete(struct urb *urb)  	err = usb_submit_urb(urb, GFP_ATOMIC);  	if (err < 0) { -		if (err != -EPERM) +		/* -EPERM: urb is being killed; +		 * -ENODEV: device got disconnected */ +		if (err != -EPERM && err != -ENODEV)  			BT_ERR("%s urb %p failed to resubmit (%d)",  						hdev->name, urb, -err);  		usb_unanchor_urb(urb); @@ -431,7 +442,9 @@ static void btusb_isoc_complete(struct urb *urb)  	err = usb_submit_urb(urb, GFP_ATOMIC);  	if (err < 0) { -		if (err != -EPERM) +		/* -EPERM: urb is being killed; +		 * -ENODEV: device got disconnected */ +		if (err != -EPERM && err != -ENODEV)  			BT_ERR("%s urb %p failed to resubmit (%d)",  						hdev->name, urb, -err);  		usb_unanchor_urb(urb); diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 65d27aff553..04d353f58d7 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -125,6 +125,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb)  /* protocol structure registered with shared transport */  static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {  	{ +		.chnl_id = HCI_EVENT_PKT, /* HCI Events */ +		.hdr_len = sizeof(struct hci_event_hdr), +		.offset_len_in_hdr = offsetof(struct hci_event_hdr, plen), +		.len_size = 1, /* sizeof(plen) in struct hci_event_hdr */ +		.reserve = 8, +	}, +	{  		.chnl_id = HCI_ACLDATA_PKT, /* ACL */  		.hdr_len = sizeof(struct hci_acl_hdr),  		.offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen), @@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {  		.len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */  		.reserve = 8,  	}, -	{ -		.chnl_id = HCI_EVENT_PKT, /* HCI Events */ -		.hdr_len = sizeof(struct hci_event_hdr), -		.offset_len_in_hdr = offsetof(struct hci_event_hdr, plen), -		.len_size = 1, /* sizeof(plen) in struct hci_event_hdr */ -		.reserve = 8, -	},  };  /* Called from HCI core to initialize the device */ @@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev)  	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))  		return 0; -	for (i = 0; i < MAX_BT_CHNL_IDS; i++) { +	for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {  		err = st_unregister(&ti_st_proto[i]);  		if (err)  			BT_ERR("st_unregister(%d) failed with error %d", diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index f27d0d0816d..4b71647782d 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -171,7 +171,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)  	}  	got_gatt = 1; -	bridge->key_list = vmalloc(PAGE_SIZE * 4); +	bridge->key_list = vzalloc(PAGE_SIZE * 4);  	if (bridge->key_list == NULL) {  		dev_err(&bridge->dev->dev, 
 			"can't allocate memory for key lists\n"); @@ -181,7 +181,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)  	got_keylist = 1;  	/* FIXME vmalloc'd memory not guaranteed contiguous */ -	memset(bridge->key_list, 0, PAGE_SIZE * 4);  	if (bridge->driver->configure()) {  		dev_err(&bridge->dev->dev, "error configuring host chipset\n"); diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index a7346ab97a3..f4837a893df 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -40,10 +40,7 @@  #define APM_MINOR_DEV	134  /* - * See Documentation/Config.help for the configuration options. - * - * Various options can be changed at boot time as follows: - * (We allow underscores for compatibility with the modules code) + * One option can be changed at boot time as follows:   *	apm=on/off			enable/disable APM   */ @@ -300,17 +297,13 @@ apm_ioctl(struct file *filp, u_int cmd, u_long arg)  			/*  			 * Wait for the suspend/resume to complete.  If there  			 * are pending acknowledges, we wait here for them. +			 * wait_event_freezable() is interruptible and pending +			 * signal can cause busy looping.  We aren't doing +			 * anything critical, chill a bit on each iteration.  			 */ -			freezer_do_not_count(); - -			wait_event(apm_suspend_waitqueue, -				   as->suspend_state == SUSPEND_DONE); - -			/* -			 * Since we are waiting until the suspend is done, the -			 * try_to_freeze() in freezer_count() will not trigger -			 */ -			freezer_count(); +			while (wait_event_freezable(apm_suspend_waitqueue, +					as->suspend_state == SUSPEND_DONE)) +				msleep(10);  			break;  		case SUSPEND_ACKTO:  			as->suspend_result = -ETIMEDOUT; diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c index b6f8a65c996..8eca55deb3a 100644 --- a/drivers/char/msm_smd_pkt.c +++ b/drivers/char/msm_smd_pkt.c @@ -379,9 +379,8 @@ static int __init smd_pkt_init(void)  	for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {  		smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),  					  GFP_KERNEL); -		if (IS_ERR(smd_pkt_devp[i])) { -			r = PTR_ERR(smd_pkt_devp[i]); -			pr_err("kmalloc() failed %d\n", r); +		if (!smd_pkt_devp[i]) { +			pr_err("kmalloc() failed\n");  			goto clean_cdevs;  		} diff --git a/drivers/char/raw.c b/drivers/char/raw.c index b33e8ea314e..b6de2c04714 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -324,13 +324,12 @@ static int __init raw_init(void)  		max_raw_minors = MAX_RAW_MINORS;  	} -	raw_devices = vmalloc(sizeof(struct raw_device_data) * max_raw_minors); +	raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors);  	if (!raw_devices) {  		printk(KERN_ERR "Not enough memory for raw device structures\n");  		ret = -ENOMEM;  		goto error;  	} -	memset(raw_devices, 0, sizeof(struct raw_device_data) * max_raw_minors);  	ret = register_chrdev_region(dev, max_raw_minors, "raw");  	if (ret) diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index dfa8b3062fd..ccd124ab7ca 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -80,6 +80,7 @@  #include <linux/bcd.h>  #include <linux/delay.h>  #include <linux/uaccess.h> +#include <linux/ratelimit.h>  #include <asm/current.h>  #include <asm/system.h> @@ -1195,10 +1196,8 @@ static void rtc_dropped_irq(unsigned long data)  	spin_unlock_irq(&rtc_lock); -	if (printk_ratelimit()) { -		printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", -			freq); -	} +	printk_ratelimited(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", +			   freq);  	/* Now we have new data 
*/  	wake_up_interruptible(&rtc_wait); diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index f6595aba4f0..fa567f1158c 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -43,6 +43,7 @@ config TCG_NSC  config TCG_ATMEL  	tristate "Atmel TPM Interface" +	depends on PPC64 || HAS_IOPORT  	---help---  	  If you have a TPM security chip from Atmel say Yes and it   	  will be accessible from within Linux.  To compile this driver  diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index caf8012ef47..361a1dff8f7 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,  	u32 count, ordinal;  	unsigned long stop; +	if (bufsiz > TPM_BUFSIZE) +		bufsiz = TPM_BUFSIZE; +  	count = be32_to_cpu(*((__be32 *) (buf + 2)));  	ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));  	if (count == 0) @@ -963,6 +966,9 @@ ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,  {  	struct tpm_chip *chip = dev_get_drvdata(dev); +	if (chip->vendor.duration[TPM_LONG] == 0) +		return 0; +  	return sprintf(buf, "%d %d %d [%s]\n",  		       jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),  		       jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]), @@ -1102,6 +1108,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,  {  	struct tpm_chip *chip = file->private_data;  	ssize_t ret_size; +	int rc;  	del_singleshot_timer_sync(&chip->user_read_timer);  	flush_work_sync(&chip->work); @@ -1112,8 +1119,11 @@ ssize_t tpm_read(struct file *file, char __user *buf,  			ret_size = size;  		mutex_lock(&chip->buffer_mutex); -		if (copy_to_user(buf, chip->data_buffer, ret_size)) +		rc = copy_to_user(buf, chip->data_buffer, ret_size); +		memset(chip->data_buffer, 0, ret_size); +		if (rc)  			ret_size = -EFAULT; +  		mutex_unlock(&chip->buffer_mutex);  	} diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 82facc9104c..4d2464871ad 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void)  	if (pdev) {  		tpm_nsc_remove(&pdev->dev);  		platform_device_unregister(pdev); -		kfree(pdev); -		pdev = NULL;  	}  	platform_driver_unregister(&nsc_drv); diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index dc7c033ef58..32a77becc09 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -26,6 +26,7 @@  #include <linux/clk.h>  #include <linux/irq.h>  #include <linux/err.h> +#include <linux/delay.h>  #include <linux/clocksource.h>  #include <linux/clockchips.h>  #include <linux/sh_timer.h> @@ -150,13 +151,13 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)  static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)  { -	int ret; +	int k, ret;  	/* enable clock */  	ret = clk_enable(p->clk);  	if (ret) {  		dev_err(&p->pdev->dev, "cannot enable clock\n"); -		return ret; +		goto err0;  	}  	/* make sure channel is disabled */ @@ -174,9 +175,38 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)  	sh_cmt_write(p, CMCOR, 0xffffffff);  	sh_cmt_write(p, CMCNT, 0); +	/* +	 * According to the sh73a0 user's manual, as CMCNT can be operated +	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on +	 * modifying CMCNT register; two RCLK cycles are necessary before +	 * this register is either read or any modification of the value +	 * it holds is reflected in the LSI's actual operation. 
+	 * +	 * While at it, we're supposed to clear out the CMCNT as of this +	 * moment, so make sure it's processed properly here.  This will +	 * take RCLKx2 at maximum. +	 */ +	for (k = 0; k < 100; k++) { +		if (!sh_cmt_read(p, CMCNT)) +			break; +		udelay(1); +	} + +	if (sh_cmt_read(p, CMCNT)) { +		dev_err(&p->pdev->dev, "cannot clear CMCNT\n"); +		ret = -ETIMEDOUT; +		goto err1; +	} +  	/* enable channel */  	sh_cmt_start_stop_ch(p, 1);  	return 0; + err1: +	/* stop clock */ +	clk_disable(p->clk); + + err0: +	return ret;  }  static void sh_cmt_disable(struct sh_cmt_priv *p) diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 891360edecd..629b3ec698e 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -725,7 +725,7 @@ static int __init cpufreq_gov_dbs_init(void)  		dbs_tuners_ins.down_differential =  					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;  		/* -		 * In no_hz/micro accounting case we set the minimum frequency +		 * In nohz/micro accounting case we set the minimum frequency  		 * not depending on HZ, but fixed (very low). The deferred  		 * timer might skip some samples if idle/sleeping as needed.  		*/ diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 7b0603eb012..cdc02ac8f41 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu)  	pr = per_cpu(processors, cpu);  	pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); +	if (!pr) +		return -ENODEV; +  	status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);  	if (ACPI_FAILURE(status))  		return -ENODEV; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index cd3a7c726bf..467e4dcb20a 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -174,8 +174,10 @@ struct d40_base;   * @tasklet: Tasklet that gets scheduled from interrupt context to complete a   * transfer and call client callback.   * @client: Cliented owned descriptor list. + * @pending_queue: Submitted jobs, to be issued by issue_pending()   * @active: Active descriptor.   * @queue: Queued jobs. + * @prepare_queue: Prepared jobs.   * @dma_cfg: The client configuration of this dma channel.   * @configured: whether the dma_cfg configuration is valid   * @base: Pointer to the device instance struct. 
@@ -203,6 +205,7 @@ struct d40_chan {  	struct list_head		 pending_queue;  	struct list_head		 active;  	struct list_head		 queue; +	struct list_head		 prepare_queue;  	struct stedma40_chan_cfg	 dma_cfg;  	bool				 configured;  	struct d40_base			*base; @@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)  		list_for_each_entry_safe(d, _d, &d40c->client, node)  			if (async_tx_test_ack(&d->txd)) { -				d40_pool_lli_free(d40c, d);  				d40_desc_remove(d);  				desc = d;  				memset(desc, 0, sizeof(*desc)); @@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)  	return d;  } +/* remove desc from current queue and add it to the pending_queue */  static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)  { +	d40_desc_remove(desc); +	desc->is_in_client_list = false;  	list_add_tail(&desc->node, &d40c->pending_queue);  } @@ -803,6 +808,7 @@ done:  static void d40_term_all(struct d40_chan *d40c)  {  	struct d40_desc *d40d; +	struct d40_desc *_d;  	/* Release active descriptors */  	while ((d40d = d40_first_active_get(d40c))) { @@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)  		d40_desc_free(d40c, d40d);  	} +	/* Release client owned descriptors */ +	if (!list_empty(&d40c->client)) +		list_for_each_entry_safe(d40d, _d, &d40c->client, node) { +			d40_desc_remove(d40d); +			d40_desc_free(d40c, d40d); +		} + +	/* Release descriptors in prepare queue */ +	if (!list_empty(&d40c->prepare_queue)) +		list_for_each_entry_safe(d40d, _d, +					 &d40c->prepare_queue, node) { +			d40_desc_remove(d40d); +			d40_desc_free(d40c, d40d); +		} +  	d40c->pending_tx = 0;  	d40c->busy = false;  } @@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)  	if (!d40d->cyclic) {  		if (async_tx_test_ack(&d40d->txd)) { -			d40_pool_lli_free(d40c, d40d);  			d40_desc_remove(d40d);  			d40_desc_free(d40c, d40d);  		} else { @@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)  	u32 event;  	struct d40_phy_res *phy = d40c->phy_chan;  	bool is_src; -	struct d40_desc *d; -	struct d40_desc *_d; -  	/* Terminate all queued and active transfers */  	d40_term_all(d40c); -	/* Release client owned descriptors */ -	if (!list_empty(&d40c->client)) -		list_for_each_entry_safe(d, _d, &d40c->client, node) { -			d40_pool_lli_free(d40c, d); -			d40_desc_remove(d); -			d40_desc_free(d40c, d); -		} -  	if (phy == NULL) {  		chan_err(d40c, "phy == null\n");  		return -EINVAL; @@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,  		goto err;  	} +	/* +	 * add descriptor to the prepare queue in order to be able +	 * to free them later in terminate_all +	 */ +	list_add_tail(&desc->node, &chan->prepare_queue); +  	spin_unlock_irqrestore(&chan->lock, flags);  	return &desc->txd; @@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,  		INIT_LIST_HEAD(&d40c->queue);  		INIT_LIST_HEAD(&d40c->pending_queue);  		INIT_LIST_HEAD(&d40c->client); +		INIT_LIST_HEAD(&d40c->prepare_queue);  		tasklet_init(&d40c->tasklet, dma_tasklet,  			     (unsigned long) d40c); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 57cd3a406ed..fd7170a9ad2 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -290,6 +290,9 @@ static const struct {  	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,  		QUIRK_CYCLE_TIMER}, +	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID, +		QUIRK_NO_MSI}, +  	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,  		
QUIRK_CYCLE_TIMER}, diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 41841a3e3f9..17cef864506 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1198,6 +1198,10 @@ static int sbp2_remove(struct device *dev)  {  	struct fw_unit *unit = fw_unit(dev);  	struct sbp2_target *tgt = dev_get_drvdata(&unit->device); +	struct sbp2_logical_unit *lu; + +	list_for_each_entry(lu, &tgt->lu_list, link) +		cancel_delayed_work_sync(&lu->work);  	sbp2_target_put(tgt);  	return 0; diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 68810fd1a59..aa83de9db1b 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -420,7 +420,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size,  static efi_status_t gsmi_set_variable(efi_char16_t *name,  				      efi_guid_t *vendor, -				      unsigned long attr, +				      u32 attr,  				      unsigned long data_size,  				      void *data)  { diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index 231714def4d..4e24436b0f8 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c @@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,  	return 0;  } -int __devexit bgpio_remove(struct bgpio_chip *bgc) +int bgpio_remove(struct bgpio_chip *bgc)  {  	int err = gpiochip_remove(&bgc->gc); @@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc)  }  EXPORT_SYMBOL_GPL(bgpio_remove); -int __devinit bgpio_init(struct bgpio_chip *bgc, -			 struct device *dev, -			 unsigned long sz, -			 void __iomem *dat, -			 void __iomem *set, -			 void __iomem *clr, -			 void __iomem *dirout, -			 void __iomem *dirin, -			 bool big_endian) +int bgpio_init(struct bgpio_chip *bgc, struct device *dev, +	       unsigned long sz, void __iomem *dat, void __iomem *set, +	       void __iomem *clr, void __iomem *dirout, void __iomem *dirin, +	       bool big_endian)  {  	int ret; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 0599854e221..118ec12d2d5 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -34,8 +34,8 @@ struct gpio_bank {  	u16 irq;  	u16 virtual_irq_start;  	int method; -#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)  	u32 suspend_wakeup; +#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)  	u32 saved_wakeup;  #endif  	u32 non_wakeup_gpios; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index c43b8ff626a..0550dcb8581 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)  void  pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)  { +	*gpio_base = -1;  }  #endif diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 82db1850666..fe738f05309 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector)  	mutex_lock(&dev->mode_config.mutex);  	drm_mode_object_put(dev, &connector->base);  	list_del(&connector->head); +	dev->mode_config.num_connector--;  	mutex_unlock(&dev->mode_config.mutex);  }  EXPORT_SYMBOL(drm_connector_cleanup); @@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)  	mutex_lock(&dev->mode_config.mutex);  	drm_mode_object_put(dev, &encoder->base);  	list_del(&encoder->head); +	dev->mode_config.num_encoder--;  	
mutex_unlock(&dev->mode_config.mutex);  }  EXPORT_SYMBOL(drm_encoder_cleanup); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 802b61ac313..f7c6854eb4d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,  {  	printk(KERN_ERR "panic occurred, switching back to text console\n");  	return drm_fb_helper_force_kernel_mode(); -	return 0;  }  EXPORT_SYMBOL(drm_fb_helper_panic); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ce045a8cf82..f07e4252b70 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);  MODULE_PARM_DESC(i915_enable_rc6,  		"Enable power-saving render C-state 6 (default: true)"); -unsigned int i915_enable_fbc __read_mostly = 1; +unsigned int i915_enable_fbc __read_mostly = -1;  module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);  MODULE_PARM_DESC(i915_enable_fbc,  		"Enable frame buffer compression for power savings " -		"(default: false)"); +		"(default: -1 (use per-chip default))");  unsigned int i915_lvds_downclock __read_mostly = 0;  module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ee1d701317f..04411ad2e77 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -878,7 +878,7 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,  	int pp_reg, lvds_reg;  	u32 val;  	enum pipe panel_pipe = PIPE_A; -	bool locked = locked; +	bool locked = true;  	if (HAS_PCH_SPLIT(dev_priv->dev)) {  		pp_reg = PCH_PP_CONTROL; @@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev)  	struct drm_framebuffer *fb;  	struct intel_framebuffer *intel_fb;  	struct drm_i915_gem_object *obj; +	int enable_fbc;  	DRM_DEBUG_KMS("\n"); @@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev)  	intel_fb = to_intel_framebuffer(fb);  	obj = intel_fb->obj; -	if (!i915_enable_fbc) { -		DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); +	enable_fbc = i915_enable_fbc; +	if (enable_fbc < 0) { +		DRM_DEBUG_KMS("fbc set to per-chip default\n"); +		enable_fbc = 1; +		if (INTEL_INFO(dev)->gen <= 5) +			enable_fbc = 0; +	} +	if (!enable_fbc) { +		DRM_DEBUG_KMS("fbc disabled per module param\n");  		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;  		goto out_disable;  	} @@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,  		bpc = 6; /* min is 18bpp */  		break;  	case 24: -		bpc = min((unsigned int)8, display_bpc); +		bpc = 8;  		break;  	case 30: -		bpc = min((unsigned int)10, display_bpc); +		bpc = 10;  		break;  	case 48: -		bpc = min((unsigned int)12, display_bpc); +		bpc = 12;  		break;  	default:  		DRM_DEBUG("unsupported depth, assuming 24 bits\n"); @@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,  		break;  	} +	display_bpc = min(display_bpc, bpc); +  	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",  			 bpc, display_bpc); -	*pipe_bpp = bpc * 3; +	*pipe_bpp = display_bpc * 3;  	return display_bpc != bpc;  } @@ -7238,8 +7248,6 @@ static void intel_setup_outputs(struct drm_device *dev)  			intel_encoder_clones(dev, encoder->clone_mask);  	} -	intel_panel_setup_backlight(dev); -  	/* disable 
all the possible outputs/crtcs before entering KMS mode */  	drm_helper_disable_unused_functions(dev);  } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0b2ee9d3998..fe1099d8817 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,  					   struct drm_connector *connector,  					   struct intel_load_detect_pipe *old); -extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); -extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); -extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);  extern void intelfb_restore(void);  extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,  				    u16 blue, int regno); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 30fe554d893..6348c499616 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -92,6 +92,11 @@ struct intel_sdvo {  	*/  	uint16_t attached_output; +	/* +	 * Hotplug activation bits for this device +	 */ +	uint8_t hotplug_active[2]; +  	/**  	 * This is used to select the color range of RBG outputs in HDMI mode.  	 * It is only valid when using TMDS encoding and 8 bit per color mode. @@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in  	return true;  } -/* No use! */ -#if 0 -struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) -{ -	struct drm_connector *connector = NULL; -	struct intel_sdvo *iout = NULL; -	struct intel_sdvo *sdvo; - -	/* find the sdvo connector */ -	list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -		iout = to_intel_sdvo(connector); - -		if (iout->type != INTEL_OUTPUT_SDVO) -			continue; - -		sdvo = iout->dev_priv; - -		if (sdvo->sdvo_reg == SDVOB && sdvoB) -			return connector; - -		if (sdvo->sdvo_reg == SDVOC && !sdvoB) -			return connector; - -	} - -	return NULL; -} - -int intel_sdvo_supports_hotplug(struct drm_connector *connector) +static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)  {  	u8 response[2]; -	u8 status; -	struct intel_sdvo *intel_sdvo; -	DRM_DEBUG_KMS("\n"); - -	if (!connector) -		return 0; - -	intel_sdvo = to_intel_sdvo(connector);  	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,  				    &response, 2) && response[0];  } -void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) +static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)  { -	u8 response[2]; -	u8 status; -	struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); - -	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); -	intel_sdvo_read_response(intel_sdvo, &response, 2); - -	if (on) { -		intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); -		status = intel_sdvo_read_response(intel_sdvo, &response, 2); - -		intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); -	} else { -		response[0] = 0; -		response[1] = 0; -		intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); -	} +	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); -	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); -	intel_sdvo_read_response(intel_sdvo, &response, 2); +	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);  } 
-#endif  static bool  intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) @@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)  {  	struct drm_encoder *encoder = &intel_sdvo->base.base;  	struct drm_connector *connector; +	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);  	struct intel_connector *intel_connector;  	struct intel_sdvo_connector *intel_sdvo_connector; @@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)  	intel_connector = &intel_sdvo_connector->base;  	connector = &intel_connector->base; -	connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; +	if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { +		connector->polled = DRM_CONNECTOR_POLL_HPD; +		intel_sdvo->hotplug_active[0] |= 1 << device; +		/* Some SDVO devices have one-shot hotplug interrupts. +		 * Ensure that they get re-enabled when an interrupt happens. +		 */ +		intel_encoder->hot_plug = intel_sdvo_enable_hotplug; +		intel_sdvo_enable_hotplug(intel_encoder); +	} +	else +		connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;  	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;  	connector->connector_type = DRM_MODE_CONNECTOR_DVID; @@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)  	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))  		goto err; +	/* Set up hotplug command - note paranoia about contents of reply. +	 * We assume that the hardware is in a sane state, and only touch +	 * the bits we think we understand. +	 */ +	intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, +			     &intel_sdvo->hotplug_active, 2); +	intel_sdvo->hotplug_active[0] &= ~0x3; +  	if (intel_sdvo_output_setup(intel_sdvo,  				    intel_sdvo->caps.output_flags) != true) {  		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 8d02d875376..c919cfc8f2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)  		nouveau_gpuobj_ref(NULL, &obj);  		if (ret)  			return ret; -	} else { +	} else +	if (USE_SEMA(dev)) {  		/* map fence bo into channel's vm */  		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,  					 &chan->fence.vma); diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index c444cadbf84..2706cb3d871 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,  		return -ENOMEM;  	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); -	if (!nvbe->ttm_alloced) +	if (!nvbe->ttm_alloced) { +		kfree(nvbe->pages); +		nvbe->pages = NULL;  		return -ENOMEM; +	}  	nvbe->nr_pages = 0;  	while (num_pages--) { @@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)  		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {  			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); -			dma_offset += NV_CTXDMA_PAGE_SIZE; +			offset_l += NV_CTXDMA_PAGE_SIZE;  		}  	} diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 118261d4927..5e45398a9e2 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -781,11 +781,20 @@ 
nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,  	struct drm_device *dev = crtc->dev;  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; -	struct drm_framebuffer *drm_fb = nv_crtc->base.fb; -	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); +	struct drm_framebuffer *drm_fb; +	struct nouveau_framebuffer *fb;  	int arb_burst, arb_lwm;  	int ret; +	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + +	/* no fb bound */ +	if (!atomic && !crtc->fb) { +		NV_DEBUG_KMS(dev, "No FB bound\n"); +		return 0; +	} + +  	/* If atomic, we want to switch to the fb we were passed, so  	 * now we update pointers to do that.  (We don't pin; just  	 * assume we're already pinned and update the base address.) @@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,  		drm_fb = passed_fb;  		fb = nouveau_framebuffer(passed_fb);  	} else { +		drm_fb = crtc->fb; +		fb = nouveau_framebuffer(crtc->fb);  		/* If not atomic, we can go ahead and pin, and unpin the  		 * old fb we were passed.  		 */ diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 46ad59ea218..5d989073ba6 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,  	struct drm_device *dev = nv_crtc->base.dev;  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	struct nouveau_channel *evo = nv50_display(dev)->master; -	struct drm_framebuffer *drm_fb = nv_crtc->base.fb; -	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); +	struct drm_framebuffer *drm_fb; +	struct nouveau_framebuffer *fb;  	int ret;  	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); +	/* no fb bound */ +	if (!atomic && !crtc->fb) { +		NV_DEBUG_KMS(dev, "No FB bound\n"); +		return 0; +	} +  	/* If atomic, we want to switch to the fb we were passed, so  	 * now we update pointers to do that.  (We don't pin; just  	 * assume we're already pinned and update the base address.) @@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,  		drm_fb = passed_fb;  		fb = nouveau_framebuffer(passed_fb);  	} else { +		drm_fb = crtc->fb; +		fb = nouveau_framebuffer(crtc->fb);  		/* If not atomic, we can go ahead and pin, and unpin the  		 * old fb we were passed.  		 */ diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index e88c64417a8..14cc88aaf3a 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,  	case ATOM_ARG_FB:  		idx = U8(*ptr);  		(*ptr)++; -		val = gctx->scratch[((gctx->fb_base + idx) / 4)]; +		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { +			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n", +				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); +			val = 0; +		} else +			val = gctx->scratch[(gctx->fb_base / 4) + idx];  		if (print)  			DEBUG("FB[0x%02X]", idx);  		break; @@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,  	case ATOM_ARG_FB:  		idx = U8(*ptr);  		(*ptr)++; -		gctx->scratch[((gctx->fb_base + idx) / 4)] = val; +		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { +			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. 
%d\n", +				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); +		} else +			gctx->scratch[(gctx->fb_base / 4) + idx] = val;  		DEBUG("FB[0x%02X]", idx);  		break;  	case ATOM_ARG_PLL: @@ -1370,11 +1379,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)  		usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;  	} +	ctx->scratch_size_bytes = 0;  	if (usage_bytes == 0)  		usage_bytes = 20 * 1024;  	/* allocate some scratch memory */  	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);  	if (!ctx->scratch)  		return -ENOMEM; +	ctx->scratch_size_bytes = usage_bytes;  	return 0;  } diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index a589a55b223..93cfe2086ba 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -137,6 +137,7 @@ struct atom_context {  	int cs_equal, cs_above;  	int io_mode;  	uint32_t *scratch; +	int scratch_size_bytes;  };  extern int atom_debug; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c742944d380..a515b2a09d8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -466,7 +466,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,  			return;  		}  		args.v2.ucEnable = enable; -		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) +		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))  			args.v2.ucEnable = ATOM_DISABLE;  	} else if (ASIC_IS_DCE3(rdev)) {  		args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 7ad43c6b1db..79e8ebc0530 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,  	u8 msg[20];  	int msg_bytes = send_bytes + 4;  	u8 ack; +	unsigned retry;  	if (send_bytes > 16)  		return -1; @@ -125,20 +126,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,  	msg[3] = (msg_bytes << 4) | (send_bytes - 1);  	memcpy(&msg[4], send, send_bytes); -	while (1) { +	for (retry = 0; retry < 4; retry++) {  		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,  					    msg, msg_bytes, NULL, 0, delay, &ack); -		if (ret < 0) +		if (ret == -EBUSY) +			continue; +		else if (ret < 0)  			return ret;  		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) -			break; +			return send_bytes;  		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)  			udelay(400);  		else  			return -EIO;  	} -	return send_bytes; +	return -EIO;  }  static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, @@ -149,26 +152,31 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,  	int msg_bytes = 4;  	u8 ack;  	int ret; +	unsigned retry;  	msg[0] = address;  	msg[1] = address >> 8;  	msg[2] = AUX_NATIVE_READ << 4;  	msg[3] = (msg_bytes << 4) | (recv_bytes - 1); -	while (1) { +	for (retry = 0; retry < 4; retry++) {  		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,  					    msg, msg_bytes, recv, recv_bytes, delay, &ack); -		if (ret == 0) -			return -EPROTO; -		if (ret < 0) +		if (ret == -EBUSY) +			continue; +		else if (ret < 0)  			return ret;  		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)  			return ret;  		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)  			
udelay(400); +		else if (ret == 0) +			return -EPROTO;  		else  			return -EIO;  	} + +	return -EIO;  }  static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, @@ -232,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,  	for (retry = 0; retry < 4; retry++) {  		ret = radeon_process_aux_ch(auxch,  					    msg, msg_bytes, reply, reply_bytes, 0, &ack); -		if (ret < 0) { +		if (ret == -EBUSY) +			continue; +		else if (ret < 0) {  			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);  			return ret;  		} diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index fb5fa089886..c4ffa14fb2f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);  void evergreen_fini(struct radeon_device *rdev);  static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); +void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) +{ +	u16 ctl, v; +	int cap, err; + +	cap = pci_pcie_cap(rdev->pdev); +	if (!cap) +		return; + +	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl); +	if (err) +		return; + +	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12; + +	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it +	 * to avoid hangs or perfomance issues +	 */ +	if ((v == 0) || (v == 6) || (v == 7)) { +		ctl &= ~PCI_EXP_DEVCTL_READRQ; +		ctl |= (2 << 12); +		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl); +	} +} +  void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)  {  	/* enable the pflip int */ @@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)  				 SOFT_RESET_PA |  				 SOFT_RESET_SH |  				 SOFT_RESET_VGT | +				 SOFT_RESET_SPI |  				 SOFT_RESET_SX));  	RREG32(GRBM_SOFT_RESET);  	mdelay(15); @@ -1378,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);  	WREG32(CP_RB_RPTR_WR, 0); -	WREG32(CP_RB_WPTR, 0); +	rdev->cp.wptr = 0; +	WREG32(CP_RB_WPTR, rdev->cp.wptr);  	/* set the wb address wether it's enabled or not */  	WREG32(CP_RB_RPTR_ADDR, @@ -1400,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)  	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));  	rdev->cp.rptr = RREG32(CP_RB_RPTR); -	rdev->cp.wptr = RREG32(CP_RB_WPTR);  	evergreen_cp_start(rdev);  	rdev->cp.ready = true; @@ -1564,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,  	return backend_map;  } -static void evergreen_program_channel_remap(struct radeon_device *rdev) -{ -	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; - -	tmp = RREG32(MC_SHARED_CHMAP); -	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { -	case 0: -	case 1: -	case 2: -	case 3: -	default: -		/* default mapping */ -		mc_shared_chremap = 0x00fac688; -		break; -	} - -	switch (rdev->family) { -	case CHIP_HEMLOCK: -	case CHIP_CYPRESS: -	case CHIP_BARTS: -		tcp_chan_steer_lo = 0x54763210; -		tcp_chan_steer_hi = 0x0000ba98; -		break; -	case CHIP_JUNIPER: -	case CHIP_REDWOOD: -	case CHIP_CEDAR: -	case CHIP_PALM: -	case CHIP_SUMO: -	case CHIP_SUMO2: -	case CHIP_TURKS: -	case CHIP_CAICOS: -	default: -		tcp_chan_steer_lo = 0x76543210; -		tcp_chan_steer_hi = 0x0000ba98; -		break; -	} - -	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); -	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); -	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); -} -  static void 
evergreen_gpu_init(struct radeon_device *rdev)  {  	u32 cc_rb_backend_disable = 0; @@ -1862,6 +1846,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)  	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); +	evergreen_fix_pci_max_read_req_size(rdev); +  	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;  	cc_gc_shader_pipe_config |= @@ -2050,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)  	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);  	WREG32(HDP_ADDR_CONFIG, gb_addr_config); -	evergreen_program_channel_remap(rdev); -  	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;  	grbm_gfx_index = INSTANCE_BROADCAST_WRITES; @@ -3143,21 +3127,23 @@ int evergreen_suspend(struct radeon_device *rdev)  }  int evergreen_copy_blit(struct radeon_device *rdev, -			uint64_t src_offset, uint64_t dst_offset, -			unsigned num_pages, struct radeon_fence *fence) +			uint64_t src_offset, +			uint64_t dst_offset, +			unsigned num_gpu_pages, +			struct radeon_fence *fence)  {  	int r;  	mutex_lock(&rdev->r600_blit.mutex);  	rdev->r600_blit.vb_ib = NULL; -	r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); +	r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);  	if (r) {  		if (rdev->r600_blit.vb_ib)  			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);  		mutex_unlock(&rdev->r600_blit.mutex);  		return r;  	} -	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); +	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);  	evergreen_blit_done_copy(rdev, fence);  	mutex_unlock(&rdev->r600_blit.mutex);  	return 0; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 44c4750f451..8c79ca97753 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);  extern void evergreen_mc_program(struct radeon_device *rdev);  extern void evergreen_irq_suspend(struct radeon_device *rdev);  extern int evergreen_mc_init(struct radeon_device *rdev); +extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);  #define EVERGREEN_PFP_UCODE_SIZE 1120  #define EVERGREEN_PM4_UCODE_SIZE 1376 @@ -568,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,  	return backend_map;  } -static void cayman_program_channel_remap(struct radeon_device *rdev) -{ -	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; - -	tmp = RREG32(MC_SHARED_CHMAP); -	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { -	case 0: -	case 1: -	case 2: -	case 3: -	default: -		/* default mapping */ -		mc_shared_chremap = 0x00fac688; -		break; -	} - -	switch (rdev->family) { -	case CHIP_CAYMAN: -	default: -		//tcp_chan_steer_lo = 0x54763210 -		tcp_chan_steer_lo = 0x76543210; -		tcp_chan_steer_hi = 0x0000ba98; -		break; -	} - -	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); -	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); -	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); -} -  static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,  					    u32 disable_mask_per_se,  					    u32 max_disable_mask_per_se, @@ -669,6 +640,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)  	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); +	evergreen_fix_pci_max_read_req_size(rdev); +  	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);  	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); @@ -839,8 +812,6 @@ static void 
cayman_gpu_init(struct radeon_device *rdev)  	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);  	WREG32(HDP_ADDR_CONFIG, gb_addr_config); -	cayman_program_channel_remap(rdev); -  	/* primary versions */  	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);  	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); @@ -1159,6 +1130,7 @@ int cayman_cp_resume(struct radeon_device *rdev)  				 SOFT_RESET_PA |  				 SOFT_RESET_SH |  				 SOFT_RESET_VGT | +				 SOFT_RESET_SPI |  				 SOFT_RESET_SX));  	RREG32(GRBM_SOFT_RESET);  	mdelay(15); @@ -1183,7 +1155,8 @@ int cayman_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); -	WREG32(CP_RB0_WPTR, 0); +	rdev->cp.wptr = 0; +	WREG32(CP_RB0_WPTR, rdev->cp.wptr);  	/* set the wb address wether it's enabled or not */  	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); @@ -1203,7 +1176,6 @@ int cayman_cp_resume(struct radeon_device *rdev)  	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);  	rdev->cp.rptr = RREG32(CP_RB0_RPTR); -	rdev->cp.wptr = RREG32(CP_RB0_WPTR);  	/* ring1  - compute only */  	/* Set ring buffer size */ @@ -1216,7 +1188,8 @@ int cayman_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); -	WREG32(CP_RB1_WPTR, 0); +	rdev->cp1.wptr = 0; +	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);  	/* set the wb address wether it's enabled or not */  	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); @@ -1228,7 +1201,6 @@ int cayman_cp_resume(struct radeon_device *rdev)  	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);  	rdev->cp1.rptr = RREG32(CP_RB1_RPTR); -	rdev->cp1.wptr = RREG32(CP_RB1_WPTR);  	/* ring2 - compute only */  	/* Set ring buffer size */ @@ -1241,7 +1213,8 @@ int cayman_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); -	WREG32(CP_RB2_WPTR, 0); +	rdev->cp2.wptr = 0; +	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);  	/* set the wb address wether it's enabled or not */  	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); @@ -1253,7 +1226,6 @@ int cayman_cp_resume(struct radeon_device *rdev)  	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);  	rdev->cp2.rptr = RREG32(CP_RB2_RPTR); -	rdev->cp2.wptr = RREG32(CP_RB2_WPTR);  	/* start the rings */  	cayman_cp_start(rdev); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index f2204cb1ccd..7fcdbbbf297 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,  int r100_copy_blit(struct radeon_device *rdev,  		   uint64_t src_offset,  		   uint64_t dst_offset, -		   unsigned num_pages, +		   unsigned num_gpu_pages,  		   struct radeon_fence *fence)  {  	uint32_t cur_pages; -	uint32_t stride_bytes = PAGE_SIZE; +	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;  	uint32_t pitch;  	uint32_t stride_pixels;  	unsigned ndw; @@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,  	/* radeon pitch is /64 */  	pitch = stride_bytes / 64;  	stride_pixels = stride_bytes / 4; -	num_loops = DIV_ROUND_UP(num_pages, 8191); +	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);  	/* Ask for enough room for blit + flush + fence */  	ndw = 64 + (10 * num_loops); @@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,  		
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);  		return -EINVAL;  	} -	while (num_pages > 0) { -		cur_pages = num_pages; +	while (num_gpu_pages > 0) { +		cur_pages = num_gpu_pages;  		if (cur_pages > 8191) {  			cur_pages = 8191;  		} -		num_pages -= cur_pages; +		num_gpu_pages -= cur_pages;  		/* pages are in Y direction - height  		   page width in X direction - width */ @@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,  		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));  		radeon_ring_write(rdev, 0);  		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); -		radeon_ring_write(rdev, num_pages); -		radeon_ring_write(rdev, num_pages); +		radeon_ring_write(rdev, num_gpu_pages); +		radeon_ring_write(rdev, num_gpu_pages);  		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));  	}  	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); @@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)  	/* Force read & write ptr to 0 */  	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);  	WREG32(RADEON_CP_RB_RPTR_WR, 0); -	WREG32(RADEON_CP_RB_WPTR, 0); +	rdev->cp.wptr = 0; +	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);  	/* set the wb address whether it's enabled or not */  	WREG32(R_00070C_CP_RB_RPTR_ADDR, @@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)  	WREG32(RADEON_CP_RB_CNTL, tmp);  	udelay(10);  	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); -	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); -	/* protect against crazy HW on resume */ -	rdev->cp.wptr &= rdev->cp.ptr_mask;  	/* Set cp mode to bus mastering & enable cp*/  	WREG32(RADEON_CP_CSQ_MODE,  	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) | diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index f2405830041..a1f3ba063c2 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)  int r200_copy_dma(struct radeon_device *rdev,  		  uint64_t src_offset,  		  uint64_t dst_offset, -		  unsigned num_pages, +		  unsigned num_gpu_pages,  		  struct radeon_fence *fence)  {  	uint32_t size; @@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,  	int r = 0;  	/* radeon pitch is /64 */ -	size = num_pages << PAGE_SHIFT; +	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;  	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);  	r = radeon_ring_lock(rdev, num_loops * 4 + 64);  	if (r) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index aa5571b73aa..720dd99163f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);  	WREG32(CP_RB_RPTR_WR, 0); -	WREG32(CP_RB_WPTR, 0); +	rdev->cp.wptr = 0; +	WREG32(CP_RB_WPTR, rdev->cp.wptr);  	/* set the wb address whether it's enabled or not */  	WREG32(CP_RB_RPTR_ADDR, @@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)  	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));  	rdev->cp.rptr = RREG32(CP_RB_RPTR); -	rdev->cp.wptr = RREG32(CP_RB_WPTR);  	r600_cp_start(rdev);  	rdev->cp.ready = true; @@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,  }  int r600_copy_blit(struct radeon_device *rdev, -		   uint64_t src_offset, uint64_t dst_offset, -		   unsigned num_pages, struct radeon_fence *fence) +		   uint64_t 
src_offset, +		   uint64_t dst_offset, +		   unsigned num_gpu_pages, +		   struct radeon_fence *fence)  {  	int r;  	mutex_lock(&rdev->r600_blit.mutex);  	rdev->r600_blit.vb_ib = NULL; -	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); +	r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);  	if (r) {  		if (rdev->r600_blit.vb_ib)  			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);  		mutex_unlock(&rdev->r600_blit.mutex);  		return r;  	} -	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); +	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);  	r600_blit_done_copy(rdev, fence);  	mutex_unlock(&rdev->r600_blit.mutex);  	return 0; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 32807baf55e..c1e056b35b2 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -322,6 +322,7 @@ union radeon_gart_table {  #define RADEON_GPU_PAGE_SIZE 4096  #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) +#define RADEON_GPU_PAGE_SHIFT 12  struct radeon_gart {  	dma_addr_t			table_addr; @@ -914,17 +915,17 @@ struct radeon_asic {  	int (*copy_blit)(struct radeon_device *rdev,  			 uint64_t src_offset,  			 uint64_t dst_offset, -			 unsigned num_pages, +			 unsigned num_gpu_pages,  			 struct radeon_fence *fence);  	int (*copy_dma)(struct radeon_device *rdev,  			uint64_t src_offset,  			uint64_t dst_offset, -			unsigned num_pages, +			unsigned num_gpu_pages,  			struct radeon_fence *fence);  	int (*copy)(struct radeon_device *rdev,  		    uint64_t src_offset,  		    uint64_t dst_offset, -		    unsigned num_pages, +		    unsigned num_gpu_pages,  		    struct radeon_fence *fence);  	uint32_t (*get_engine_clock)(struct radeon_device *rdev);  	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3d7a0d7c6a9..3dedaa07aac 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);  int r100_copy_blit(struct radeon_device *rdev,  		   uint64_t src_offset,  		   uint64_t dst_offset, -		   unsigned num_pages, +		   unsigned num_gpu_pages,  		   struct radeon_fence *fence);  int r100_set_surface_reg(struct radeon_device *rdev, int reg,  			 uint32_t tiling_flags, uint32_t pitch, @@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);  extern int r200_copy_dma(struct radeon_device *rdev,  			 uint64_t src_offset,  			 uint64_t dst_offset, -			 unsigned num_pages, +			 unsigned num_gpu_pages,  			 struct radeon_fence *fence);  void r200_set_safe_registers(struct radeon_device *rdev); @@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);  int r600_ring_test(struct radeon_device *rdev);  int r600_copy_blit(struct radeon_device *rdev,  		   uint64_t src_offset, uint64_t dst_offset, -		   unsigned num_pages, struct radeon_fence *fence); +		   unsigned num_gpu_pages, struct radeon_fence *fence);  void r600_hpd_init(struct radeon_device *rdev);  void r600_hpd_fini(struct radeon_device *rdev);  bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); @@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);  void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);  int 
evergreen_copy_blit(struct radeon_device *rdev,  			uint64_t src_offset, uint64_t dst_offset, -			unsigned num_pages, struct radeon_fence *fence); +			unsigned num_gpu_pages, struct radeon_fence *fence);  void evergreen_hpd_init(struct radeon_device *rdev);  void evergreen_hpd_fini(struct radeon_device *rdev);  bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index dcd0863e31a..b6e18c8db9f 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c @@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)  		} else {  			DRM_INFO("Using generic clock info\n"); +			/* may need to be per card */ +			rdev->clock.max_pixel_clock = 35000; +  			if (rdev->flags & RADEON_IS_IGP) {  				p1pll->reference_freq = 1432;  				p2pll->reference_freq = 1432; diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index e0138b674ac..63675241c7f 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -3298,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev)  	    rdev->pdev->subsystem_device == 0x30a4)  		return; +	/* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume +	 * - it hangs on resume inside the dynclk 1 table. +	 */ +	if (rdev->family == CHIP_RS480 && +	    rdev->pdev->subsystem_vendor == 0x103c && +	    rdev->pdev->subsystem_device == 0x30ae) +		return; +  	/* DYN CLK 1 */  	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);  	if (table) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 7f65940f918..449c3d8c683 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)  	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {  		int saved_dpms = connector->dpms; -		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && -		    radeon_dp_needs_link_train(radeon_connector)) -			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); -		else +		/* Only turn off the display it it's physically disconnected */ +		if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))  			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); +		else if (radeon_dp_needs_link_train(radeon_connector)) +			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);  		connector->dpms = saved_dpms;  	}  } @@ -466,6 +466,16 @@ static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,  		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))  			return true;  	} +	/* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100 +	 * (RS690M) sends data to i2c bus for a HDMI connector that +	 * is not implemented */ +	if ((dev->pdev->device == 0x791f) && +	    (dev->pdev->subsystem_vendor == 0x1179) && +	    (dev->pdev->subsystem_device == 0xff68)) { +		if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && +		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) +			return true; +	}  	/* Default: no EDID header probe required for DDC probing */  	return false; @@ -1287,12 +1297,24 @@ radeon_dp_detect(struct drm_connector *connector, bool force)  		if (!radeon_dig_connector->edp_on)  			atombios_set_edp_panel_power(connector,  						     ATOM_TRANSMITTER_ACTION_POWER_OFF); -	} else { -		/* need to setup ddc on the bridge */ -		if 
(radeon_connector_encoder_is_dp_bridge(connector)) { -			if (encoder) -				radeon_atom_ext_encoder_setup_ddc(encoder); +	} else if (radeon_connector_encoder_is_dp_bridge(connector)) { +		/* DP bridges are always DP */ +		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; +		/* get the DPCD from the bridge */ +		radeon_dp_getdpcd(radeon_connector); + +		if (encoder) { +			/* setup ddc on the bridge */ +			radeon_atom_ext_encoder_setup_ddc(encoder); +			if (radeon_ddc_probe(radeon_connector, +					     radeon_connector->requires_extended_probe)) /* try DDC */ +				ret = connector_status_connected; +			else if (radeon_connector->dac_load_detect) { /* try load detection */ +				struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; +				ret = encoder_funcs->detect(encoder, connector); +			}  		} +	} else {  		radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);  		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {  			ret = connector_status_connected; @@ -1308,16 +1330,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)  					ret = connector_status_connected;  			}  		} - -		if ((ret == connector_status_disconnected) && -		    radeon_connector->dac_load_detect) { -			struct drm_encoder *encoder = radeon_best_single_encoder(connector); -			struct drm_encoder_helper_funcs *encoder_funcs; -			if (encoder) { -				encoder_funcs = encoder->helper_private; -				ret = encoder_funcs->detect(encoder, connector); -			} -		}  	}  	radeon_connector_update_scratch_regs(connector, ret); diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 3189a7efb2e..fde25c0d65a 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,  	int xorigin = 0, yorigin = 0;  	int w = radeon_crtc->cursor_width; -	if (x < 0) -		xorigin = -x + 1; -	if (y < 0) -		yorigin = -y + 1; -	if (xorigin >= CURSOR_WIDTH) -		xorigin = CURSOR_WIDTH - 1; -	if (yorigin >= CURSOR_HEIGHT) -		yorigin = CURSOR_HEIGHT - 1; -  	if (ASIC_IS_AVIVO(rdev)) { -		int i = 0; -		struct drm_crtc *crtc_p; -  		/* avivo cursor are offset into the total surface */  		x += crtc->x;  		y += crtc->y; -		DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); +	} +	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + +	if (x < 0) { +		xorigin = min(-x, CURSOR_WIDTH - 1); +		x = 0; +	} +	if (y < 0) { +		yorigin = min(-y, CURSOR_HEIGHT - 1); +		y = 0; +	} + +	if (ASIC_IS_AVIVO(rdev)) { +		int i = 0; +		struct drm_crtc *crtc_p;  		/* avivo cursor image can't end on 128 pixel boundary or  		 * go past the end of the frame if both crtcs are enabled @@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,  	radeon_lock_cursor(crtc, true);  	if (ASIC_IS_DCE4(rdev)) { -		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, -		       ((xorigin ? 0 : x) << 16) | -		       (yorigin ? 0 : y)); +		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);  		WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);  		WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,  		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));  	} else if (ASIC_IS_AVIVO(rdev)) { -		WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, -			     ((xorigin ? 0 : x) << 16) | -			     (yorigin ? 
0 : y)); +		WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);  		WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);  		WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,  		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); @@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,  			| yorigin));  		WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,  		       (RADEON_CUR_LOCK -			| ((xorigin ? 0 : x) << 16) -			| (yorigin ? 0 : y))); +			| (x << 16) +			| y));  		/* offset is from DISP(2)_BASE_ADDRESS */  		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +  								      (yorigin * 256))); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index a3b011b4946..b51e15725c6 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -301,6 +301,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64  		mc->mc_vram_size = mc->aper_size;  	}  	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; +	if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) +		mc->real_vram_size = radeon_vram_limit;  	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",  			mc->mc_vram_size >> 20, mc->vram_start,  			mc->vram_end, mc->real_vram_size >> 20); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 1a858944e4f..6adb3e58aff 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -473,8 +473,8 @@ pflip_cleanup:  	spin_lock_irqsave(&dev->event_lock, flags);  	radeon_crtc->unpin_work = NULL;  unlock_free: -	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);  	spin_unlock_irqrestore(&dev->event_lock, flags); +	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);  	radeon_fence_unref(&work->fence);  	kfree(work); @@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)  		radeon_router_select_ddc_port(radeon_connector);  	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || -	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { +	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || +	    radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {  		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; +  		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||  		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) -			radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); -	} -	if (!radeon_connector->ddc_bus) -		return -1; -	if (!radeon_connector->edid) { -		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); +			radeon_connector->edid = drm_get_edid(&radeon_connector->base, +							      &dig->dp_i2c_bus->adapter); +		else if (radeon_connector->ddc_bus && !radeon_connector->edid) +			radeon_connector->edid = drm_get_edid(&radeon_connector->base, +							      &radeon_connector->ddc_bus->adapter); +	} else { +		if (radeon_connector->ddc_bus && !radeon_connector->edid) +			radeon_connector->edid = drm_get_edid(&radeon_connector->base, +							      &radeon_connector->ddc_bus->adapter);  	}  	if (!radeon_connector->edid) { diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c 
index 319d85d7e75..eb3f6dc6df8 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)  		switch (mode) {  		case DRM_MODE_DPMS_ON:  			args.ucAction = ATOM_ENABLE; -			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +			/* workaround for DVOOutputControl on some RS690 systems */ +			if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { +				u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); +				WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); +				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +				WREG32(RADEON_BIOS_3_SCRATCH, reg); +			} else +				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);  			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {  				args.ucAction = ATOM_LCD_BLON;  				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); @@ -1631,7 +1638,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)  			break;  		case 2:  			args.v2.ucCRTC = radeon_crtc->crtc_id; -			args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); +			if (radeon_encoder_is_dp_bridge(encoder)) { +				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + +				if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) +					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; +				else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) +					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; +				else +					args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); +			} else +				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);  			switch (radeon_encoder->encoder_id) {  			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:  			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: @@ -1748,9 +1765,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)  	/* DCE4/5 */  	if (ASIC_IS_DCE4(rdev)) {  		dig = radeon_encoder->enc_priv; -		if (ASIC_IS_DCE41(rdev)) -			return radeon_crtc->crtc_id; -		else { +		if (ASIC_IS_DCE41(rdev)) { +			/* ontario follows DCE4 */ +			if (rdev->family == CHIP_PALM) { +				if (dig->linkb) +					return 1; +				else +					return 0; +			} else +				/* llano follows DCE3.2 */ +				return radeon_crtc->crtc_id; +		} else {  			switch (radeon_encoder->encoder_id) {  			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:  				if (dig->linkb) diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7fd4e3e5ad5..3475a09f946 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -48,7 +48,7 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq)  			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;  		else  			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; -		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);; +		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);  	} else  		WREG32(rdev->fence_drv.scratch_reg, seq);  } diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index dee4a0c1b4b..602fa3541c4 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev)  	size = 1024 * 1024;  	/* Number of tests = -	 * (Total GTT - IB pool - writeback page - ring buffer) / test size +	 * 
(Total GTT - IB pool - writeback page - ring buffers) / test size  	 */ -	n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - -	     rdev->cp.ring_size)) / size; +	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size; +	if (rdev->wb.wb_obj) +		n -= RADEON_GPU_PAGE_SIZE; +	if (rdev->ih.ring_obj) +		n -= rdev->ih.ring_size; +	n /= size;  	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);  	if (!gtt_obj) { @@ -132,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev)  		     gtt_start++, vram_start++) {  			if (*vram_start != gtt_start) {  				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " -					  "expected 0x%p (GTT map 0x%p-0x%p)\n", -					  i, *vram_start, gtt_start, gtt_map, -					  gtt_end); +					  "expected 0x%p (GTT/VRAM offset " +					  "0x%16llx/0x%16llx)\n", +					  i, *vram_start, gtt_start, +					  (unsigned long long) +					  (gtt_addr - rdev->mc.gtt_start + +					   (void*)gtt_start - gtt_map), +					  (unsigned long long) +					  (vram_addr - rdev->mc.vram_start + +					   (void*)gtt_start - gtt_map));  				radeon_bo_kunmap(vram_obj);  				goto out_cleanup;  			} @@ -175,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev)  		     gtt_start++, vram_start++) {  			if (*gtt_start != vram_start) {  				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " -					  "expected 0x%p (VRAM map 0x%p-0x%p)\n", -					  i, *gtt_start, vram_start, vram_map, -					  vram_end); +					  "expected 0x%p (VRAM/GTT offset " +					  "0x%16llx/0x%16llx)\n", +					  i, *gtt_start, vram_start, +					  (unsigned long long) +					  (vram_addr - rdev->mc.vram_start + +					   (void*)vram_start - vram_map), +					  (unsigned long long) +					  (gtt_addr - rdev->mc.gtt_start + +					   (void*)vram_start - vram_map));  				radeon_bo_kunmap(gtt_obj[i]);  				goto out_cleanup;  			} diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 60125ddba1e..0b5468bfaf5 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,  		DRM_ERROR("Trying to move memory with CP turned off.\n");  		return -EINVAL;  	} -	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); + +	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); + +	r = radeon_copy(rdev, old_start, new_start, +			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ +			fence);  	/* FIXME: handle copy error */  	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,  				      evict, no_wait_reserve, no_wait_gpu, new_mem); @@ -450,6 +455,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_  			return -EINVAL;  		mem->bus.base = rdev->mc.aper_base;  		mem->bus.is_iomem = true; +#ifdef __alpha__ +		/* +		 * Alpha: use bus.addr to hold the ioremap() return, +		 * so we can modify bus.base below. +		 */ +		if (mem->placement & TTM_PL_FLAG_WC) +			mem->bus.addr = +				ioremap_wc(mem->bus.base + mem->bus.offset, +					   mem->bus.size); +		else +			mem->bus.addr = +				ioremap_nocache(mem->bus.base + mem->bus.offset, +						mem->bus.size); + +		/* +		 * Alpha: Use just the bus offset plus +		 * the hose/domain memory base for bus.base. +		 * It then can be used to build PTEs for VRAM +		 * access, as done in ttm_bo_vm_fault(). 
+		 */ +		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + +			rdev->ddev->hose->dense_mem_base; +#endif  		break;  	default:  		return -EINVAL; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4720d000d44..b13c2eedc32 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,  	return backend_map;  } -static void rv770_program_channel_remap(struct radeon_device *rdev) -{ -	u32 tcp_chan_steer, mc_shared_chremap, tmp; -	bool force_no_swizzle; - -	switch (rdev->family) { -	case CHIP_RV770: -	case CHIP_RV730: -		force_no_swizzle = false; -		break; -	case CHIP_RV710: -	case CHIP_RV740: -	default: -		force_no_swizzle = true; -		break; -	} - -	tmp = RREG32(MC_SHARED_CHMAP); -	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { -	case 0: -	case 1: -	default: -		/* default mapping */ -		mc_shared_chremap = 0x00fac688; -		break; -	case 2: -	case 3: -		if (force_no_swizzle) -			mc_shared_chremap = 0x00fac688; -		else -			mc_shared_chremap = 0x00bbc298; -		break; -	} - -	if (rdev->family == CHIP_RV740) -		tcp_chan_steer = 0x00ef2a60; -	else -		tcp_chan_steer = 0x00fac688; - -	/* RV770 CE has special chremap setup */ -	if (rdev->pdev->device == 0x944e) { -		tcp_chan_steer = 0x00b08b08; -		mc_shared_chremap = 0x00b08b08; -	} - -	WREG32(TCP_CHAN_STEER, tcp_chan_steer); -	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); -} -  static void rv770_gpu_init(struct radeon_device *rdev)  {  	int i, j, num_qd_pipes; @@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)  	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));  	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); -	rv770_program_channel_remap(rdev); -  	WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);  	WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);  	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 56619f64b6b..ef06194c5aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)  		ret = ttm_tt_set_user(bo->ttm, current,  				      bo->buffer_start, bo->num_pages); -		if (unlikely(ret != 0)) +		if (unlikely(ret != 0)) {  			ttm_tt_destroy(bo->ttm); +			bo->ttm = NULL; +		}  		break;  	default:  		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); @@ -390,10 +392,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,  	 * Create and bind a ttm if required.  	 
*/ -	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { -		ret = ttm_bo_add_ttm(bo, false); -		if (ret) -			goto out_err; +	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { +		if (bo->ttm == NULL) { +			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); +			ret = ttm_bo_add_ttm(bo, zero); +			if (ret) +				goto out_err; +		}  		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);  		if (ret) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 77dbf408c0d..082fcaea583 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -321,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,  	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];  	struct ttm_tt *ttm = bo->ttm;  	struct ttm_mem_reg *old_mem = &bo->mem; -	struct ttm_mem_reg old_copy; +	struct ttm_mem_reg old_copy = *old_mem;  	void *old_iomap;  	void *new_iomap;  	int ret; @@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,  		if (ret)  			return ret; -		ttm_bo_free_old_node(bo);  		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&  		    (bo->ttm != NULL)) {  			ttm_tt_unbind(bo->ttm);  			ttm_tt_destroy(bo->ttm);  			bo->ttm = NULL;  		} +		ttm_bo_free_old_node(bo);  	} else {  		/**  		 * This should help pipeline ordinary buffer moves. diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 306b15f39c9..22a4a051f22 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -69,7 +69,7 @@ config HID_ACRUX  	Say Y here if you want to enable support for ACRUX game controllers.  config HID_ACRUX_FF -	tristate "ACRUX force feedback support" +	bool "ACRUX force feedback support"  	depends on HID_ACRUX  	select INPUT_FF_MEMLESS  	---help--- @@ -245,6 +245,15 @@ config HID_LOGITECH  	---help---  	Support for Logitech devices that are not fully compliant with HID standard. +config HID_LOGITECH_DJ +	tristate "Logitech Unifying receivers full support" +	depends on HID_LOGITECH +	default m +	---help--- +	Say Y if you want support for Logitech Unifying receivers and devices. +	Unifying receivers are capable of pairing up to 6 Logitech compliant +	devices to the same receiver. +  config LOGITECH_FF  	bool "Logitech force feedback support"  	depends on HID_LOGITECH @@ -278,13 +287,21 @@ config LOGIG940_FF  	  Say Y here if you want to enable force feedback support for Logitech  	  Flight System G940 devices. -config LOGIWII_FF -	bool "Logitech Speed Force Wireless force feedback support" +config LOGIWHEELS_FF +	bool "Logitech wheels configuration and force feedback support"  	depends on HID_LOGITECH  	select INPUT_FF_MEMLESS +	default LOGITECH_FF  	help -	  Say Y here if you want to enable force feedback support for Logitech -	  Speed Force Wireless (Wii) devices. 
+	  Say Y here if you want to enable force feedback and range setting +	  support for following Logitech wheels: +	  - Logitech Driving Force +	  - Logitech Driving Force Pro +	  - Logitech Driving Force GT +	  - Logitech G25 +	  - Logitech G27 +	  - Logitech MOMO/MOMO 2 +	  - Logitech Formula Force EX  config HID_MAGICMOUSE  	tristate "Apple MagicMouse multi-touch support" @@ -328,6 +345,7 @@ config HID_MULTITOUCH  	  - Hanvon dual touch panels  	  - Ilitek dual touch panels  	  - IrTouch Infrared USB panels +	  - LG Display panels (Dell ST2220Tc)  	  - Lumio CrystalTouch panels  	  - MosArt dual-touch panels  	  - PenMount dual touch panels @@ -441,6 +459,13 @@ config HID_PICOLCD_LEDS  	---help---  	  Provide access to PicoLCD's GPO pins via leds class. +config HID_PRIMAX +	tristate "Primax non-fully HID-compliant devices" +	depends on USB_HID +	---help--- +	Support for Primax devices that are not fully compliant with the +	HID standard. +  config HID_QUANTA  	tristate "Quanta Optical Touch panels"  	depends on USB_HID @@ -539,7 +564,11 @@ config HID_SMARTJOYPLUS  	tristate "SmartJoy PLUS PS2/USB adapter support"  	depends on USB_HID  	---help--- -	Support for SmartJoy PLUS PS2/USB adapter. +	Support for SmartJoy PLUS PS2/USB adapter, Super Dual Box, +	Super Joy Box 3 Pro, Super Dual Box Pro, and Super Joy Box 5 Pro. + +	Note that DDR (Dance Dance Revolution) mode is not supported, nor +	is pressure sensitive buttons on the pro models.  config SMARTJOYPLUS_FF  	bool "SmartJoy PLUS PS2/USB adapter force feedback support" @@ -589,6 +618,8 @@ config HID_WACOM_POWER_SUPPLY  config HID_WIIMOTE  	tristate "Nintendo Wii Remote support"  	depends on BT_HIDP +	depends on LEDS_CLASS +	select POWER_SUPPLY  	---help---  	Support for the Nintendo Wii Remote bluetooth device. 
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 0a0a38e9fd2..1e0d2a638b2 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -21,7 +21,7 @@ endif  ifdef CONFIG_LOGIG940_FF  	hid-logitech-y	+= hid-lg3ff.o  endif -ifdef CONFIG_LOGIWII_FF +ifdef CONFIG_LOGIWHEELS_FF  	hid-logitech-y	+= hid-lg4ff.o  endif @@ -43,6 +43,7 @@ obj-$(CONFIG_HID_KEYTOUCH)	+= hid-keytouch.o  obj-$(CONFIG_HID_KYE)		+= hid-kye.o  obj-$(CONFIG_HID_LCPOWER)       += hid-lcpower.o  obj-$(CONFIG_HID_LOGITECH)	+= hid-logitech.o +obj-$(CONFIG_HID_LOGITECH_DJ)	+= hid-logitech-dj.o  obj-$(CONFIG_HID_MAGICMOUSE)    += hid-magicmouse.o  obj-$(CONFIG_HID_MICROSOFT)	+= hid-microsoft.o  obj-$(CONFIG_HID_MONTEREY)	+= hid-monterey.o @@ -54,6 +55,7 @@ obj-$(CONFIG_HID_QUANTA)	+= hid-quanta.o  obj-$(CONFIG_HID_PANTHERLORD)	+= hid-pl.o  obj-$(CONFIG_HID_PETALYNX)	+= hid-petalynx.o  obj-$(CONFIG_HID_PICOLCD)	+= hid-picolcd.o +obj-$(CONFIG_HID_PRIMAX)	+= hid-primax.o  obj-$(CONFIG_HID_ROCCAT)	+= hid-roccat.o  obj-$(CONFIG_HID_ROCCAT_COMMON)	+= hid-roccat-common.o  obj-$(CONFIG_HID_ROCCAT_ARVO)	+= hid-roccat-arvo.o diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index b85744fe846..9bc7b03269d 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -183,6 +183,9 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,  		if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&  				hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)  			table = macbookair_fn_keys; +		else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI && +				hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) +			table = macbookair_fn_keys;  		else if (hid->product < 0x21d || hid->product >= 0x300)  			table = powerbook_fn_keys;  		else @@ -444,6 +447,12 @@ static const struct hid_device_id apple_devices[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),  		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |  			APPLE_RDESC_JIS }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), +		.driver_data = APPLE_HAS_FN }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO), +		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS), +		.driver_data = APPLE_HAS_FN },  	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),  		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },  	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), @@ -487,6 +496,18 @@ static const struct hid_device_id apple_devices[] = {  		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),  		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), +		.driver_data = APPLE_HAS_FN }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), +		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), +		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), +		.driver_data = APPLE_HAS_FN }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), +		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 
USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), +		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },  	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),  		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },  	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c index 121514149e0..3bdb4500f95 100644 --- a/drivers/hid/hid-axff.c +++ b/drivers/hid/hid-axff.c @@ -6,7 +6,7 @@   * Xbox 360 controller.   *   * 1a34:0802 "ACRUX USB GAMEPAD 8116" - *  - tested with a EXEQ EQ-PCU-02090 game controller. + *  - tested with an EXEQ EQ-PCU-02090 game controller.   *   * Copyright (c) 2010 Sergei Kolzun <x0r@dv-life.ru>   */ @@ -45,7 +45,10 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect  {  	struct hid_device *hid = input_get_drvdata(dev);  	struct axff_device *axff = data; +	struct hid_report *report = axff->report; +	int field_count = 0;  	int left, right; +	int i, j;  	left = effect->u.rumble.strong_magnitude;  	right = effect->u.rumble.weak_magnitude; @@ -55,10 +58,14 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect  	left = left * 0xff / 0xffff;  	right = right * 0xff / 0xffff; -	axff->report->field[0]->value[0] = left; -	axff->report->field[1]->value[0] = right; -	axff->report->field[2]->value[0] = left; -	axff->report->field[3]->value[0] = right; +	for (i = 0; i < report->maxfield; i++) { +		for (j = 0; j < report->field[i]->report_count; j++) { +			report->field[i]->value[j] = +				field_count % 2 ? right : left; +			field_count++; +		} +	} +  	dbg_hid("running with 0x%02x 0x%02x", left, right);  	usbhid_submit_report(hid, axff->report, USB_DIR_OUT); @@ -72,6 +79,8 @@ static int axff_init(struct hid_device *hid)  	struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);  	struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;  	struct input_dev *dev = hidinput->input; +	int field_count = 0; +	int i, j;  	int error;  	if (list_empty(report_list)) { @@ -80,9 +89,16 @@ static int axff_init(struct hid_device *hid)  	}  	report = list_first_entry(report_list, struct hid_report, list); +	for (i = 0; i < report->maxfield; i++) { +		for (j = 0; j < report->field[i]->report_count; j++) { +			report->field[i]->value[j] = 0x00; +			field_count++; +		} +	} -	if (report->maxfield < 4) { -		hid_err(hid, "no fields in the report: %d\n", report->maxfield); +	if (field_count < 4) { +		hid_err(hid, "not enough fields in the report: %d\n", +			field_count);  		return -ENODEV;  	} @@ -97,13 +113,9 @@ static int axff_init(struct hid_device *hid)  		goto err_free_mem;  	axff->report = report; -	axff->report->field[0]->value[0] = 0x00; -	axff->report->field[1]->value[0] = 0x00; -	axff->report->field[2]->value[0] = 0x00; -	axff->report->field[3]->value[0] = 0x00;  	usbhid_submit_report(hid, axff->report, USB_DIR_OUT); -	hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun<x0r@dv-life.ru>\n"); +	hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun <x0r@dv-life.ru>\n");  	return 0; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 1a5cf0c9cfc..91adcc5bad2 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -29,6 +29,7 @@  #include <linux/wait.h>  #include <linux/vmalloc.h>  #include <linux/sched.h> +#include <linux/semaphore.h>  #include <linux/hid.h>  #include <linux/hiddev.h> @@ -1085,16 
+1086,25 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i  	struct hid_report *report;  	char *buf;  	unsigned int i; -	int ret; +	int ret = 0; -	if (!hid || !hid->driver) +	if (!hid)  		return -ENODEV; + +	if (down_trylock(&hid->driver_lock)) +		return -EBUSY; + +	if (!hid->driver) { +		ret = -ENODEV; +		goto unlock; +	}  	report_enum = hid->report_enum + type;  	hdrv = hid->driver;  	if (!size) {  		dbg_hid("empty report\n"); -		return -1; +		ret = -1; +		goto unlock;  	}  	buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC); @@ -1118,18 +1128,24 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i  nomem:  	report = hid_get_report(report_enum, data); -	if (!report) -		return -1; +	if (!report) { +		ret = -1; +		goto unlock; +	}  	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {  		ret = hdrv->raw_event(hid, report, data, size); -		if (ret != 0) -			return ret < 0 ? ret : 0; +		if (ret != 0) { +			ret = ret < 0 ? ret : 0; +			goto unlock; +		}  	}  	hid_report_raw_event(hid, type, data, size, interrupt); -	return 0; +unlock: +	up(&hid->driver_lock); +	return ret;  }  EXPORT_SYMBOL_GPL(hid_input_report); @@ -1212,6 +1228,12 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)  	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,  				connect_mask & HID_CONNECT_HIDINPUT_FORCE))  		hdev->claimed |= HID_CLAIMED_INPUT; +	if (hdev->quirks & HID_QUIRK_MULTITOUCH) { +		/* this device should be handled by hid-multitouch, skip it */ +		hdev->quirks &= ~HID_QUIRK_MULTITOUCH; +		return -ENODEV; +	} +  	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&  			!hdev->hiddev_connect(hdev,  				connect_mask & HID_CONNECT_HIDDEV_FORCE)) @@ -1340,6 +1362,15 @@ static const struct hid_device_id hid_have_special_driver[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },  	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },  	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },  	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, @@ -1388,6 +1419,7 @@ static const struct hid_device_id hid_have_special_driver[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) }, + 	{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, 
USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) }, @@ -1396,6 +1428,7 @@ static const struct hid_device_id hid_have_special_driver[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MULTITOUCH) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, @@ -1417,8 +1450,11 @@ static const struct hid_device_id hid_have_special_driver[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, @@ -1458,6 +1494,7 @@ static const struct hid_device_id hid_have_special_driver[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) }, @@ -1498,6 +1535,10 @@ static const struct hid_device_id hid_have_special_driver[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },  	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, @@ -1617,10 +1658,15 @@ static int hid_device_probe(struct device *dev)  	const struct hid_device_id *id;  	int ret = 0; +	if (down_interruptible(&hdev->driver_lock)) +		return -EINTR; + 
 	if (!hdev->driver) {  		id = hid_match_device(hdev, hdrv); -		if (id == NULL) -			return -ENODEV; +		if (id == NULL) { +			ret = -ENODEV; +			goto unlock; +		}  		hdev->driver = hdrv;  		if (hdrv->probe) { @@ -1633,14 +1679,20 @@ static int hid_device_probe(struct device *dev)  		if (ret)  			hdev->driver = NULL;  	} +unlock: +	up(&hdev->driver_lock);  	return ret;  }  static int hid_device_remove(struct device *dev)  {  	struct hid_device *hdev = container_of(dev, struct hid_device, dev); -	struct hid_driver *hdrv = hdev->driver; +	struct hid_driver *hdrv; + +	if (down_interruptible(&hdev->driver_lock)) +		return -EINTR; +	hdrv = hdev->driver;  	if (hdrv) {  		if (hdrv->remove)  			hdrv->remove(hdev); @@ -1649,6 +1701,7 @@ static int hid_device_remove(struct device *dev)  		hdev->driver = NULL;  	} +	up(&hdev->driver_lock);  	return 0;  } @@ -1889,6 +1942,12 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },  	{ } @@ -1996,6 +2055,7 @@ struct hid_device *hid_allocate_device(void)  	init_waitqueue_head(&hdev->debug_wait);  	INIT_LIST_HEAD(&hdev->debug_list); +	sema_init(&hdev->driver_lock, 1);  	return hdev;  err: diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index bae48745bb4..9a243ca96e6 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -450,6 +450,11 @@ void hid_dump_field(struct hid_field *field, int n, struct seq_file *f) {  		seq_printf(f, "Logical(");  		hid_resolv_usage(field->logical, f); seq_printf(f, ")\n");  	} +	if (field->application) { +		tab(n, f); +		seq_printf(f, "Application("); +		hid_resolv_usage(field->application, f); seq_printf(f, ")\n"); +	}  	tab(n, f); seq_printf(f, "Usage(%d)\n", field->maxusage);  	for (j = 0; j < field->maxusage; j++) {  		tab(n+2, f); hid_resolv_usage(field->usage[j].hid, f); seq_printf(f, "\n"); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index db63ccf21cc..1680e99b481 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -109,6 +109,15 @@  #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI	0x0245  #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO	0x0246  #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS	0x0247 +#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI	0x024f +#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO	0x0250 +#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS	0x0251 +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI	0x0249 +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO	0x024a +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS	0x024b +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI	0x024c +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO	0x024d +#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS	0x024e  #define 
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239  #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a  #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b @@ -274,6 +283,7 @@  #define USB_DEVICE_ID_PENPOWER		0x00f4  #define USB_VENDOR_ID_GREENASIA		0x0e8f +#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD	0x3013  #define USB_VENDOR_ID_GRETAGMACBETH	0x0971  #define USB_DEVICE_ID_GRETAGMACBETH_HUEY	0x2005 @@ -347,6 +357,9 @@  #define USB_DEVICE_ID_UGCI_FLYING	0x0020  #define USB_DEVICE_ID_UGCI_FIGHTING	0x0030 +#define USB_VENDOR_ID_IDEACOM		0x1cb6 +#define USB_DEVICE_ID_IDEACOM_IDC6650	0x6650 +  #define USB_VENDOR_ID_ILITEK		0x222a  #define USB_DEVICE_ID_ILITEK_MULTITOUCH	0x0001 @@ -419,6 +432,9 @@  #define USB_DEVICE_ID_LD_HYBRID		0x2090  #define USB_DEVICE_ID_LD_HEATCONTROL	0x20A0 +#define USB_VENDOR_ID_LG		0x1fd2 +#define USB_DEVICE_ID_LG_MULTITOUCH	0x0064 +  #define USB_VENDOR_ID_LOGITECH		0x046d  #define USB_DEVICE_ID_LOGITECH_RECEIVER	0xc101  #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST  0xc110 @@ -436,6 +452,7 @@  #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL	0xc295  #define USB_DEVICE_ID_LOGITECH_DFP_WHEEL	0xc298  #define USB_DEVICE_ID_LOGITECH_G25_WHEEL	0xc299 +#define USB_DEVICE_ID_LOGITECH_DFGT_WHEEL	0xc29a  #define USB_DEVICE_ID_LOGITECH_G27_WHEEL	0xc29b  #define USB_DEVICE_ID_LOGITECH_WII_WHEEL	0xc29c  #define USB_DEVICE_ID_LOGITECH_ELITE_KBD	0xc30a @@ -443,6 +460,8 @@  #define USB_DEVICE_ID_S510_RECEIVER_2	0xc517  #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500	0xc512  #define USB_DEVICE_ID_MX3000_RECEIVER	0xc513 +#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER	0xc52b +#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2	0xc532  #define USB_DEVICE_ID_SPACETRAVELLER	0xc623  #define USB_DEVICE_ID_SPACENAVIGATOR	0xc626  #define USB_DEVICE_ID_DINOVO_DESKTOP	0xc704 @@ -576,6 +595,9 @@  #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE	0x0001  #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE	0x0600 +#define USB_VENDOR_ID_SIGMA_MICRO	0x1c4f +#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD	0x0002 +  #define USB_VENDOR_ID_SKYCABLE			0x1223  #define	USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER	0x3F07 @@ -671,6 +693,9 @@  #define USB_VENDOR_ID_WISEGROUP_LTD	0x6666  #define USB_VENDOR_ID_WISEGROUP_LTD2	0x6677  #define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802 +#define USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO 0x8801 +#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802 +#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804  #define USB_VENDOR_ID_X_TENSIONS               0x1ae7  #define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE    0x9001 @@ -686,4 +711,7 @@  #define USB_VENDOR_ID_ZYDACRON	0x13EC  #define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL	0x0006 +#define USB_VENDOR_ID_PRIMAX	0x0461 +#define USB_DEVICE_ID_PRIMAX_KEYBOARD	0x4e05 +  #endif diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 6559e2e3364..f333139d1a4 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -474,6 +474,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel  			map_key_clear(BTN_STYLUS2);  			break; +		case 0x51: /* ContactID */ +			device->quirks |= HID_QUIRK_MULTITOUCH; +			goto unknown; +  		default:  goto unknown;  		}  		break; @@ -978,6 +982,13 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)  		}  	} +	if (hid->quirks & HID_QUIRK_MULTITOUCH) { +		/* generic hid does not know how to handle multitouch devices */ +		if (hidinput) +			goto out_cleanup; +		goto out_unwind; +	} +  	if (hidinput && input_register_device(hidinput->input))  		goto out_cleanup; 
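
The hid-input.c hunk just above is the hand-off point for multitouch hardware: when a report descriptor carries the Digitizer ContactID usage (0x51), the device is flagged with HID_QUIRK_MULTITOUCH and hidinput_connect() backs out instead of registering a generic input device, leaving the interface free for hid-multitouch to claim. A minimal standalone sketch of that flag-and-bail pattern follows; the names mirror the kernel's, but this is an illustrative model, not kernel code.

	/* Illustrative model of the ContactID hand-off shown above -- not kernel code. */
	#include <stdio.h>

	#define MODEL_QUIRK_MULTITOUCH	0x1	/* stand-in for HID_QUIRK_MULTITOUCH */
	#define DG_CONTACTID_USAGE	0x51	/* Digitizer page: ContactID usage */

	struct model_dev {
		unsigned long quirks;
	};

	/* Like the new "case 0x51" in hidinput_configure_usage(): the usage stays
	 * unmapped, but the device is remembered as a multitouch digitizer. */
	static void configure_usage(struct model_dev *dev, unsigned int usage)
	{
		if (usage == DG_CONTACTID_USAGE)
			dev->quirks |= MODEL_QUIRK_MULTITOUCH;
	}

	/* Like the new check in hidinput_connect(): generic hid-input refuses to
	 * register an input device, so hid-multitouch can bind to it instead. */
	static int generic_connect(struct model_dev *dev)
	{
		if (dev->quirks & MODEL_QUIRK_MULTITOUCH)
			return -1;
		return 0;
	}

	int main(void)
	{
		struct model_dev dev = { 0 };

		configure_usage(&dev, DG_CONTACTID_USAGE);
		printf("generic hid-input binds: %s\n",
		       generic_connect(&dev) ? "no" : "yes");
		return 0;
	}
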
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index a7f916e8fc3..e7a7bd1eb34 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -363,7 +363,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)  		goto err_free;  	} -	if (quirks & (LG_FF | LG_FF2 | LG_FF3)) +	if (quirks & (LG_FF | LG_FF2 | LG_FF3 | LG_FF4))  		connect_mask &= ~HID_CONNECT_FF;  	ret = hid_hw_start(hdev, connect_mask); @@ -372,7 +372,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)  		goto err_free;  	} -	if (quirks & LG_FF4) { +	/* Setup wireless link with Logitech Wii wheel */ +	if(hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {  		unsigned char buf[] = { 0x00, 0xAF,  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };  		ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); @@ -405,6 +406,15 @@ err_free:  	return ret;  } +static void lg_remove(struct hid_device *hdev) +{ +	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); +	if(quirks & LG_FF4) +		lg4ff_deinit(hdev); + +	hid_hw_stop(hdev); +} +  static const struct hid_device_id lg_devices[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),  		.driver_data = LG_RDESC | LG_WIRELESS }, @@ -431,7 +441,7 @@ static const struct hid_device_id lg_devices[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D),  		.driver_data = LG_NOGET },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL), -		.driver_data = LG_NOGET | LG_FF }, +		.driver_data = LG_NOGET | LG_FF4 },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD),  		.driver_data = LG_FF2 }, @@ -444,15 +454,17 @@ static const struct hid_device_id lg_devices[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),  		.driver_data = LG_FF },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL), -		.driver_data = LG_FF }, +		.driver_data = LG_FF4 },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2), -		.driver_data = LG_FF }, +		.driver_data = LG_FF4 },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL), -		.driver_data = LG_FF }, +		.driver_data = LG_FF4 }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL), +		.driver_data = LG_FF4 },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL), -		.driver_data = LG_FF }, +		.driver_data = LG_FF4 },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL), -		.driver_data = LG_NOGET | LG_FF }, +		.driver_data = LG_NOGET | LG_FF4 },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),  		.driver_data = LG_FF4 },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ), @@ -478,6 +490,7 @@ static struct hid_driver lg_driver = {  	.input_mapped = lg_input_mapped,  	.event = lg_event,  	.probe = lg_probe, +	.remove = lg_remove,  };  static int __init lg_init(void) diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h index b0100ba2ae0..4b097286dc7 100644 --- a/drivers/hid/hid-lg.h +++ b/drivers/hid/hid-lg.h @@ -19,10 +19,12 @@ int lg3ff_init(struct hid_device *hdev);  static inline int lg3ff_init(struct hid_device *hdev) { return -1; }  #endif -#ifdef CONFIG_LOGIWII_FF +#ifdef CONFIG_LOGIWHEELS_FF  int lg4ff_init(struct hid_device *hdev); +int lg4ff_deinit(struct hid_device *hdev);  #else  static inline int lg4ff_init(struct hid_device 
*hdev) { return -1; } +static inline int lg4ff_deinit(struct hid_device *hdev) { return -1; }  #endif  #endif diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index fa550c8e1d1..103f30d93f7 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c @@ -29,19 +29,108 @@  #include "usbhid/usbhid.h"  #include "hid-lg.h" +#include "hid-ids.h" -struct lg4ff_device { -	struct hid_report *report; +#define DFGT_REV_MAJ 0x13 +#define DFGT_REV_MIN 0x22 +#define DFP_REV_MAJ 0x11 +#define DFP_REV_MIN 0x06 +#define FFEX_REV_MAJ 0x21 +#define FFEX_REV_MIN 0x00 +#define G25_REV_MAJ 0x12 +#define G25_REV_MIN 0x22 +#define G27_REV_MAJ 0x12 +#define G27_REV_MIN 0x38 + +#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) + +static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range); +static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range); +static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf); +static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); + +static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IRWXO, lg4ff_range_show, lg4ff_range_store); + +static bool list_inited; + +struct lg4ff_device_entry { +	char  *device_id;	/* Use name in respective kobject structure's address as the ID */ +	__u16 range; +	__u16 min_range; +	__u16 max_range; +	__u8  leds; +	struct list_head list; +	void (*set_range)(struct hid_device *hid, u16 range);  }; -static const signed short ff4_wheel_ac[] = { +static struct lg4ff_device_entry device_list; + +static const signed short lg4ff_wheel_effects[] = {  	FF_CONSTANT,  	FF_AUTOCENTER,  	-1  }; -static int hid_lg4ff_play(struct input_dev *dev, void *data, -			 struct ff_effect *effect) +struct lg4ff_wheel { +	const __u32 product_id; +	const signed short *ff_effects; +	const __u16 min_range; +	const __u16 max_range; +	void (*set_range)(struct hid_device *hid, u16 range); +}; + +static const struct lg4ff_wheel lg4ff_devices[] = { +	{USB_DEVICE_ID_LOGITECH_WHEEL,       lg4ff_wheel_effects, 40, 270, NULL}, +	{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL,  lg4ff_wheel_effects, 40, 270, NULL}, +	{USB_DEVICE_ID_LOGITECH_DFP_WHEEL,   lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_dfp}, +	{USB_DEVICE_ID_LOGITECH_G25_WHEEL,   lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25}, +	{USB_DEVICE_ID_LOGITECH_DFGT_WHEEL,  lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25}, +	{USB_DEVICE_ID_LOGITECH_G27_WHEEL,   lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25}, +	{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL}, +	{USB_DEVICE_ID_LOGITECH_WII_WHEEL,   lg4ff_wheel_effects, 40, 270, NULL} +}; + +struct lg4ff_native_cmd { +	const __u8 cmd_num;	/* Number of commands to send */ +	const __u8 cmd[]; +}; + +struct lg4ff_usb_revision { +	const __u16 rev_maj; +	const __u16 rev_min; +	const struct lg4ff_native_cmd *command; +}; + +static const struct lg4ff_native_cmd native_dfp = { +	1, +	{0xf8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00} +}; + +static const struct lg4ff_native_cmd native_dfgt = { +	2, +	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* 1st command */ +	 0xf8, 0x09, 0x03, 0x01, 0x00, 0x00, 0x00}	/* 2nd command */ +}; + +static const struct lg4ff_native_cmd native_g25 = { +	1, +	{0xf8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00} +}; + +static const struct lg4ff_native_cmd native_g27 = { +	2, +	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* 1st command */ +	 0xf8, 0x09, 0x04, 0x01, 0x00, 0x00, 0x00}	/* 2nd command */ +}; 
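
The DEVICE_ATTR(range, ...) declared near the top of this new lg4ff code, together with the device_create_file() call later in lg4ff_init(), exposes the wheel's rotation range as a sysfs attribute on the HID device. The store handler parses a plain decimal number of degrees with simple_strtoul() and only applies values inside the wheel's min/max window (40-900 for the DFP/G25/DFGT/G27 entries in the table above; writing 0 resets to the maximum). A small userspace sketch, assuming an illustrative sysfs path -- the real directory name depends on the wheel's bus/vendor/product/instance IDs:

	/* Hedged userspace sketch: set the wheel's rotation range in degrees via
	 * the new sysfs attribute. The default path below is only an example. */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		const char *path = argc > 1 ? argv[1]
			: "/sys/bus/hid/devices/0003:046D:C298.0001/range";	/* example */
		int degrees = argc > 2 ? atoi(argv[2]) : 540;
		FILE *f = fopen(path, "w");

		if (!f) {
			perror("fopen range attribute");
			return 1;
		}
		/* lg4ff_range_store() reads a plain decimal number; values outside
		 * the wheel's min/max window are ignored, and 0 resets to the max. */
		fprintf(f, "%d\n", degrees);
		return fclose(f) ? 1 : 0;
	}

Reading the same attribute back returns the currently applied range, as implemented by lg4ff_range_show().
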
+ +static const struct lg4ff_usb_revision lg4ff_revs[] = { +	{DFGT_REV_MAJ, DFGT_REV_MIN, &native_dfgt},	/* Driving Force GT */ +	{DFP_REV_MAJ,  DFP_REV_MIN,  &native_dfp},	/* Driving Force Pro */ +	{G25_REV_MAJ,  G25_REV_MIN,  &native_g25},	/* G25 */ +	{G27_REV_MAJ,  G27_REV_MIN,  &native_g27},	/* G27 */ +}; + +static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)  {  	struct hid_device *hid = input_get_drvdata(dev);  	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; @@ -55,13 +144,12 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,  		x = effect->u.ramp.start_level + 0x80;	/* 0x80 is no force */  		CLAMP(x);  		report->field[0]->value[0] = 0x11;	/* Slot 1 */ -		report->field[0]->value[1] = 0x10; +		report->field[0]->value[1] = 0x08;  		report->field[0]->value[2] = x; -		report->field[0]->value[3] = 0x00; +		report->field[0]->value[3] = 0x80;  		report->field[0]->value[4] = 0x00; -		report->field[0]->value[5] = 0x08; +		report->field[0]->value[5] = 0x00;  		report->field[0]->value[6] = 0x00; -		dbg_hid("Autocenter, x=0x%02X\n", x);  		usbhid_submit_report(hid, report, USB_DIR_OUT);  		break; @@ -69,24 +157,184 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,  	return 0;  } -static void hid_lg4ff_set_autocenter(struct input_dev *dev, u16 magnitude) +/* Sends default autocentering command compatible with + * all wheels except Formula Force EX */ +static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)  {  	struct hid_device *hid = input_get_drvdata(dev);  	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;  	struct hid_report *report = list_entry(report_list->next, struct hid_report, list); -	__s32 *value = report->field[0]->value; -	*value++ = 0xfe; -	*value++ = 0x0d; -	*value++ = 0x07; -	*value++ = 0x07; -	*value++ = (magnitude >> 8) & 0xff; -	*value++ = 0x00; -	*value = 0x00; +	report->field[0]->value[0] = 0xfe; +	report->field[0]->value[1] = 0x0d; +	report->field[0]->value[2] = magnitude >> 13; +	report->field[0]->value[3] = magnitude >> 13; +	report->field[0]->value[4] = magnitude >> 8; +	report->field[0]->value[5] = 0x00; +	report->field[0]->value[6] = 0x00; + +	usbhid_submit_report(hid, report, USB_DIR_OUT); +} + +/* Sends autocentering command compatible with Formula Force EX */ +static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude) +{ +	struct hid_device *hid = input_get_drvdata(dev); +	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; +	struct hid_report *report = list_entry(report_list->next, struct hid_report, list); +	magnitude = magnitude * 90 / 65535; +	 + +	report->field[0]->value[0] = 0xfe; +	report->field[0]->value[1] = 0x03; +	report->field[0]->value[2] = magnitude >> 14; +	report->field[0]->value[3] = magnitude >> 14; +	report->field[0]->value[4] = magnitude; +	report->field[0]->value[5] = 0x00; +	report->field[0]->value[6] = 0x00; + +	usbhid_submit_report(hid, report, USB_DIR_OUT); +} + +/* Sends command to set range compatible with G25/G27/Driving Force GT */ +static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range) +{ +	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; +	struct hid_report *report = list_entry(report_list->next, struct hid_report, list); +	dbg_hid("G25/G27/DFGT: setting range to %u\n", range); + +	report->field[0]->value[0] = 0xf8; +	report->field[0]->value[1] = 0x81; +	report->field[0]->value[2] = 
range & 0x00ff; +	report->field[0]->value[3] = (range & 0xff00) >> 8; +	report->field[0]->value[4] = 0x00; +	report->field[0]->value[5] = 0x00; +	report->field[0]->value[6] = 0x00; + +	usbhid_submit_report(hid, report, USB_DIR_OUT); +} + +/* Sends commands to set range compatible with Driving Force Pro wheel */ +static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range) +{ +	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; +	struct hid_report *report = list_entry(report_list->next, struct hid_report, list); +	int start_left, start_right, full_range; +	dbg_hid("Driving Force Pro: setting range to %u\n", range); + +	/* Prepare "coarse" limit command */ +	report->field[0]->value[0] = 0xf8; +	report->field[0]->value[1] = 0x00; 	/* Set later */ +	report->field[0]->value[2] = 0x00; +	report->field[0]->value[3] = 0x00; +	report->field[0]->value[4] = 0x00; +	report->field[0]->value[5] = 0x00; +	report->field[0]->value[6] = 0x00; + +	if (range > 200) { +		report->field[0]->value[1] = 0x03; +		full_range = 900; +	} else { +		report->field[0]->value[1] = 0x02; +		full_range = 200; +	} +	usbhid_submit_report(hid, report, USB_DIR_OUT); + +	/* Prepare "fine" limit command */ +	report->field[0]->value[0] = 0x81; +	report->field[0]->value[1] = 0x0b; +	report->field[0]->value[2] = 0x00; +	report->field[0]->value[3] = 0x00; +	report->field[0]->value[4] = 0x00; +	report->field[0]->value[5] = 0x00; +	report->field[0]->value[6] = 0x00; + +	if (range == 200 || range == 900) {	/* Do not apply any fine limit */ +		usbhid_submit_report(hid, report, USB_DIR_OUT); +		return; +	} + +	/* Construct fine limit command */ +	start_left = (((full_range - range + 1) * 2047) / full_range); +	start_right = 0xfff - start_left; + +	report->field[0]->value[2] = start_left >> 4; +	report->field[0]->value[3] = start_right >> 4; +	report->field[0]->value[4] = 0xff; +	report->field[0]->value[5] = (start_right & 0xe) << 4 | (start_left & 0xe); +	report->field[0]->value[6] = 0xff;  	usbhid_submit_report(hid, report, USB_DIR_OUT);  } +static void hid_lg4ff_switch_native(struct hid_device *hid, const struct lg4ff_native_cmd *cmd) +{ +	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; +	struct hid_report *report = list_entry(report_list->next, struct hid_report, list); +	__u8 i, j; + +	j = 0; +	while (j < 7*cmd->cmd_num) { +		for (i = 0; i < 7; i++) +			report->field[0]->value[i] = cmd->cmd[j++]; + +		usbhid_submit_report(hid, report, USB_DIR_OUT); +	} +} + +/* Read current range and display it in terminal */ +static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct lg4ff_device_entry *uninitialized_var(entry); +	struct list_head *h; +	struct hid_device *hid = to_hid_device(dev); +	size_t count; + +	list_for_each(h, &device_list.list) { +		entry = list_entry(h, struct lg4ff_device_entry, list); +		if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) +			break; +	} +	if (h == &device_list.list) { +		dbg_hid("Device not found!"); +		return 0; +	} + +	count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range); +	return count; +} + +/* Set range to user specified value, call appropriate function + * according to the type of the wheel */ +static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ +	struct lg4ff_device_entry *uninitialized_var(entry); +	struct list_head *h; +	struct hid_device *hid = to_hid_device(dev); +	__u16 range = simple_strtoul(buf, 
NULL, 10); + +	list_for_each(h, &device_list.list) { +		entry = list_entry(h, struct lg4ff_device_entry, list); +		if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) +			break; +	} +	if (h == &device_list.list) { +		dbg_hid("Device not found!"); +		return count; +	} + +	if (range == 0) +		range = entry->max_range; + +	/* Check if the wheel supports range setting +	 * and that the range is within limits for the wheel */ +	if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) { +		entry->set_range(hid, range); +		entry->range = range; +	} + +	return count; +}  int lg4ff_init(struct hid_device *hid)  { @@ -95,9 +343,10 @@ int lg4ff_init(struct hid_device *hid)  	struct input_dev *dev = hidinput->input;  	struct hid_report *report;  	struct hid_field *field; -	const signed short *ff_bits = ff4_wheel_ac; -	int error; -	int i; +	struct lg4ff_device_entry *entry; +	struct usb_device_descriptor *udesc; +	int error, i, j; +	__u16 bcdDevice, rev_maj, rev_min;  	/* Find the report to use */  	if (list_empty(report_list)) { @@ -118,18 +367,122 @@ int lg4ff_init(struct hid_device *hid)  		return -1;  	} -	for (i = 0; ff_bits[i] >= 0; i++) -		set_bit(ff_bits[i], dev->ffbit); +	/* Check what wheel has been connected */ +	for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) { +		if (hid->product == lg4ff_devices[i].product_id) { +			dbg_hid("Found compatible device, product ID %04X\n", lg4ff_devices[i].product_id); +			break; +		} +	} + +	if (i == ARRAY_SIZE(lg4ff_devices)) { +		hid_err(hid, "Device is not supported by lg4ff driver. If you think it should be, consider reporting a bug to" +			     "LKML, Simon Wood <simon@mungewell.org> or Michal Maly <madcatxster@gmail.com>\n"); +		return -1; +	} + +	/* Attempt to switch wheel to native mode when applicable */ +	udesc = &(hid_to_usb_dev(hid)->descriptor); +	if (!udesc) { +		hid_err(hid, "NULL USB device descriptor\n"); +		return -1; +	} +	bcdDevice = le16_to_cpu(udesc->bcdDevice); +	rev_maj = bcdDevice >> 8; +	rev_min = bcdDevice & 0xff; + +	if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_WHEEL) { +		dbg_hid("Generic wheel detected, can it do native?\n"); +		dbg_hid("USB revision: %2x.%02x\n", rev_maj, rev_min); + +		for (j = 0; j < ARRAY_SIZE(lg4ff_revs); j++) { +			if (lg4ff_revs[j].rev_maj == rev_maj && lg4ff_revs[j].rev_min == rev_min) { +				hid_lg4ff_switch_native(hid, lg4ff_revs[j].command); +				hid_info(hid, "Switched to native mode\n"); +			} +		} +	} + +	/* Set supported force feedback capabilities */ +	for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++) +		set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit);  	error = input_ff_create_memless(dev, NULL, hid_lg4ff_play);  	if (error)  		return error; -	if (test_bit(FF_AUTOCENTER, dev->ffbit)) -		dev->ff->set_autocenter = hid_lg4ff_set_autocenter; +	/* Check if autocentering is available and +	 * set the centering force to zero by default */ +	if (test_bit(FF_AUTOCENTER, dev->ffbit)) { +		if(rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN)	/* Formula Force EX expects different autocentering command */ +			dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex; +		else +			dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default; + +		dev->ff->set_autocenter(dev, 0); +	} + +		/* Initialize device_list if this is the first device to handle by lg4ff */ +	if (!list_inited) { +		INIT_LIST_HEAD(&device_list.list); +		list_inited = 1; +	} + +	/* Add the device to device_list */ +	entry = (struct lg4ff_device_entry *)kzalloc(sizeof(struct 
lg4ff_device_entry), GFP_KERNEL); +	if (!entry) { +		hid_err(hid, "Cannot add device, insufficient memory.\n"); +		return -ENOMEM; +	} +	entry->device_id = kstrdup((&hid->dev)->kobj.name, GFP_KERNEL); +	if (!entry->device_id) { +		hid_err(hid, "Cannot set device_id, insufficient memory.\n"); +		kfree(entry); +		return -ENOMEM; +	} +	entry->min_range = lg4ff_devices[i].min_range; +	entry->max_range = lg4ff_devices[i].max_range; +	entry->set_range = lg4ff_devices[i].set_range; +	list_add(&entry->list, &device_list.list); + +	/* Create sysfs interface */ +	error = device_create_file(&hid->dev, &dev_attr_range); +	if (error) +		return error; +	dbg_hid("sysfs interface created\n"); + +	/* Set the maximum range to start with */ +	entry->range = entry->max_range; +	if (entry->set_range != NULL) +		entry->set_range(hid, entry->range);  	hid_info(hid, "Force feedback for Logitech Speed Force Wireless by Simon Wood <simon@mungewell.org>\n");  	return 0;  } +int lg4ff_deinit(struct hid_device *hid) +{ +	bool found = 0; +	struct lg4ff_device_entry *entry; +	struct list_head *h, *g; +	list_for_each_safe(h, g, &device_list.list) { +		entry = list_entry(h, struct lg4ff_device_entry, list); +		if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) { +			list_del(h); +			kfree(entry->device_id); +			kfree(entry); +			found = 1; +			break; +		} +	} + +	if (!found) { +		dbg_hid("Device entry not found!\n"); +		return -1; +	} + +	device_remove_file(&hid->dev, &dev_attr_range); +	dbg_hid("Device successfully unregistered\n"); +	return 0; +} diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c index 088f8504929..27bc54f92f4 100644 --- a/drivers/hid/hid-lgff.c +++ b/drivers/hid/hid-lgff.c @@ -58,12 +58,6 @@ static const signed short ff_joystick_ac[] = {  	-1  }; -static const signed short ff_wheel[] = { -	FF_CONSTANT, -	FF_AUTOCENTER, -	-1 -}; -  static const struct dev_type devices[] = {  	{ 0x046d, 0xc211, ff_rumble },  	{ 0x046d, 0xc219, ff_rumble }, @@ -71,14 +65,7 @@ static const struct dev_type devices[] = {  	{ 0x046d, 0xc286, ff_joystick_ac },  	{ 0x046d, 0xc287, ff_joystick_ac },  	{ 0x046d, 0xc293, ff_joystick }, -	{ 0x046d, 0xc294, ff_wheel }, -	{ 0x046d, 0xc298, ff_wheel }, -	{ 0x046d, 0xc299, ff_wheel }, -	{ 0x046d, 0xc29b, ff_wheel },  	{ 0x046d, 0xc295, ff_joystick }, -	{ 0x046d, 0xc298, ff_wheel }, -	{ 0x046d, 0xc299, ff_wheel }, -	{ 0x046d, 0xca03, ff_wheel },  };  static int hid_lgff_play(struct input_dev *dev, void *data, struct ff_effect *effect) diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c new file mode 100644 index 00000000000..38b12e45780 --- /dev/null +++ b/drivers/hid/hid-logitech-dj.c @@ -0,0 +1,922 @@ +/* + *  HID driver for Logitech Unifying receivers + * + *  Copyright (c) 2011 Logitech + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#include <linux/device.h> +#include <linux/hid.h> +#include <linux/module.h> +#include <linux/usb.h> +#include "usbhid/usbhid.h" +#include "hid-ids.h" +#include "hid-logitech-dj.h" + +/* Keyboard descriptor (1) */ +static const char kbd_descriptor[] = { +	0x05, 0x01,		/* USAGE_PAGE (generic Desktop)     */ +	0x09, 0x06,		/* USAGE (Keyboard)         */ +	0xA1, 0x01,		/* COLLECTION (Application)     */ +	0x85, 0x01,		/* REPORT_ID (1)            */ +	0x95, 0x08,		/*   REPORT_COUNT (8)           */ +	0x75, 0x01,		/*   REPORT_SIZE (1)            */ +	0x15, 0x00,		/*   LOGICAL_MINIMUM (0)        */ +	0x25, 0x01,		/*   LOGICAL_MAXIMUM (1)        */ +	0x05, 0x07,		/*   USAGE_PAGE (Keyboard)      */ +	0x19, 0xE0,		/*   USAGE_MINIMUM (Left Control)   */ +	0x29, 0xE7,		/*   USAGE_MAXIMUM (Right GUI)      */ +	0x81, 0x02,		/*   INPUT (Data,Var,Abs)       */ +	0x95, 0x05,		/*   REPORT COUNT (5)           */ +	0x05, 0x08,		/*   USAGE PAGE (LED page)      */ +	0x19, 0x01,		/*   USAGE MINIMUM (1)          */ +	0x29, 0x05,		/*   USAGE MAXIMUM (5)          */ +	0x91, 0x02,		/*   OUTPUT (Data, Variable, Absolute)  */ +	0x95, 0x01,		/*   REPORT COUNT (1)           */ +	0x75, 0x03,		/*   REPORT SIZE (3)            */ +	0x91, 0x01,		/*   OUTPUT (Constant)          */ +	0x95, 0x06,		/*   REPORT_COUNT (6)           */ +	0x75, 0x08,		/*   REPORT_SIZE (8)            */ +	0x15, 0x00,		/*   LOGICAL_MINIMUM (0)        */ +	0x26, 0xFF, 0x00,	/*   LOGICAL_MAXIMUM (255)      */ +	0x05, 0x07,		/*   USAGE_PAGE (Keyboard)      */ +	0x19, 0x00,		/*   USAGE_MINIMUM (no event)       */ +	0x2A, 0xFF, 0x00,	/*   USAGE_MAXIMUM (reserved)       */ +	0x81, 0x00,		/*   INPUT (Data,Ary,Abs)       */ +	0xC0 +}; + +/* Mouse descriptor (2)     */ +static const char mse_descriptor[] = { +	0x05, 0x01,		/*  USAGE_PAGE (Generic Desktop)        */ +	0x09, 0x02,		/*  USAGE (Mouse)                       */ +	0xA1, 0x01,		/*  COLLECTION (Application)            */ +	0x85, 0x02,		/*    REPORT_ID = 2                     */ +	0x09, 0x01,		/*    USAGE (pointer)                   */ +	0xA1, 0x00,		/*    COLLECTION (physical)             */ +	0x05, 0x09,		/*      USAGE_PAGE (buttons)            */ +	0x19, 0x01,		/*      USAGE_MIN (1)                   */ +	0x29, 0x10,		/*      USAGE_MAX (16)                  */ +	0x15, 0x00,		/*      LOGICAL_MIN (0)                 */ +	0x25, 0x01,		/*      LOGICAL_MAX (1)                 */ +	0x95, 0x10,		/*      REPORT_COUNT (16)               */ +	0x75, 0x01,		/*      REPORT_SIZE (1)                 */ +	0x81, 0x02,		/*      INPUT (data var abs)            */ +	0x05, 0x01,		/*      USAGE_PAGE (generic desktop)    */ +	0x16, 0x01, 0xF8,	/*      LOGICAL_MIN (-2047)             */ +	0x26, 0xFF, 0x07,	/*      LOGICAL_MAX (2047)              */ +	0x75, 0x0C,		/*      REPORT_SIZE (12)                */ +	0x95, 0x02,		/*      REPORT_COUNT (2)                */ +	0x09, 0x30,		/*      USAGE (X)                       */ +	0x09, 0x31,		/*      USAGE (Y)                       */ +	0x81, 0x06,		/*      INPUT                           */ +	0x15, 0x81,		/*      LOGICAL_MIN (-127)              */ +	0x25, 0x7F,		/*      LOGICAL_MAX (127)               */ +	0x75, 0x08,		/*      REPORT_SIZE (8)                 */ +	0x95, 0x01,		/*      REPORT_COUNT (1)                */ +	0x09, 0x38,		/*      USAGE (wheel)    
               */ +	0x81, 0x06,		/*      INPUT                           */ +	0x05, 0x0C,		/*      USAGE_PAGE(consumer)            */ +	0x0A, 0x38, 0x02,	/*      USAGE(AC Pan)                   */ +	0x95, 0x01,		/*      REPORT_COUNT (1)                */ +	0x81, 0x06,		/*      INPUT                           */ +	0xC0,			/*    END_COLLECTION                    */ +	0xC0,			/*  END_COLLECTION                      */ +}; + +/* Consumer Control descriptor (3) */ +static const char consumer_descriptor[] = { +	0x05, 0x0C,		/* USAGE_PAGE (Consumer Devices)       */ +	0x09, 0x01,		/* USAGE (Consumer Control)            */ +	0xA1, 0x01,		/* COLLECTION (Application)            */ +	0x85, 0x03,		/* REPORT_ID = 3                       */ +	0x75, 0x10,		/* REPORT_SIZE (16)                    */ +	0x95, 0x02,		/* REPORT_COUNT (2)                    */ +	0x15, 0x01,		/* LOGICAL_MIN (1)                     */ +	0x26, 0x8C, 0x02,	/* LOGICAL_MAX (652)                   */ +	0x19, 0x01,		/* USAGE_MIN (1)                       */ +	0x2A, 0x8C, 0x02,	/* USAGE_MAX (652)                     */ +	0x81, 0x00,		/* INPUT (Data Ary Abs)                */ +	0xC0,			/* END_COLLECTION                      */ +};				/*                                     */ + +/* System control descriptor (4) */ +static const char syscontrol_descriptor[] = { +	0x05, 0x01,		/*   USAGE_PAGE (Generic Desktop)      */ +	0x09, 0x80,		/*   USAGE (System Control)            */ +	0xA1, 0x01,		/*   COLLECTION (Application)          */ +	0x85, 0x04,		/*   REPORT_ID = 4                     */ +	0x75, 0x02,		/*   REPORT_SIZE (2)                   */ +	0x95, 0x01,		/*   REPORT_COUNT (1)                  */ +	0x15, 0x01,		/*   LOGICAL_MIN (1)                   */ +	0x25, 0x03,		/*   LOGICAL_MAX (3)                   */ +	0x09, 0x82,		/*   USAGE (System Sleep)              */ +	0x09, 0x81,		/*   USAGE (System Power Down)         */ +	0x09, 0x83,		/*   USAGE (System Wake Up)            */ +	0x81, 0x60,		/*   INPUT (Data Ary Abs NPrf Null)    */ +	0x75, 0x06,		/*   REPORT_SIZE (6)                   */ +	0x81, 0x03,		/*   INPUT (Cnst Var Abs)              */ +	0xC0,			/*   END_COLLECTION                    */ +}; + +/* Media descriptor (8) */ +static const char media_descriptor[] = { +	0x06, 0xbc, 0xff,	/* Usage Page 0xffbc                   */ +	0x09, 0x88,		/* Usage 0x0088                        */ +	0xa1, 0x01,		/* BeginCollection                     */ +	0x85, 0x08,		/*   Report ID 8                       */ +	0x19, 0x01,		/*   Usage Min 0x0001                  */ +	0x29, 0xff,		/*   Usage Max 0x00ff                  */ +	0x15, 0x01,		/*   Logical Min 1                     */ +	0x26, 0xff, 0x00,	/*   Logical Max 255                   */ +	0x75, 0x08,		/*   Report Size 8                     */ +	0x95, 0x01,		/*   Report Count 1                    */ +	0x81, 0x00,		/*   Input                             */ +	0xc0,			/* EndCollection                       */ +};				/*                                     */ + +/* Maximum size of all defined hid reports in bytes (including report id) */ +#define MAX_REPORT_SIZE 8 + +/* Number of possible hid report types that can be created by this driver. + * + * Right now, RF report types have the same report types (or report id's) + * than the hid report created from those RF reports. In the future + * this doesnt have to be true. + * + * For instance, RF report type 0x01 which has a size of 8 bytes, corresponds + * to hid report id 0x01, this is standard keyboard. 
Same thing applies to mice + * reports and consumer control, etc. If a new RF report is created, it doesn't + * has to have the same report id as its corresponding hid report, so an + * translation may have to take place for future report types. + */ +#define NUMBER_OF_HID_REPORTS 32 +static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = { +	[1] = 8,		/* Standard keyboard */ +	[2] = 8,		/* Standard mouse */ +	[3] = 5,		/* Consumer control */ +	[4] = 2,		/* System control */ +	[8] = 2,		/* Media Center */ +}; + + +#define LOGITECH_DJ_INTERFACE_NUMBER 0x02 + +static struct hid_ll_driver logi_dj_ll_driver; + +static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf, +					size_t count, +					unsigned char report_type); + +static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev, +						struct dj_report *dj_report) +{ +	/* Called in delayed work context */ +	struct dj_device *dj_dev; +	unsigned long flags; + +	spin_lock_irqsave(&djrcv_dev->lock, flags); +	dj_dev = djrcv_dev->paired_dj_devices[dj_report->device_index]; +	djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL; +	spin_unlock_irqrestore(&djrcv_dev->lock, flags); + +	if (dj_dev != NULL) { +		hid_destroy_device(dj_dev->hdev); +		kfree(dj_dev); +	} else { +		dev_err(&djrcv_dev->hdev->dev, "%s: can't destroy a NULL device\n", +			__func__); +	} +} + +static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, +					  struct dj_report *dj_report) +{ +	/* Called in delayed work context */ +	struct hid_device *djrcv_hdev = djrcv_dev->hdev; +	struct usb_interface *intf = to_usb_interface(djrcv_hdev->dev.parent); +	struct usb_device *usbdev = interface_to_usbdev(intf); +	struct hid_device *dj_hiddev; +	struct dj_device *dj_dev; + +	/* Device index goes from 1 to 6, we need 3 bytes to store the +	 * semicolon, the index, and a null terminator +	 */ +	unsigned char tmpstr[3]; + +	if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] & +	    SPFUNCTION_DEVICE_LIST_EMPTY) { +		dbg_hid("%s: device list is empty\n", __func__); +		return; +	} + +	if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || +	    (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { +		dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n", +			__func__, dj_report->device_index); +		return; +	} + +	dj_hiddev = hid_allocate_device(); +	if (IS_ERR(dj_hiddev)) { +		dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n", +			__func__); +		return; +	} + +	dj_hiddev->ll_driver = &logi_dj_ll_driver; +	dj_hiddev->hid_output_raw_report = logi_dj_output_hidraw_report; + +	dj_hiddev->dev.parent = &djrcv_hdev->dev; +	dj_hiddev->bus = BUS_USB; +	dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor); +	dj_hiddev->product = le16_to_cpu(usbdev->descriptor.idProduct); +	snprintf(dj_hiddev->name, sizeof(dj_hiddev->name), +		"Logitech Unifying Device. 
Wireless PID:%02x%02x", +		dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB], +		dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]); + +	usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys)); +	snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index); +	strlcat(dj_hiddev->phys, tmpstr, sizeof(dj_hiddev->phys)); + +	dj_dev = kzalloc(sizeof(struct dj_device), GFP_KERNEL); + +	if (!dj_dev) { +		dev_err(&djrcv_hdev->dev, "%s: failed allocating dj_device\n", +			__func__); +		goto dj_device_allocate_fail; +	} + +	dj_dev->reports_supported = le32_to_cpu( +		dj_report->report_params[DEVICE_PAIRED_RF_REPORT_TYPE]); +	dj_dev->hdev = dj_hiddev; +	dj_dev->dj_receiver_dev = djrcv_dev; +	dj_dev->device_index = dj_report->device_index; +	dj_hiddev->driver_data = dj_dev; + +	djrcv_dev->paired_dj_devices[dj_report->device_index] = dj_dev; + +	if (hid_add_device(dj_hiddev)) { +		dev_err(&djrcv_hdev->dev, "%s: failed adding dj_device\n", +			__func__); +		goto hid_add_device_fail; +	} + +	return; + +hid_add_device_fail: +	djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL; +	kfree(dj_dev); +dj_device_allocate_fail: +	hid_destroy_device(dj_hiddev); +} + +static void delayedwork_callback(struct work_struct *work) +{ +	struct dj_receiver_dev *djrcv_dev = +		container_of(work, struct dj_receiver_dev, work); + +	struct dj_report dj_report; +	unsigned long flags; +	int count; + +	dbg_hid("%s\n", __func__); + +	spin_lock_irqsave(&djrcv_dev->lock, flags); + +	count = kfifo_out(&djrcv_dev->notif_fifo, &dj_report, +				sizeof(struct dj_report)); + +	if (count != sizeof(struct dj_report)) { +		dev_err(&djrcv_dev->hdev->dev, "%s: workitem triggered without " +			"notifications available\n", __func__); +		spin_unlock_irqrestore(&djrcv_dev->lock, flags); +		return; +	} + +	if (!kfifo_is_empty(&djrcv_dev->notif_fifo)) { +		if (schedule_work(&djrcv_dev->work) == 0) { +			dbg_hid("%s: did not schedule the work item, was " +				"already queued\n", __func__); +		} +	} + +	spin_unlock_irqrestore(&djrcv_dev->lock, flags); + +	switch (dj_report.report_type) { +	case REPORT_TYPE_NOTIF_DEVICE_PAIRED: +		logi_dj_recv_add_djhid_device(djrcv_dev, &dj_report); +		break; +	case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: +		logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report); +		break; +	default: +		dbg_hid("%s: unexpected report type\n", __func__); +	} +} + +static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev, +					   struct dj_report *dj_report) +{ +	/* We are called from atomic context (tasklet && djrcv->lock held) */ + +	kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report)); + +	if (schedule_work(&djrcv_dev->work) == 0) { +		dbg_hid("%s: did not schedule the work item, was already " +			"queued\n", __func__); +	} +} + +static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev, +					     struct dj_report *dj_report) +{ +	/* We are called from atomic context (tasklet && djrcv->lock held) */ +	unsigned int i; +	u8 reportbuffer[MAX_REPORT_SIZE]; +	struct dj_device *djdev; + +	djdev = djrcv_dev->paired_dj_devices[dj_report->device_index]; + +	if (!djdev) { +		dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" +			" is NULL, index %d\n", dj_report->device_index); +		return; +	} + +	memset(reportbuffer, 0, sizeof(reportbuffer)); + +	for (i = 0; i < NUMBER_OF_HID_REPORTS; i++) { +		if (djdev->reports_supported & (1 << i)) { +			reportbuffer[0] = i; +			if (hid_input_report(djdev->hdev, +					     HID_INPUT_REPORT, +					  
   reportbuffer, +					     hid_reportid_size_map[i], 1)) { +				dbg_hid("hid_input_report error sending null " +					"report\n"); +			} +		} +	} +} + +static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev, +					struct dj_report *dj_report) +{ +	/* We are called from atomic context (tasklet && djrcv->lock held) */ +	struct dj_device *dj_device; + +	dj_device = djrcv_dev->paired_dj_devices[dj_report->device_index]; + +	if (dj_device == NULL) { +		dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" +			" is NULL, index %d\n", dj_report->device_index); +		return; +	} + +	if ((dj_report->report_type > ARRAY_SIZE(hid_reportid_size_map) - 1) || +	    (hid_reportid_size_map[dj_report->report_type] == 0)) { +		dbg_hid("invalid report type:%x\n", dj_report->report_type); +		return; +	} + +	if (hid_input_report(dj_device->hdev, +			HID_INPUT_REPORT, &dj_report->report_type, +			hid_reportid_size_map[dj_report->report_type], 1)) { +		dbg_hid("hid_input_report error\n"); +	} +} + + +static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev, +				    struct dj_report *dj_report) +{ +	struct hid_device *hdev = djrcv_dev->hdev; +	int sent_bytes; + +	if (!hdev->hid_output_raw_report) { +		dev_err(&hdev->dev, "%s:" +			"hid_output_raw_report is null\n", __func__); +		return -ENODEV; +	} + +	sent_bytes = hdev->hid_output_raw_report(hdev, (u8 *) dj_report, +						 sizeof(struct dj_report), +						 HID_OUTPUT_REPORT); + +	return (sent_bytes < 0) ? sent_bytes : 0; +} + +static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) +{ +	struct dj_report dj_report; + +	memset(&dj_report, 0, sizeof(dj_report)); +	dj_report.report_id = REPORT_ID_DJ_SHORT; +	dj_report.device_index = 0xFF; +	dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES; +	return logi_dj_recv_send_report(djrcv_dev, &dj_report); +} + +static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, +					  unsigned timeout) +{ +	struct dj_report dj_report; + +	memset(&dj_report, 0, sizeof(dj_report)); +	dj_report.report_id = REPORT_ID_DJ_SHORT; +	dj_report.device_index = 0xFF; +	dj_report.report_type = REPORT_TYPE_CMD_SWITCH; +	dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x1F; +	dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout; +	return logi_dj_recv_send_report(djrcv_dev, &dj_report); +} + + +static int logi_dj_ll_open(struct hid_device *hid) +{ +	dbg_hid("%s:%s\n", __func__, hid->phys); +	return 0; + +} + +static void logi_dj_ll_close(struct hid_device *hid) +{ +	dbg_hid("%s:%s\n", __func__, hid->phys); +} + +static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf, +					size_t count, +					unsigned char report_type) +{ +	/* Called by hid raw to send data */ +	dbg_hid("%s\n", __func__); + +	return 0; +} + +static int logi_dj_ll_parse(struct hid_device *hid) +{ +	struct dj_device *djdev = hid->driver_data; +	int retval; + +	dbg_hid("%s\n", __func__); + +	djdev->hdev->version = 0x0111; +	djdev->hdev->country = 0x00; + +	if (djdev->reports_supported & STD_KEYBOARD) { +		dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n", +			__func__, djdev->reports_supported); +		retval = hid_parse_report(hid, +					  (u8 *) kbd_descriptor, +					  sizeof(kbd_descriptor)); +		if (retval) { +			dbg_hid("%s: sending a kbd descriptor, hid_parse failed" +				" error: %d\n", __func__, retval); +			return retval; +		} +	} + +	if (djdev->reports_supported & STD_MOUSE) { +		dbg_hid("%s: sending a mouse descriptor, 
reports_supported: " +			"%x\n", __func__, djdev->reports_supported); +		retval = hid_parse_report(hid, +					  (u8 *) mse_descriptor, +					  sizeof(mse_descriptor)); +		if (retval) { +			dbg_hid("%s: sending a mouse descriptor, hid_parse " +				"failed error: %d\n", __func__, retval); +			return retval; +		} +	} + +	if (djdev->reports_supported & MULTIMEDIA) { +		dbg_hid("%s: sending a multimedia report descriptor: %x\n", +			__func__, djdev->reports_supported); +		retval = hid_parse_report(hid, +					  (u8 *) consumer_descriptor, +					  sizeof(consumer_descriptor)); +		if (retval) { +			dbg_hid("%s: sending a consumer_descriptor, hid_parse " +				"failed error: %d\n", __func__, retval); +			return retval; +		} +	} + +	if (djdev->reports_supported & POWER_KEYS) { +		dbg_hid("%s: sending a power keys report descriptor: %x\n", +			__func__, djdev->reports_supported); +		retval = hid_parse_report(hid, +					  (u8 *) syscontrol_descriptor, +					  sizeof(syscontrol_descriptor)); +		if (retval) { +			dbg_hid("%s: sending a syscontrol_descriptor, " +				"hid_parse failed error: %d\n", +				__func__, retval); +			return retval; +		} +	} + +	if (djdev->reports_supported & MEDIA_CENTER) { +		dbg_hid("%s: sending a media center report descriptor: %x\n", +			__func__, djdev->reports_supported); +		retval = hid_parse_report(hid, +					  (u8 *) media_descriptor, +					  sizeof(media_descriptor)); +		if (retval) { +			dbg_hid("%s: sending a media_descriptor, hid_parse " +				"failed error: %d\n", __func__, retval); +			return retval; +		} +	} + +	if (djdev->reports_supported & KBD_LEDS) { +		dbg_hid("%s: need to send kbd leds report descriptor: %x\n", +			__func__, djdev->reports_supported); +	} + +	return 0; +} + +static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type, +				  unsigned int code, int value) +{ +	/* Sent by the input layer to handle leds and Force Feedback */ +	struct hid_device *dj_hiddev = input_get_drvdata(dev); +	struct dj_device *dj_dev = dj_hiddev->driver_data; + +	struct dj_receiver_dev *djrcv_dev = +	    dev_get_drvdata(dj_hiddev->dev.parent); +	struct hid_device *dj_rcv_hiddev = djrcv_dev->hdev; +	struct hid_report_enum *output_report_enum; + +	struct hid_field *field; +	struct hid_report *report; +	unsigned char data[8]; +	int offset; + +	dbg_hid("%s: %s, type:%d | code:%d | value:%d\n", +		__func__, dev->phys, type, code, value); + +	if (type != EV_LED) +		return -1; + +	offset = hidinput_find_field(dj_hiddev, type, code, &field); + +	if (offset == -1) { +		dev_warn(&dev->dev, "event field not found\n"); +		return -1; +	} +	hid_set_field(field, offset, value); +	hid_output_report(field->report, &data[0]); + +	output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT]; +	report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT]; +	hid_set_field(report->field[0], 0, dj_dev->device_index); +	hid_set_field(report->field[0], 1, REPORT_TYPE_LEDS); +	hid_set_field(report->field[0], 2, data[1]); + +	usbhid_submit_report(dj_rcv_hiddev, report, USB_DIR_OUT); + +	return 0; + +} + +static int logi_dj_ll_start(struct hid_device *hid) +{ +	dbg_hid("%s\n", __func__); +	return 0; +} + +static void logi_dj_ll_stop(struct hid_device *hid) +{ +	dbg_hid("%s\n", __func__); +} + + +static struct hid_ll_driver logi_dj_ll_driver = { +	.parse = logi_dj_ll_parse, +	.start = logi_dj_ll_start, +	.stop = logi_dj_ll_stop, +	.open = logi_dj_ll_open, +	.close = logi_dj_ll_close, +	.hidinput_input_event = logi_dj_ll_input_event, +}; + + +static int logi_dj_raw_event(struct 
hid_device *hdev, +			     struct hid_report *report, u8 *data, +			     int size) +{ +	struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); +	struct dj_report *dj_report = (struct dj_report *) data; +	unsigned long flags; +	bool report_processed = false; + +	dbg_hid("%s, size:%d\n", __func__, size); + +	/* Here we receive all data coming from iface 2, there are 4 cases: +	 * +	 * 1) Data should continue its normal processing i.e. data does not +	 * come from the DJ collection, in which case we do nothing and +	 * return 0, so hid-core can continue normal processing (will forward +	 * to associated hidraw device) +	 * +	 * 2) Data is from DJ collection, and is intended for this driver i. e. +	 * data contains arrival, departure, etc notifications, in which case +	 * we queue them for delayed processing by the work queue. We return 1 +	 * to hid-core as no further processing is required from it. +	 * +	 * 3) Data is from DJ collection, and informs a connection change, +	 * if the change means rf link loss, then we must send a null report +	 * to the upper layer to discard potentially pressed keys that may be +	 * repeated forever by the input layer. Return 1 to hid-core as no +	 * further processing is required. +	 * +	 * 4) Data is from DJ collection and is an actual input event from +	 * a paired DJ device in which case we forward it to the correct hid +	 * device (via hid_input_report() ) and return 1 so hid-core does not do +	 * anything else with it. +	 */ + +	spin_lock_irqsave(&djrcv_dev->lock, flags); +	if (dj_report->report_id == REPORT_ID_DJ_SHORT) { +		switch (dj_report->report_type) { +		case REPORT_TYPE_NOTIF_DEVICE_PAIRED: +		case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: +			logi_dj_recv_queue_notification(djrcv_dev, dj_report); +			break; +		case REPORT_TYPE_NOTIF_CONNECTION_STATUS: +			if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == +			    STATUS_LINKLOSS) { +				logi_dj_recv_forward_null_report(djrcv_dev, dj_report); +			} +			break; +		default: +			logi_dj_recv_forward_report(djrcv_dev, dj_report); +		} +		report_processed = true; +	} +	spin_unlock_irqrestore(&djrcv_dev->lock, flags); + +	return report_processed; +} + +static int logi_dj_probe(struct hid_device *hdev, +			 const struct hid_device_id *id) +{ +	struct usb_interface *intf = to_usb_interface(hdev->dev.parent); +	struct dj_receiver_dev *djrcv_dev; +	int retval; + +	if (is_dj_device((struct dj_device *)hdev->driver_data)) +		return -ENODEV; + +	dbg_hid("%s called for ifnum %d\n", __func__, +		intf->cur_altsetting->desc.bInterfaceNumber); + +	/* Ignore interfaces 0 and 1, they will not carry any data, dont create +	 * any hid_device for them */ +	if (intf->cur_altsetting->desc.bInterfaceNumber != +	    LOGITECH_DJ_INTERFACE_NUMBER) { +		dbg_hid("%s: ignoring ifnum %d\n", __func__, +			intf->cur_altsetting->desc.bInterfaceNumber); +		return -ENODEV; +	} + +	/* Treat interface 2 */ + +	djrcv_dev = kzalloc(sizeof(struct dj_receiver_dev), GFP_KERNEL); +	if (!djrcv_dev) { +		dev_err(&hdev->dev, +			"%s:failed allocating dj_receiver_dev\n", __func__); +		return -ENOMEM; +	} +	djrcv_dev->hdev = hdev; +	INIT_WORK(&djrcv_dev->work, delayedwork_callback); +	spin_lock_init(&djrcv_dev->lock); +	if (kfifo_alloc(&djrcv_dev->notif_fifo, +			DJ_MAX_NUMBER_NOTIFICATIONS * sizeof(struct dj_report), +			GFP_KERNEL)) { +		dev_err(&hdev->dev, +			"%s:failed allocating notif_fifo\n", __func__); +		kfree(djrcv_dev); +		return -ENOMEM; +	} +	hid_set_drvdata(hdev, djrcv_dev); + +	/* Call  to usbhid to fetch the HID 
descriptors of interface 2 and +	 * subsequently call to the hid/hid-core to parse the fetched +	 * descriptors, this will in turn create the hidraw and hiddev nodes +	 * for interface 2 of the receiver */ +	retval = hid_parse(hdev); +	if (retval) { +		dev_err(&hdev->dev, +			"%s:parse of interface 2 failed\n", __func__); +		goto hid_parse_fail; +	} + +	/* Starts the usb device and connects to upper interfaces hiddev and +	 * hidraw */ +	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); +	if (retval) { +		dev_err(&hdev->dev, +			"%s:hid_hw_start returned error\n", __func__); +		goto hid_hw_start_fail; +	} + +	retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0); +	if (retval < 0) { +		dev_err(&hdev->dev, +			"%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n", +			__func__, retval); +		goto switch_to_dj_mode_fail; +	} + +	/* This is enabling the polling urb on the IN endpoint */ +	retval = hdev->ll_driver->open(hdev); +	if (retval < 0) { +		dev_err(&hdev->dev, "%s:hdev->ll_driver->open returned " +			"error:%d\n", __func__, retval); +		goto llopen_failed; +	} + +	retval = logi_dj_recv_query_paired_devices(djrcv_dev); +	if (retval < 0) { +		dev_err(&hdev->dev, "%s:logi_dj_recv_query_paired_devices " +			"error:%d\n", __func__, retval); +		goto logi_dj_recv_query_paired_devices_failed; +	} + +	return retval; + +logi_dj_recv_query_paired_devices_failed: +	hdev->ll_driver->close(hdev); + +llopen_failed: +switch_to_dj_mode_fail: +	hid_hw_stop(hdev); + +hid_hw_start_fail: +hid_parse_fail: +	kfifo_free(&djrcv_dev->notif_fifo); +	kfree(djrcv_dev); +	hid_set_drvdata(hdev, NULL); +	return retval; + +} + +#ifdef CONFIG_PM +static int logi_dj_reset_resume(struct hid_device *hdev) +{ +	int retval; +	struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); + +	retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0); +	if (retval < 0) { +		dev_err(&hdev->dev, +			"%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n", +			__func__, retval); +	} + +	return 0; +} +#endif + +static void logi_dj_remove(struct hid_device *hdev) +{ +	struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); +	struct dj_device *dj_dev; +	int i; + +	dbg_hid("%s\n", __func__); + +	cancel_work_sync(&djrcv_dev->work); + +	hdev->ll_driver->close(hdev); +	hid_hw_stop(hdev); + +	/* I suppose that at this point the only context that can access +	 * the djrecv_data is this thread as the work item is guaranteed to +	 * have finished and no more raw_event callbacks should arrive after +	 * the remove callback was triggered so no locks are put around the +	 * code below */ +	for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) { +		dj_dev = djrcv_dev->paired_dj_devices[i]; +		if (dj_dev != NULL) { +			hid_destroy_device(dj_dev->hdev); +			kfree(dj_dev); +			djrcv_dev->paired_dj_devices[i] = NULL; +		} +	} + +	kfifo_free(&djrcv_dev->notif_fifo); +	kfree(djrcv_dev); +	hid_set_drvdata(hdev, NULL); +} + +static int logi_djdevice_probe(struct hid_device *hdev, +			 const struct hid_device_id *id) +{ +	int ret; +	struct dj_device *dj_dev = hdev->driver_data; + +	if (!is_dj_device(dj_dev)) +		return -ENODEV; + +	ret = hid_parse(hdev); +	if (!ret) +		ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + +	return ret; +} + +static const struct hid_device_id logi_dj_receivers[] = { +	{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, +		USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)}, +	{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, +		USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)}, +	{} +}; + +MODULE_DEVICE_TABLE(hid, logi_dj_receivers); + +static struct 
hid_driver logi_djreceiver_driver = { +	.name = "logitech-djreceiver", +	.id_table = logi_dj_receivers, +	.probe = logi_dj_probe, +	.remove = logi_dj_remove, +	.raw_event = logi_dj_raw_event, +#ifdef CONFIG_PM +	.reset_resume = logi_dj_reset_resume, +#endif +}; + + +static const struct hid_device_id logi_dj_devices[] = { +	{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, +		USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)}, +	{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, +		USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)}, +	{} +}; + +static struct hid_driver logi_djdevice_driver = { +	.name = "logitech-djdevice", +	.id_table = logi_dj_devices, +	.probe = logi_djdevice_probe, +}; + + +static int __init logi_dj_init(void) +{ +	int retval; + +	dbg_hid("Logitech-DJ:%s\n", __func__); + +	retval = hid_register_driver(&logi_djreceiver_driver); +	if (retval) +		return retval; + +	retval = hid_register_driver(&logi_djdevice_driver); +	if (retval) +		hid_unregister_driver(&logi_djreceiver_driver); + +	return retval; + +} + +static void __exit logi_dj_exit(void) +{ +	dbg_hid("Logitech-DJ:%s\n", __func__); + +	hid_unregister_driver(&logi_djdevice_driver); +	hid_unregister_driver(&logi_djreceiver_driver); + +} + +module_init(logi_dj_init); +module_exit(logi_dj_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Logitech"); +MODULE_AUTHOR("Nestor Lopez Casado"); +MODULE_AUTHOR("nlopezcasad@logitech.com"); diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h new file mode 100644 index 00000000000..fd28a5e0ca3 --- /dev/null +++ b/drivers/hid/hid-logitech-dj.h @@ -0,0 +1,123 @@ +#ifndef __HID_LOGITECH_DJ_H +#define __HID_LOGITECH_DJ_H + +/* + *  HID driver for Logitech Unifying receivers + * + *  Copyright (c) 2011 Logitech + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/kfifo.h> + +#define DJ_MAX_PAIRED_DEVICES			6 +#define DJ_MAX_NUMBER_NOTIFICATIONS		8 +#define DJ_DEVICE_INDEX_MIN 			1 +#define DJ_DEVICE_INDEX_MAX 			6 + +#define DJREPORT_SHORT_LENGTH			15 +#define DJREPORT_LONG_LENGTH			32 + +#define REPORT_ID_DJ_SHORT			0x20 +#define REPORT_ID_DJ_LONG			0x21 + +#define REPORT_TYPE_RFREPORT_FIRST		0x01 +#define REPORT_TYPE_RFREPORT_LAST		0x1F + +/* Command Switch to DJ mode */ +#define REPORT_TYPE_CMD_SWITCH			0x80 +#define CMD_SWITCH_PARAM_DEVBITFIELD		0x00 +#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS	0x01 +#define TIMEOUT_NO_KEEPALIVE			0x00 + +/* Command to Get the list of Paired devices */ +#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES	0x81 + +/* Device Paired Notification */ +#define REPORT_TYPE_NOTIF_DEVICE_PAIRED		0x41 +#define SPFUNCTION_MORE_NOTIF_EXPECTED		0x01 +#define SPFUNCTION_DEVICE_LIST_EMPTY		0x02 +#define DEVICE_PAIRED_PARAM_SPFUNCTION		0x00 +#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB	0x01 +#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB	0x02 +#define DEVICE_PAIRED_RF_REPORT_TYPE		0x03 + +/* Device Un-Paired Notification */ +#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED	0x40 + + +/* Connection Status Notification */ +#define REPORT_TYPE_NOTIF_CONNECTION_STATUS	0x42 +#define CONNECTION_STATUS_PARAM_STATUS		0x00 +#define STATUS_LINKLOSS				0x01 + +/* Error Notification */ +#define REPORT_TYPE_NOTIF_ERROR			0x7F +#define NOTIF_ERROR_PARAM_ETYPE			0x00 +#define ETYPE_KEEPALIVE_TIMEOUT			0x01 + +/* supported DJ HID && RF report types */ +#define REPORT_TYPE_KEYBOARD			0x01 +#define REPORT_TYPE_MOUSE			0x02 +#define REPORT_TYPE_CONSUMER_CONTROL		0x03 +#define REPORT_TYPE_SYSTEM_CONTROL		0x04 +#define REPORT_TYPE_MEDIA_CENTER		0x08 +#define REPORT_TYPE_LEDS			0x0E + +/* RF Report types bitfield */ +#define STD_KEYBOARD				0x00000002 +#define STD_MOUSE				0x00000004 +#define MULTIMEDIA				0x00000008 +#define POWER_KEYS				0x00000010 +#define MEDIA_CENTER				0x00000100 +#define KBD_LEDS				0x00004000 + +struct dj_report { +	u8 report_id; +	u8 device_index; +	u8 report_type; +	u8 report_params[DJREPORT_SHORT_LENGTH - 3]; +}; + +struct dj_receiver_dev { +	struct hid_device *hdev; +	struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES + +					    DJ_DEVICE_INDEX_MIN]; +	struct work_struct work; +	struct kfifo notif_fifo; +	spinlock_t lock; +}; + +struct dj_device { +	struct hid_device *hdev; +	struct dj_receiver_dev *dj_receiver_dev; +	u32 reports_supported; +	u8 device_index; +}; + +/** + * is_dj_device - know if the given dj_device is not the receiver. + * @dj_dev: the dj device to test + * + * This macro tests if a struct dj_device pointer is a device created + * by the bus enumarator. + */ +#define is_dj_device(dj_dev) \ +	(&(dj_dev)->dj_receiver_dev->hdev->dev == (dj_dev)->hdev->dev.parent) + +#endif diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 0ec91c18a42..2ab71758e2e 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie  #define NO_TOUCHES -1  #define SINGLE_TOUCH_UP -2 +/* Touch surface information. Dimension is in hundredths of a mm, min and max + * are in units. 
*/ +#define MOUSE_DIMENSION_X (float)9056 +#define MOUSE_MIN_X -1100 +#define MOUSE_MAX_X 1258 +#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100)) +#define MOUSE_DIMENSION_Y (float)5152 +#define MOUSE_MIN_Y -1589 +#define MOUSE_MAX_Y 2047 +#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100)) + +#define TRACKPAD_DIMENSION_X (float)13000 +#define TRACKPAD_MIN_X -2909 +#define TRACKPAD_MAX_X 3167 +#define TRACKPAD_RES_X \ +	((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100)) +#define TRACKPAD_DIMENSION_Y (float)11000 +#define TRACKPAD_MIN_Y -2456 +#define TRACKPAD_MAX_Y 2565 +#define TRACKPAD_RES_Y \ +	((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100)) +  /**   * struct magicmouse_sc - Tracks Magic Mouse-specific data.   * @input: Input device through which we report events. @@ -383,6 +405,13 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h  			__set_bit(REL_HWHEEL, input->relbit);  		}  	} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ +		/* input->keybit is initialized with incorrect button info +		 * for Magic Trackpad. There really is only one physical +		 * button (BTN_LEFT == BTN_MOUSE). Make sure we don't +		 * advertise buttons that don't exist... +		 */ +		__clear_bit(BTN_RIGHT, input->keybit); +		__clear_bit(BTN_MIDDLE, input->keybit);  		__set_bit(BTN_MOUSE, input->keybit);  		__set_bit(BTN_TOOL_FINGER, input->keybit);  		__set_bit(BTN_TOOL_DOUBLETAP, input->keybit); @@ -406,17 +435,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h  		 * inverse of the reported Y.  		 */  		if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { -			input_set_abs_params(input, ABS_MT_POSITION_X, -1100, -				1358, 4, 0); -			input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, -				2047, 4, 0); +			input_set_abs_params(input, ABS_MT_POSITION_X, +				MOUSE_MIN_X, MOUSE_MAX_X, 4, 0); +			input_set_abs_params(input, ABS_MT_POSITION_Y, +				MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0); + +			input_abs_set_res(input, ABS_MT_POSITION_X, +				MOUSE_RES_X); +			input_abs_set_res(input, ABS_MT_POSITION_Y, +				MOUSE_RES_Y);  		} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ -			input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0); -			input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0); -			input_set_abs_params(input, ABS_MT_POSITION_X, -2909, -				3167, 4, 0); -			input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, -				2565, 4, 0); +			input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X, +				TRACKPAD_MAX_X, 4, 0); +			input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y, +				TRACKPAD_MAX_Y, 4, 0); +			input_set_abs_params(input, ABS_MT_POSITION_X, +				TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0); +			input_set_abs_params(input, ABS_MT_POSITION_Y, +				TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0); + +			input_abs_set_res(input, ABS_X, TRACKPAD_RES_X); +			input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y); +			input_abs_set_res(input, ABS_MT_POSITION_X, +				TRACKPAD_RES_X); +			input_abs_set_res(input, ABS_MT_POSITION_Y, +				TRACKPAD_RES_Y);  		}  		input_set_events_per_packet(input, 60); @@ -501,9 +544,17 @@ static int magicmouse_probe(struct hid_device *hdev,  	}  	report->size = 6; +	/* +	 * Some devices repond with 'invalid report id' when feature +	 * report switching it into multitouch mode is sent to it. +	 * +	 * This results in -EIO from the _raw low-level transport callback, +	 * but there seems to be no other way of switching the mode. 
+	 * Thus the super-ugly hacky success check below. +	 */  	ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),  			HID_FEATURE_REPORT); -	if (ret != sizeof(feature)) { +	if (ret != -EIO && ret != sizeof(feature)) {  		hid_err(hdev, "unable to request touch data (%d)\n", ret);  		goto err_stop_hw;  	} diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 58d0e7aaf08..fa5d7a1ffa9 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -47,10 +47,11 @@ MODULE_LICENSE("GPL");  #define MT_QUIRK_SLOT_IS_CONTACTID	(1 << 1)  #define MT_QUIRK_CYPRESS		(1 << 2)  #define MT_QUIRK_SLOT_IS_CONTACTNUMBER	(1 << 3) -#define MT_QUIRK_VALID_IS_INRANGE	(1 << 4) -#define MT_QUIRK_VALID_IS_CONFIDENCE	(1 << 5) -#define MT_QUIRK_EGALAX_XYZ_FIXUP	(1 << 6) -#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE	(1 << 7) +#define MT_QUIRK_ALWAYS_VALID		(1 << 4) +#define MT_QUIRK_VALID_IS_INRANGE	(1 << 5) +#define MT_QUIRK_VALID_IS_CONFIDENCE	(1 << 6) +#define MT_QUIRK_EGALAX_XYZ_FIXUP	(1 << 7) +#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE	(1 << 8)  struct mt_slot {  	__s32 x, y, p, w, h; @@ -86,11 +87,12 @@ struct mt_class {  /* classes of device behavior */  #define MT_CLS_DEFAULT				0x0001 -#define MT_CLS_CONFIDENCE			0x0002 -#define MT_CLS_CONFIDENCE_MINUS_ONE		0x0003 -#define MT_CLS_DUAL_INRANGE_CONTACTID		0x0004 -#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER	0x0005 -#define MT_CLS_DUAL_NSMU_CONTACTID		0x0006 +#define MT_CLS_SERIAL				0x0002 +#define MT_CLS_CONFIDENCE			0x0003 +#define MT_CLS_CONFIDENCE_MINUS_ONE		0x0004 +#define MT_CLS_DUAL_INRANGE_CONTACTID		0x0005 +#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER	0x0006 +#define MT_CLS_DUAL_NSMU_CONTACTID		0x0007  /* vendor specific classes */  #define MT_CLS_3M				0x0101 @@ -134,6 +136,8 @@ static int find_slot_from_contactid(struct mt_device *td)  struct mt_class mt_classes[] = {  	{ .name = MT_CLS_DEFAULT,  		.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP }, +	{ .name = MT_CLS_SERIAL, +		.quirks = MT_QUIRK_ALWAYS_VALID},  	{ .name = MT_CLS_CONFIDENCE,  		.quirks = MT_QUIRK_VALID_IS_CONFIDENCE },  	{ .name = MT_CLS_CONFIDENCE_MINUS_ONE, @@ -213,6 +217,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,  	struct mt_class *cls = td->mtclass;  	__s32 quirks = cls->quirks; +	/* Only map fields from TouchScreen or TouchPad collections. +         * We need to ignore fields that belong to other collections +         * such as Mouse that might have the same GenericDesktop usages. 
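+         * Fields from any other collection are returned unhandled (0) so
+         * that the generic hidinput code can still map them normally.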
*/ +	if (field->application == HID_DG_TOUCHSCREEN) +		set_bit(INPUT_PROP_DIRECT, hi->input->propbit); +	else if (field->application == HID_DG_TOUCHPAD) +		set_bit(INPUT_PROP_POINTER, hi->input->propbit); +	else +		return 0; +  	switch (usage->hid & HID_USAGE_PAGE) {  	case HID_UP_GENDESK: @@ -277,6 +291,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,  			td->last_slot_field = usage->hid;  			td->last_field_index = field->index;  			td->last_mt_collection = usage->collection_index; +			hdev->quirks &= ~HID_QUIRK_MULTITOUCH;  			return 1;  		case HID_DG_WIDTH:  			hid_map_usage(hi, usage, bit, max, @@ -435,7 +450,9 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,  	if (hid->claimed & HID_CLAIMED_INPUT && td->slots) {  		switch (usage->hid) {  		case HID_DG_INRANGE: -			if (quirks & MT_QUIRK_VALID_IS_INRANGE) +			if (quirks & MT_QUIRK_ALWAYS_VALID) +				td->curvalid = true; +			else if (quirks & MT_QUIRK_VALID_IS_INRANGE)  				td->curvalid = value;  			break;  		case HID_DG_TIPSWITCH: @@ -513,12 +530,44 @@ static void mt_set_input_mode(struct hid_device *hdev)  	}  } +/* a list of devices for which there is a specialized multitouch driver */ +static const struct hid_device_id mt_have_special_driver[] = { +	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0001) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0006) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, +			USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, +			USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, +	{ } +}; + +static bool mt_match_one_id(struct hid_device *hdev, +		const struct hid_device_id *id) +{ +	return id->bus == hdev->bus && +		(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && +		(id->product == HID_ANY_ID || id->product == hdev->product); +} + +static const struct hid_device_id *mt_match_id(struct hid_device *hdev, +		const struct hid_device_id *id) +{ +	for (; id->bus; id++) +		if (mt_match_one_id(hdev, id)) +			return id; + +	return NULL; +} +  static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)  {  	int ret, i;  	struct mt_device *td;  	struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */ +	if (mt_match_id(hdev, mt_have_special_driver)) +		return -ENODEV; +  	for (i = 0; mt_classes[i].name ; i++) {  		if (id->driver_data == mt_classes[i].name) {  			mtclass = &(mt_classes[i]); @@ -526,10 +575,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)  		}  	} -	/* This allows the driver to correctly support devices -	 * that emit events over several HID messages. -	 */ -	hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;  	td = kzalloc(sizeof(struct mt_device), GFP_KERNEL);  	if (!td) { @@ -545,10 +590,16 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)  	if (ret != 0)  		goto fail; +	hdev->quirks |= HID_QUIRK_MULTITOUCH;  	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);  	if (ret)  		goto fail; +	/* This allows the driver to correctly support devices +	 * that emit events over several HID messages. 
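+	 * HID_QUIRK_NO_INPUT_SYNC suppresses the input_sync() the core would
+	 * otherwise emit after each report, leaving it to this driver to sync
+	 * once a complete touch frame has been assembled.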
+	 */ +	hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC; +  	td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),  				GFP_KERNEL);  	if (!td->slots) { @@ -662,6 +713,11 @@ static const struct hid_device_id mt_devices[] = {  		HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,  			USB_DEVICE_ID_GOODTOUCH_000f) }, +	/* Ideacom panel */ +	{ .driver_data = MT_CLS_SERIAL, +		HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, +			USB_DEVICE_ID_IDEACOM_IDC6650) }, +  	/* Ilitek dual touch panel */  	{  .driver_data = MT_CLS_DEFAULT,  		HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, @@ -672,6 +728,11 @@ static const struct hid_device_id mt_devices[] = {  		HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,  			USB_DEVICE_ID_IRTOUCH_INFRARED_USB) }, +	/* LG Display panels */ +	{ .driver_data = MT_CLS_DEFAULT, +		HID_USB_DEVICE(USB_VENDOR_ID_LG, +			USB_DEVICE_ID_LG_MULTITOUCH) }, +  	/* Lumio panels */  	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,  		HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, @@ -732,6 +793,10 @@ static const struct hid_device_id mt_devices[] = {  		HID_USB_DEVICE(USB_VENDOR_ID_XAT,  			USB_DEVICE_ID_XAT_CSR) }, +	/* Rest of the world */ +	{ .driver_data = MT_CLS_DEFAULT, +		HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) }, +  	{ }  };  MODULE_DEVICE_TABLE(hid, mt_devices); diff --git a/drivers/hid/hid-primax.c b/drivers/hid/hid-primax.c new file mode 100644 index 00000000000..4d3c60d8831 --- /dev/null +++ b/drivers/hid/hid-primax.c @@ -0,0 +1,117 @@ +/* + * HID driver for primax and similar keyboards with in-band modifiers + * + * Copyright 2011 Google Inc. All Rights Reserved + * + * Author: + *	Terry Lambert <tlambert@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + */ + +#include <linux/device.h> +#include <linux/hid.h> +#include <linux/module.h> + +#include "hid-ids.h" + +static int px_raw_event(struct hid_device *hid, struct hid_report *report, +	 u8 *data, int size) +{ +	int idx = size; + +	switch (report->id) { +	case 0:		/* keyboard input */ +		/* +		 * Convert in-band modifier key values into out of band +		 * modifier bits and pull the key strokes from the report. +		 * Thus a report data set which looked like: +		 * +		 * [00][00][E0][30][00][00][00][00] +		 * (no modifier bits + "Left Shift" key + "1" key) +		 * +		 * Would be converted to: +		 * +		 * [01][00][00][30][00][00][00][00] +		 * (Left Shift modifier bit + "1" key) +		 * +		 * As long as it's in the size range, the upper level +		 * drivers don't particularly care if there are in-band +		 * 0-valued keys, so they don't stop parsing. 
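+		 * The loop below walks the report backwards down to byte 2,
+		 * folding any usage in the 0xE0-0xE7 modifier range into the
+		 * bitmask in byte 0 and clearing the in-band slot.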
+		 */ +		while (--idx > 1) { +			if (data[idx] < 0xE0 || data[idx] > 0xE7) +				continue; +			data[0] |= (1 << (data[idx] - 0xE0)); +			data[idx] = 0; +		} +		hid_report_raw_event(hid, HID_INPUT_REPORT, data, size, 0); +		return 1; + +	default:	/* unknown report */ +		/* Unknown report type; pass upstream */ +		hid_info(hid, "unknown report type %d\n", report->id); +		break; +	} + +	return 0; +} + +static int px_probe(struct hid_device *hid, const struct hid_device_id *id) +{ +	int ret; + +	ret = hid_parse(hid); +	if (ret) { +		hid_err(hid, "parse failed\n"); +		goto fail; +	} + +	ret = hid_hw_start(hid, HID_CONNECT_DEFAULT); +	if (ret) +		hid_err(hid, "hw start failed\n"); + +fail: +	return ret; +} + +static void px_remove(struct hid_device *hid) +{ +	hid_hw_stop(hid); +} + +static const struct hid_device_id px_devices[] = { +	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, +	{ } +}; +MODULE_DEVICE_TABLE(hid, px_devices); + +static struct hid_driver px_driver = { +	.name = "primax", +	.id_table = px_devices, +	.raw_event = px_raw_event, +	.probe = px_probe, +	.remove = px_remove, +}; + +static int __init px_init(void) +{ +	return hid_register_driver(&px_driver); +} + +static void __exit px_exit(void) +{ +	hid_unregister_driver(&px_driver); +} + +module_init(px_init); +module_exit(px_exit); +MODULE_AUTHOR("Terry Lambert <tlambert@google.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index 158b389d0fb..f779009104e 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -816,7 +816,7 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)  	if (pm == NULL) {  		hid_err(hdev, "can't alloc descriptor\n");  		ret = -ENOMEM; -		goto err_free; +		goto err_free_pk;  	}  	pm->pk = pk; @@ -849,10 +849,10 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)  err_stop:  	hid_hw_stop(hdev);  err_free: -	if (pm != NULL) -		kfree(pm); - +	kfree(pm); +err_free_pk:  	kfree(pk); +  	return ret;  } diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index 2b8f3a31ffb..e2072afb34b 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -37,6 +37,21 @@  static uint profile_numbers[5] = {0, 1, 2, 3, 4}; +static void kone_profile_activated(struct kone_device *kone, uint new_profile) +{ +	kone->actual_profile = new_profile; +	kone->actual_dpi = kone->profiles[new_profile - 1].startup_dpi; +} + +static void kone_profile_report(struct kone_device *kone, uint new_profile) +{ +	struct kone_roccat_report roccat_report; +	roccat_report.event = kone_mouse_event_switch_profile; +	roccat_report.value = new_profile; +	roccat_report.key = 0; +	roccat_report_event(kone->chrdev_minor, (uint8_t *)&roccat_report); +} +  static int kone_receive(struct usb_device *usb_dev, uint usb_command,  		void *data, uint size)  { @@ -283,7 +298,7 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,  			container_of(kobj, struct device, kobj)->parent->parent;  	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));  	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); -	int retval = 0, difference; +	int retval = 0, difference, old_profile;  	/* I need to get my data in one piece */  	if (off != 0 || count != sizeof(struct kone_settings)) @@ -294,21 +309,20 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,  	if (difference) {  		retval = 
kone_set_settings(usb_dev,  				(struct kone_settings const *)buf); -		if (!retval) -			memcpy(&kone->settings, buf, -					sizeof(struct kone_settings)); -	} -	mutex_unlock(&kone->kone_lock); +		if (retval) { +			mutex_unlock(&kone->kone_lock); +			return retval; +		} -	if (retval) -		return retval; +		old_profile = kone->settings.startup_profile; +		memcpy(&kone->settings, buf, sizeof(struct kone_settings)); -	/* -	 * If we get here, treat settings as okay and update actual values -	 * according to startup_profile -	 */ -	kone->actual_profile = kone->settings.startup_profile; -	kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi; +		kone_profile_activated(kone, kone->settings.startup_profile); + +		if (kone->settings.startup_profile != old_profile) +			kone_profile_report(kone, kone->settings.startup_profile); +	} +	mutex_unlock(&kone->kone_lock);  	return sizeof(struct kone_settings);  } @@ -501,6 +515,8 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,  				goto exit_no_settings;  			goto exit_unlock;  		} +		/* calibration resets profile */ +		kone_profile_activated(kone, kone->settings.startup_profile);  	}  	retval = size; @@ -544,16 +560,16 @@ static ssize_t kone_sysfs_set_startup_profile(struct device *dev,  	kone_set_settings_checksum(&kone->settings);  	retval = kone_set_settings(usb_dev, &kone->settings); - -	mutex_unlock(&kone->kone_lock); - -	if (retval) +	if (retval) { +		mutex_unlock(&kone->kone_lock);  		return retval; +	}  	/* changing the startup profile immediately activates this profile */ -	kone->actual_profile = new_startup_profile; -	kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi; +	kone_profile_activated(kone, new_startup_profile); +	kone_profile_report(kone, new_startup_profile); +	mutex_unlock(&kone->kone_lock);  	return size;  } @@ -665,8 +681,7 @@ static int kone_init_kone_device_struct(struct usb_device *usb_dev,  	if (retval)  		return retval; -	kone->actual_profile = kone->settings.startup_profile; -	kone->actual_dpi = kone->profiles[kone->actual_profile].startup_dpi; +	kone_profile_activated(kone, kone->settings.startup_profile);  	return 0;  } @@ -776,10 +791,10 @@ static void kone_keep_values_up_to_date(struct kone_device *kone,  {  	switch (event->event) {  	case kone_mouse_event_switch_profile: +		kone->actual_dpi = kone->profiles[event->value - 1]. +				startup_dpi;  	case kone_mouse_event_osd_profile:  		kone->actual_profile = event->value; -		kone->actual_dpi = kone->profiles[kone->actual_profile - 1]. 
-				startup_dpi;  		break;  	case kone_mouse_event_switch_dpi:  	case kone_mouse_event_osd_dpi: diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 1f8336e3f58..112d934132c 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -323,6 +323,7 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,  	struct usb_device *usb_dev;  	unsigned long profile;  	int retval; +	struct kovaplus_roccat_report roccat_report;  	dev = dev->parent->parent;  	kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); @@ -337,10 +338,22 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,  	mutex_lock(&kovaplus->kovaplus_lock);  	retval = kovaplus_set_actual_profile(usb_dev, profile); +	if (retval) { +		mutex_unlock(&kovaplus->kovaplus_lock); +		return retval; +	} +  	kovaplus_profile_activated(kovaplus, profile); + +	roccat_report.type = KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1; +	roccat_report.profile = profile + 1; +	roccat_report.button = 0; +	roccat_report.data1 = profile + 1; +	roccat_report.data2 = 0; +	roccat_report_event(kovaplus->chrdev_minor, +			(uint8_t const *)&roccat_report); +  	mutex_unlock(&kovaplus->kovaplus_lock); -	if (retval) -		return retval;  	return size;  } diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index 8140776bd8c..df05c1b1064 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c @@ -298,6 +298,7 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,  	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));  	int retval = 0;  	int difference; +	struct pyra_roccat_report roccat_report;  	if (off != 0 || count != sizeof(struct pyra_settings))  		return -EINVAL; @@ -307,17 +308,23 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,  	if (difference) {  		retval = pyra_set_settings(usb_dev,  				(struct pyra_settings const *)buf); -		if (!retval) -			memcpy(&pyra->settings, buf, -					sizeof(struct pyra_settings)); -	} -	mutex_unlock(&pyra->pyra_lock); +		if (retval) { +			mutex_unlock(&pyra->pyra_lock); +			return retval; +		} -	if (retval) -		return retval; +		memcpy(&pyra->settings, buf, +				sizeof(struct pyra_settings)); -	profile_activated(pyra, pyra->settings.startup_profile); +		profile_activated(pyra, pyra->settings.startup_profile); +		roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2; +		roccat_report.value = pyra->settings.startup_profile + 1; +		roccat_report.key = 0; +		roccat_report_event(pyra->chrdev_minor, +				(uint8_t const *)&roccat_report); +	} +	mutex_unlock(&pyra->pyra_lock);  	return sizeof(struct pyra_settings);  } diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c index 16f7cafc969..670da9109f8 100644 --- a/drivers/hid/hid-sjoy.c +++ b/drivers/hid/hid-sjoy.c @@ -65,8 +65,7 @@ static int sjoyff_init(struct hid_device *hid)  {  	struct sjoyff_device *sjoyff;  	struct hid_report *report; -	struct hid_input *hidinput = list_entry(hid->inputs.next, -						struct hid_input, list); +	struct hid_input *hidinput;  	struct list_head *report_list =  			&hid->report_enum[HID_OUTPUT_REPORT].report_list;  	struct list_head *report_ptr = report_list; @@ -78,43 +77,45 @@ static int sjoyff_init(struct hid_device *hid)  		return -ENODEV;  	} -	report_ptr = report_ptr->next; +	list_for_each_entry(hidinput, &hid->inputs, list) { +		report_ptr = report_ptr->next; -	if (report_ptr == report_list) { -		hid_err(hid, "required output report is missing\n"); -		return -ENODEV; -	
} +		if (report_ptr == report_list) { +			hid_err(hid, "required output report is missing\n"); +			return -ENODEV; +		} -	report = list_entry(report_ptr, struct hid_report, list); -	if (report->maxfield < 1) { -		hid_err(hid, "no fields in the report\n"); -		return -ENODEV; -	} +		report = list_entry(report_ptr, struct hid_report, list); +		if (report->maxfield < 1) { +			hid_err(hid, "no fields in the report\n"); +			return -ENODEV; +		} -	if (report->field[0]->report_count < 3) { -		hid_err(hid, "not enough values in the field\n"); -		return -ENODEV; -	} +		if (report->field[0]->report_count < 3) { +			hid_err(hid, "not enough values in the field\n"); +			return -ENODEV; +		} -	sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL); -	if (!sjoyff) -		return -ENOMEM; +		sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL); +		if (!sjoyff) +			return -ENOMEM; -	dev = hidinput->input; +		dev = hidinput->input; -	set_bit(FF_RUMBLE, dev->ffbit); +		set_bit(FF_RUMBLE, dev->ffbit); -	error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play); -	if (error) { -		kfree(sjoyff); -		return error; -	} +		error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play); +		if (error) { +			kfree(sjoyff); +			return error; +		} -	sjoyff->report = report; -	sjoyff->report->field[0]->value[0] = 0x01; -	sjoyff->report->field[0]->value[1] = 0x00; -	sjoyff->report->field[0]->value[2] = 0x00; -	usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT); +		sjoyff->report = report; +		sjoyff->report->field[0]->value[0] = 0x01; +		sjoyff->report->field[0]->value[1] = 0x00; +		sjoyff->report->field[0]->value[2] = 0x00; +		usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT); +	}  	hid_info(hid, "Force feedback for SmartJoy PLUS PS2/USB adapter\n"); @@ -131,6 +132,8 @@ static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id)  {  	int ret; +	hdev->quirks |= id->driver_data; +  	ret = hid_parse(hdev);  	if (ret) {  		hid_err(hdev, "parse failed\n"); @@ -151,7 +154,17 @@ err:  }  static const struct hid_device_id sjoy_devices[] = { +	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO), +		.driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET | +			       HID_QUIRK_SKIP_OUTPUT_REPORTS }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO), +		.driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET | +			       HID_QUIRK_SKIP_OUTPUT_REPORTS },  	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD), +		.driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET | +			       HID_QUIRK_SKIP_OUTPUT_REPORTS },  	{ }  };  MODULE_DEVICE_TABLE(hid, sjoy_devices); diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c index 06888323828..17bb88f782b 100644 --- a/drivers/hid/hid-wacom.c +++ b/drivers/hid/hid-wacom.c @@ -304,11 +304,51 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,  	return 1;  } +static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi, +	struct hid_field *field, struct hid_usage *usage, unsigned long **bit, +								int *max) +{ +	struct input_dev *input = hi->input; + +	__set_bit(INPUT_PROP_POINTER, input->propbit); + +	/* Basics */ +	input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); + +	__set_bit(REL_WHEEL, input->relbit); + +	__set_bit(BTN_TOOL_PEN, input->keybit); +	
__set_bit(BTN_TOUCH, input->keybit); +	__set_bit(BTN_STYLUS, input->keybit); +	__set_bit(BTN_STYLUS2, input->keybit); +	__set_bit(BTN_LEFT, input->keybit); +	__set_bit(BTN_RIGHT, input->keybit); +	__set_bit(BTN_MIDDLE, input->keybit); + +	/* Pad */ +	input->evbit[0] |= BIT(EV_MSC); + +	__set_bit(MSC_SERIAL, input->mscbit); + +	__set_bit(BTN_0, input->keybit); +	__set_bit(BTN_1, input->keybit); +	__set_bit(BTN_TOOL_FINGER, input->keybit); + +	/* Distance, rubber and mouse */ +	__set_bit(BTN_TOOL_RUBBER, input->keybit); +	__set_bit(BTN_TOOL_MOUSE, input->keybit); + +	input_set_abs_params(input, ABS_X, 0, 16704, 4, 0); +	input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0); +	input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0); +	input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0); + +	return 0; +} +  static int wacom_probe(struct hid_device *hdev,  		const struct hid_device_id *id)  { -	struct hid_input *hidinput; -	struct input_dev *input;  	struct wacom_data *wdata;  	int ret; @@ -353,11 +393,7 @@ static int wacom_probe(struct hid_device *hdev,  	if (ret) {  		hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",  			 ret); -		/* -		 * battery attribute is not critical for the tablet, but if it -		 * failed then there is no need to create ac attribute -		 */ -		goto move_on; +		goto err_battery;  	}  	wdata->ac.properties = wacom_ac_props; @@ -371,51 +407,18 @@ static int wacom_probe(struct hid_device *hdev,  	if (ret) {  		hid_warn(hdev,  			 "can't create ac battery attribute, err: %d\n", ret); -		/* -		 * ac attribute is not critical for the tablet, but if it -		 * failed then we don't want to battery attribute to exist -		 */ -		power_supply_unregister(&wdata->battery); +		goto err_ac;  	} - -move_on:  #endif -	hidinput = list_entry(hdev->inputs.next, struct hid_input, list); -	input = hidinput->input; - -	/* Basics */ -	input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); - -	__set_bit(REL_WHEEL, input->relbit); - -	__set_bit(BTN_TOOL_PEN, input->keybit); -	__set_bit(BTN_TOUCH, input->keybit); -	__set_bit(BTN_STYLUS, input->keybit); -	__set_bit(BTN_STYLUS2, input->keybit); -	__set_bit(BTN_LEFT, input->keybit); -	__set_bit(BTN_RIGHT, input->keybit); -	__set_bit(BTN_MIDDLE, input->keybit); - -	/* Pad */ -	input->evbit[0] |= BIT(EV_MSC); - -	__set_bit(MSC_SERIAL, input->mscbit); - -	__set_bit(BTN_0, input->keybit); -	__set_bit(BTN_1, input->keybit); -	__set_bit(BTN_TOOL_FINGER, input->keybit); - -	/* Distance, rubber and mouse */ -	__set_bit(BTN_TOOL_RUBBER, input->keybit); -	__set_bit(BTN_TOOL_MOUSE, input->keybit); - -	input_set_abs_params(input, ABS_X, 0, 16704, 4, 0); -	input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0); -	input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0); -	input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0); -  	return 0; +#ifdef CONFIG_HID_WACOM_POWER_SUPPLY +err_ac: +	power_supply_unregister(&wdata->battery); +err_battery: +	device_remove_file(&hdev->dev, &dev_attr_speed); +	hid_hw_stop(hdev); +#endif  err_free:  	kfree(wdata);  	return ret; @@ -426,6 +429,7 @@ static void wacom_remove(struct hid_device *hdev)  #ifdef CONFIG_HID_WACOM_POWER_SUPPLY  	struct wacom_data *wdata = hid_get_drvdata(hdev);  #endif +	device_remove_file(&hdev->dev, &dev_attr_speed);  	hid_hw_stop(hdev);  #ifdef CONFIG_HID_WACOM_POWER_SUPPLY @@ -448,6 +452,7 @@ static struct hid_driver wacom_driver = {  	.probe = wacom_probe,  	.remove = wacom_remove,  	.raw_event = wacom_raw_event, +	.input_mapped = wacom_input_mapped,  };  static int __init 
wacom_init(void) diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c index a594383ce03..76739c07fa3 100644 --- a/drivers/hid/hid-wiimote.c +++ b/drivers/hid/hid-wiimote.c @@ -10,15 +10,18 @@   * any later version.   */ -#include <linux/atomic.h> +#include <linux/completion.h>  #include <linux/device.h>  #include <linux/hid.h>  #include <linux/input.h> +#include <linux/leds.h>  #include <linux/module.h> +#include <linux/mutex.h> +#include <linux/power_supply.h>  #include <linux/spinlock.h>  #include "hid-ids.h" -#define WIIMOTE_VERSION "0.1" +#define WIIMOTE_VERSION "0.2"  #define WIIMOTE_NAME "Nintendo Wii Remote"  #define WIIMOTE_BUFSIZE 32 @@ -30,12 +33,26 @@ struct wiimote_buf {  struct wiimote_state {  	spinlock_t lock;  	__u8 flags; +	__u8 accel_split[2]; + +	/* synchronous cmd requests */ +	struct mutex sync; +	struct completion ready; +	int cmd; +	__u32 opt; + +	/* results of synchronous requests */ +	__u8 cmd_battery; +	__u8 cmd_err;  };  struct wiimote_data { -	atomic_t ready;  	struct hid_device *hdev;  	struct input_dev *input; +	struct led_classdev *leds[4]; +	struct input_dev *accel; +	struct input_dev *ir; +	struct power_supply battery;  	spinlock_t qlock;  	__u8 head; @@ -46,16 +63,47 @@ struct wiimote_data {  	struct wiimote_state state;  }; -#define WIIPROTO_FLAG_LED1 0x01 -#define WIIPROTO_FLAG_LED2 0x02 -#define WIIPROTO_FLAG_LED3 0x04 -#define WIIPROTO_FLAG_LED4 0x08 +#define WIIPROTO_FLAG_LED1		0x01 +#define WIIPROTO_FLAG_LED2		0x02 +#define WIIPROTO_FLAG_LED3		0x04 +#define WIIPROTO_FLAG_LED4		0x08 +#define WIIPROTO_FLAG_RUMBLE		0x10 +#define WIIPROTO_FLAG_ACCEL		0x20 +#define WIIPROTO_FLAG_IR_BASIC		0x40 +#define WIIPROTO_FLAG_IR_EXT		0x80 +#define WIIPROTO_FLAG_IR_FULL		0xc0 /* IR_BASIC | IR_EXT */  #define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \  					WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) +#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \ +							WIIPROTO_FLAG_IR_FULL) + +/* return flag for led \num */ +#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))  enum wiiproto_reqs { +	WIIPROTO_REQ_NULL = 0x0, +	WIIPROTO_REQ_RUMBLE = 0x10,  	WIIPROTO_REQ_LED = 0x11, +	WIIPROTO_REQ_DRM = 0x12, +	WIIPROTO_REQ_IR1 = 0x13, +	WIIPROTO_REQ_SREQ = 0x15, +	WIIPROTO_REQ_WMEM = 0x16, +	WIIPROTO_REQ_RMEM = 0x17, +	WIIPROTO_REQ_IR2 = 0x1a, +	WIIPROTO_REQ_STATUS = 0x20, +	WIIPROTO_REQ_DATA = 0x21, +	WIIPROTO_REQ_RETURN = 0x22,  	WIIPROTO_REQ_DRM_K = 0x30, +	WIIPROTO_REQ_DRM_KA = 0x31, +	WIIPROTO_REQ_DRM_KE = 0x32, +	WIIPROTO_REQ_DRM_KAI = 0x33, +	WIIPROTO_REQ_DRM_KEE = 0x34, +	WIIPROTO_REQ_DRM_KAE = 0x35, +	WIIPROTO_REQ_DRM_KIE = 0x36, +	WIIPROTO_REQ_DRM_KAIE = 0x37, +	WIIPROTO_REQ_DRM_E = 0x3d, +	WIIPROTO_REQ_DRM_SKAI1 = 0x3e, +	WIIPROTO_REQ_DRM_SKAI2 = 0x3f,  };  enum wiiproto_keys { @@ -87,8 +135,55 @@ static __u16 wiiproto_keymap[] = {  	BTN_MODE,	/* WIIPROTO_KEY_HOME */  }; -#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \ -									dev)) +static enum power_supply_property wiimote_battery_props[] = { +	POWER_SUPPLY_PROP_CAPACITY +}; + +/* requires the state.lock spinlock to be held */ +static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd, +								__u32 opt) +{ +	return wdata->state.cmd == cmd && wdata->state.opt == opt; +} + +/* requires the state.lock spinlock to be held */ +static inline void wiimote_cmd_complete(struct wiimote_data *wdata) +{ +	wdata->state.cmd = WIIPROTO_REQ_NULL; +	complete(&wdata->state.ready); +} + +static inline int 
wiimote_cmd_acquire(struct wiimote_data *wdata) +{ +	return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0; +} + +/* requires the state.lock spinlock to be held */ +static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd, +								__u32 opt) +{ +	INIT_COMPLETION(wdata->state.ready); +	wdata->state.cmd = cmd; +	wdata->state.opt = opt; +} + +static inline void wiimote_cmd_release(struct wiimote_data *wdata) +{ +	mutex_unlock(&wdata->state.sync); +} + +static inline int wiimote_cmd_wait(struct wiimote_data *wdata) +{ +	int ret; + +	ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ); +	if (ret < 0) +		return -ERESTARTSYS; +	else if (ret == 0) +		return -EIO; +	else +		return 0; +}  static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,  								size_t count) @@ -168,6 +263,39 @@ static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,  	spin_unlock_irqrestore(&wdata->qlock, flags);  } +/* + * This sets the rumble bit on the given output report if rumble is + * currently enabled. + * \cmd1 must point to the second byte in the output report => &cmd[1] + * This must be called on nearly every output report before passing it + * into the output queue! + */ +static inline void wiiproto_keep_rumble(struct wiimote_data *wdata, __u8 *cmd1) +{ +	if (wdata->state.flags & WIIPROTO_FLAG_RUMBLE) +		*cmd1 |= 0x01; +} + +static void wiiproto_req_rumble(struct wiimote_data *wdata, __u8 rumble) +{ +	__u8 cmd[2]; + +	rumble = !!rumble; +	if (rumble == !!(wdata->state.flags & WIIPROTO_FLAG_RUMBLE)) +		return; + +	if (rumble) +		wdata->state.flags |= WIIPROTO_FLAG_RUMBLE; +	else +		wdata->state.flags &= ~WIIPROTO_FLAG_RUMBLE; + +	cmd[0] = WIIPROTO_REQ_RUMBLE; +	cmd[1] = 0; + +	wiiproto_keep_rumble(wdata, &cmd[1]); +	wiimote_queue(wdata, cmd, sizeof(cmd)); +} +  static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)  {  	__u8 cmd[2]; @@ -189,71 +317,439 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)  	if (leds & WIIPROTO_FLAG_LED4)  		cmd[1] |= 0x80; +	wiiproto_keep_rumble(wdata, &cmd[1]); +	wiimote_queue(wdata, cmd, sizeof(cmd)); +} + +/* + * Check what peripherals of the wiimote are currently + * active and select a proper DRM that supports all of + * the requested data inputs. 
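+ * The mapping below is: basic IR plus accelerometer -> DRM_KAIE, basic IR
+ * alone -> DRM_KIE, extended IR -> DRM_KAI, full IR -> the split DRM_SKAI
+ * reports, and plain DRM_KA or DRM_K when no IR is requested.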
+ */ +static __u8 select_drm(struct wiimote_data *wdata) +{ +	__u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR; + +	if (ir == WIIPROTO_FLAG_IR_BASIC) { +		if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) +			return WIIPROTO_REQ_DRM_KAIE; +		else +			return WIIPROTO_REQ_DRM_KIE; +	} else if (ir == WIIPROTO_FLAG_IR_EXT) { +		return WIIPROTO_REQ_DRM_KAI; +	} else if (ir == WIIPROTO_FLAG_IR_FULL) { +		return WIIPROTO_REQ_DRM_SKAI1; +	} else { +		if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) +			return WIIPROTO_REQ_DRM_KA; +		else +			return WIIPROTO_REQ_DRM_K; +	} +} + +static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm) +{ +	__u8 cmd[3]; + +	if (drm == WIIPROTO_REQ_NULL) +		drm = select_drm(wdata); + +	cmd[0] = WIIPROTO_REQ_DRM; +	cmd[1] = 0; +	cmd[2] = drm; + +	wiiproto_keep_rumble(wdata, &cmd[1]);  	wiimote_queue(wdata, cmd, sizeof(cmd));  } -#define wiifs_led_show_set(num)						\ -static ssize_t wiifs_led_show_##num(struct device *dev,			\ -			struct device_attribute *attr, char *buf)	\ -{									\ -	struct wiimote_data *wdata = dev_to_wii(dev);			\ -	unsigned long flags;						\ -	int state;							\ -									\ -	if (!atomic_read(&wdata->ready))				\ -		return -EBUSY;						\ -									\ -	spin_lock_irqsave(&wdata->state.lock, flags);			\ -	state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num);	\ -	spin_unlock_irqrestore(&wdata->state.lock, flags);		\ -									\ -	return sprintf(buf, "%d\n", state);				\ -}									\ -static ssize_t wiifs_led_set_##num(struct device *dev,			\ -	struct device_attribute *attr, const char *buf, size_t count)	\ -{									\ -	struct wiimote_data *wdata = dev_to_wii(dev);			\ -	int tmp = simple_strtoul(buf, NULL, 10);			\ -	unsigned long flags;						\ -	__u8 state;							\ -									\ -	if (!atomic_read(&wdata->ready))				\ -		return -EBUSY;						\ -									\ -	spin_lock_irqsave(&wdata->state.lock, flags);			\ -									\ -	state = wdata->state.flags;					\ -									\ -	if (tmp)							\ -		wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\ -	else								\ -		wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\ -									\ -	spin_unlock_irqrestore(&wdata->state.lock, flags);		\ -									\ -	return count;							\ -}									\ -static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num,	\ -						wiifs_led_set_##num) +static void wiiproto_req_status(struct wiimote_data *wdata) +{ +	__u8 cmd[2]; + +	cmd[0] = WIIPROTO_REQ_SREQ; +	cmd[1] = 0; + +	wiiproto_keep_rumble(wdata, &cmd[1]); +	wiimote_queue(wdata, cmd, sizeof(cmd)); +} + +static void wiiproto_req_accel(struct wiimote_data *wdata, __u8 accel) +{ +	accel = !!accel; +	if (accel == !!(wdata->state.flags & WIIPROTO_FLAG_ACCEL)) +		return; + +	if (accel) +		wdata->state.flags |= WIIPROTO_FLAG_ACCEL; +	else +		wdata->state.flags &= ~WIIPROTO_FLAG_ACCEL; + +	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL); +} + +static void wiiproto_req_ir1(struct wiimote_data *wdata, __u8 flags) +{ +	__u8 cmd[2]; + +	cmd[0] = WIIPROTO_REQ_IR1; +	cmd[1] = flags; + +	wiiproto_keep_rumble(wdata, &cmd[1]); +	wiimote_queue(wdata, cmd, sizeof(cmd)); +} + +static void wiiproto_req_ir2(struct wiimote_data *wdata, __u8 flags) +{ +	__u8 cmd[2]; + +	cmd[0] = WIIPROTO_REQ_IR2; +	cmd[1] = flags; + +	wiiproto_keep_rumble(wdata, &cmd[1]); +	wiimote_queue(wdata, cmd, sizeof(cmd)); +} + +#define wiiproto_req_wreg(wdata, os, buf, sz) \ +			wiiproto_req_wmem((wdata), false, (os), (buf), (sz)) + +#define wiiproto_req_weeprom(wdata, os, buf, sz) \ +			wiiproto_req_wmem((wdata), true, (os), (buf), (sz)) + +static 
void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom, +				__u32 offset, const __u8 *buf, __u8 size) +{ +	__u8 cmd[22]; + +	if (size > 16 || size == 0) { +		hid_warn(wdata->hdev, "Invalid length %d wmem request\n", size); +		return; +	} + +	memset(cmd, 0, sizeof(cmd)); +	cmd[0] = WIIPROTO_REQ_WMEM; +	cmd[2] = (offset >> 16) & 0xff; +	cmd[3] = (offset >> 8) & 0xff; +	cmd[4] = offset & 0xff; +	cmd[5] = size; +	memcpy(&cmd[6], buf, size); + +	if (!eeprom) +		cmd[1] |= 0x04; + +	wiiproto_keep_rumble(wdata, &cmd[1]); +	wiimote_queue(wdata, cmd, sizeof(cmd)); +} + +/* requries the cmd-mutex to be held */ +static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset, +						const __u8 *wmem, __u8 size) +{ +	unsigned long flags; +	int ret; + +	spin_lock_irqsave(&wdata->state.lock, flags); +	wiimote_cmd_set(wdata, WIIPROTO_REQ_WMEM, 0); +	wiiproto_req_wreg(wdata, offset, wmem, size); +	spin_unlock_irqrestore(&wdata->state.lock, flags); + +	ret = wiimote_cmd_wait(wdata); +	if (!ret && wdata->state.cmd_err) +		ret = -EIO; + +	return ret; +} + +static int wiimote_battery_get_property(struct power_supply *psy, +						enum power_supply_property psp, +						union power_supply_propval *val) +{ +	struct wiimote_data *wdata = container_of(psy, +						struct wiimote_data, battery); +	int ret = 0, state; +	unsigned long flags; + +	ret = wiimote_cmd_acquire(wdata); +	if (ret) +		return ret; + +	spin_lock_irqsave(&wdata->state.lock, flags); +	wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0); +	wiiproto_req_status(wdata); +	spin_unlock_irqrestore(&wdata->state.lock, flags); + +	ret = wiimote_cmd_wait(wdata); +	state = wdata->state.cmd_battery; +	wiimote_cmd_release(wdata); + +	if (ret) +		return ret; + +	switch (psp) { +		case POWER_SUPPLY_PROP_CAPACITY: +			val->intval = state * 100 / 255; +			break; +		default: +			ret = -EINVAL; +			break; +	} + +	return ret; +} + +static int wiimote_init_ir(struct wiimote_data *wdata, __u16 mode) +{ +	int ret; +	unsigned long flags; +	__u8 format = 0; +	static const __u8 data_enable[] = { 0x01 }; +	static const __u8 data_sens1[] = { 0x02, 0x00, 0x00, 0x71, 0x01, +						0x00, 0xaa, 0x00, 0x64 }; +	static const __u8 data_sens2[] = { 0x63, 0x03 }; +	static const __u8 data_fin[] = { 0x08 }; + +	spin_lock_irqsave(&wdata->state.lock, flags); + +	if (mode == (wdata->state.flags & WIIPROTO_FLAGS_IR)) { +		spin_unlock_irqrestore(&wdata->state.lock, flags); +		return 0; +	} + +	if (mode == 0) { +		wdata->state.flags &= ~WIIPROTO_FLAGS_IR; +		wiiproto_req_ir1(wdata, 0); +		wiiproto_req_ir2(wdata, 0); +		wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL); +		spin_unlock_irqrestore(&wdata->state.lock, flags); +		return 0; +	} + +	spin_unlock_irqrestore(&wdata->state.lock, flags); + +	ret = wiimote_cmd_acquire(wdata); +	if (ret) +		return ret; + +	/* send PIXEL CLOCK ENABLE cmd first */ +	spin_lock_irqsave(&wdata->state.lock, flags); +	wiimote_cmd_set(wdata, WIIPROTO_REQ_IR1, 0); +	wiiproto_req_ir1(wdata, 0x06); +	spin_unlock_irqrestore(&wdata->state.lock, flags); + +	ret = wiimote_cmd_wait(wdata); +	if (ret) +		goto unlock; +	if (wdata->state.cmd_err) { +		ret = -EIO; +		goto unlock; +	} + +	/* enable IR LOGIC */ +	spin_lock_irqsave(&wdata->state.lock, flags); +	wiimote_cmd_set(wdata, WIIPROTO_REQ_IR2, 0); +	wiiproto_req_ir2(wdata, 0x06); +	spin_unlock_irqrestore(&wdata->state.lock, flags); + +	ret = wiimote_cmd_wait(wdata); +	if (ret) +		goto unlock; +	if (wdata->state.cmd_err) { +		ret = -EIO; +		goto unlock; +	} + +	/* enable IR cam but do not make it send data, yet */ +	ret = 
wiimote_cmd_write(wdata, 0xb00030, data_enable, +							sizeof(data_enable)); +	if (ret) +		goto unlock; + +	/* write first sensitivity block */ +	ret = wiimote_cmd_write(wdata, 0xb00000, data_sens1, +							sizeof(data_sens1)); +	if (ret) +		goto unlock; + +	/* write second sensitivity block */ +	ret = wiimote_cmd_write(wdata, 0xb0001a, data_sens2, +							sizeof(data_sens2)); +	if (ret) +		goto unlock; + +	/* put IR cam into desired state */ +	switch (mode) { +		case WIIPROTO_FLAG_IR_FULL: +			format = 5; +			break; +		case WIIPROTO_FLAG_IR_EXT: +			format = 3; +			break; +		case WIIPROTO_FLAG_IR_BASIC: +			format = 1; +			break; +	} +	ret = wiimote_cmd_write(wdata, 0xb00033, &format, sizeof(format)); +	if (ret) +		goto unlock; + +	/* make IR cam send data */ +	ret = wiimote_cmd_write(wdata, 0xb00030, data_fin, sizeof(data_fin)); +	if (ret) +		goto unlock; + +	/* request new DRM mode compatible to IR mode */ +	spin_lock_irqsave(&wdata->state.lock, flags); +	wdata->state.flags &= ~WIIPROTO_FLAGS_IR; +	wdata->state.flags |= mode & WIIPROTO_FLAGS_IR; +	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL); +	spin_unlock_irqrestore(&wdata->state.lock, flags); + +unlock: +	wiimote_cmd_release(wdata); +	return ret; +} + +static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev) +{ +	struct wiimote_data *wdata; +	struct device *dev = led_dev->dev->parent; +	int i; +	unsigned long flags; +	bool value = false; + +	wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); + +	for (i = 0; i < 4; ++i) { +		if (wdata->leds[i] == led_dev) { +			spin_lock_irqsave(&wdata->state.lock, flags); +			value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1); +			spin_unlock_irqrestore(&wdata->state.lock, flags); +			break; +		} +	} + +	return value ? LED_FULL : LED_OFF; +} + +static void wiimote_leds_set(struct led_classdev *led_dev, +						enum led_brightness value) +{ +	struct wiimote_data *wdata; +	struct device *dev = led_dev->dev->parent; +	int i; +	unsigned long flags; +	__u8 state, flag; + +	wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); + +	for (i = 0; i < 4; ++i) { +		if (wdata->leds[i] == led_dev) { +			flag = WIIPROTO_FLAG_LED(i + 1); +			spin_lock_irqsave(&wdata->state.lock, flags); +			state = wdata->state.flags; +			if (value == LED_OFF) +				wiiproto_req_leds(wdata, state & ~flag); +			else +				wiiproto_req_leds(wdata, state | flag); +			spin_unlock_irqrestore(&wdata->state.lock, flags); +			break; +		} +	} +} + +static int wiimote_ff_play(struct input_dev *dev, void *data, +							struct ff_effect *eff) +{ +	struct wiimote_data *wdata = input_get_drvdata(dev); +	__u8 value; +	unsigned long flags; + +	/* +	 * The wiimote supports only a single rumble motor so if any magnitude +	 * is set to non-zero then we start the rumble motor. If both are set to +	 * zero, we stop the rumble motor. 
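+	 * The memless ff core may invoke this callback from atomic context,
+	 * so the request is merely queued for the output worker here (under
+	 * the state spinlock) instead of being sent synchronously.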
+	 */ + +	if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude) +		value = 1; +	else +		value = 0; + +	spin_lock_irqsave(&wdata->state.lock, flags); +	wiiproto_req_rumble(wdata, value); +	spin_unlock_irqrestore(&wdata->state.lock, flags); + +	return 0; +} + +static int wiimote_input_open(struct input_dev *dev) +{ +	struct wiimote_data *wdata = input_get_drvdata(dev); -wiifs_led_show_set(1); -wiifs_led_show_set(2); -wiifs_led_show_set(3); -wiifs_led_show_set(4); +	return hid_hw_open(wdata->hdev); +} -static int wiimote_input_event(struct input_dev *dev, unsigned int type, -						unsigned int code, int value) +static void wiimote_input_close(struct input_dev *dev)  {  	struct wiimote_data *wdata = input_get_drvdata(dev); -	if (!atomic_read(&wdata->ready)) -		return -EBUSY; -	/* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */ -	smp_rmb(); +	hid_hw_close(wdata->hdev); +} + +static int wiimote_accel_open(struct input_dev *dev) +{ +	struct wiimote_data *wdata = input_get_drvdata(dev); +	int ret; +	unsigned long flags; + +	ret = hid_hw_open(wdata->hdev); +	if (ret) +		return ret; + +	spin_lock_irqsave(&wdata->state.lock, flags); +	wiiproto_req_accel(wdata, true); +	spin_unlock_irqrestore(&wdata->state.lock, flags);  	return 0;  } +static void wiimote_accel_close(struct input_dev *dev) +{ +	struct wiimote_data *wdata = input_get_drvdata(dev); +	unsigned long flags; + +	spin_lock_irqsave(&wdata->state.lock, flags); +	wiiproto_req_accel(wdata, false); +	spin_unlock_irqrestore(&wdata->state.lock, flags); + +	hid_hw_close(wdata->hdev); +} + +static int wiimote_ir_open(struct input_dev *dev) +{ +	struct wiimote_data *wdata = input_get_drvdata(dev); +	int ret; + +	ret = hid_hw_open(wdata->hdev); +	if (ret) +		return ret; + +	ret = wiimote_init_ir(wdata, WIIPROTO_FLAG_IR_BASIC); +	if (ret) { +		hid_hw_close(wdata->hdev); +		return ret; +	} + +	return 0; +} + +static void wiimote_ir_close(struct input_dev *dev) +{ +	struct wiimote_data *wdata = input_get_drvdata(dev); + +	wiimote_init_ir(wdata, 0); +	hid_hw_close(wdata->hdev); +} +  static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)  {  	input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT], @@ -281,6 +777,210 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)  	input_sync(wdata->input);  } +static void handler_accel(struct wiimote_data *wdata, const __u8 *payload) +{ +	__u16 x, y, z; + +	if (!(wdata->state.flags & WIIPROTO_FLAG_ACCEL)) +		return; + +	/* +	 * payload is: BB BB XX YY ZZ +	 * Accelerometer data is encoded into 3 10bit values. XX, YY and ZZ +	 * contain the upper 8 bits of each value. The lower 2 bits are +	 * contained in the buttons data BB BB. +	 * Bits 6 and 7 of the first buttons byte BB is the lower 2 bits of the +	 * X accel value. Bit 5 of the second buttons byte is the 2nd bit of Y +	 * accel value and bit 6 is the second bit of the Z value. +	 * The first bit of Y and Z values is not available and always set to 0. +	 * 0x200 is returned on no movement. 
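+	 * As an example, an XX byte of 0x84 combined with a low bit pair of
+	 * 0b10 taken from the buttons bytes decodes to (0x84 << 2) | 0x2 =
+	 * 0x212 and is reported as 0x212 - 0x200 = +18 on ABS_RX.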
+	 */ + +	x = payload[2] << 2; +	y = payload[3] << 2; +	z = payload[4] << 2; + +	x |= (payload[0] >> 5) & 0x3; +	y |= (payload[1] >> 4) & 0x2; +	z |= (payload[1] >> 5) & 0x2; + +	input_report_abs(wdata->accel, ABS_RX, x - 0x200); +	input_report_abs(wdata->accel, ABS_RY, y - 0x200); +	input_report_abs(wdata->accel, ABS_RZ, z - 0x200); +	input_sync(wdata->accel); +} + +#define ir_to_input0(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \ +							ABS_HAT0X, ABS_HAT0Y) +#define ir_to_input1(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \ +							ABS_HAT1X, ABS_HAT1Y) +#define ir_to_input2(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \ +							ABS_HAT2X, ABS_HAT2Y) +#define ir_to_input3(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \ +							ABS_HAT3X, ABS_HAT3Y) + +static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir, +						bool packed, __u8 xid, __u8 yid) +{ +	__u16 x, y; + +	if (!(wdata->state.flags & WIIPROTO_FLAGS_IR)) +		return; + +	/* +	 * Basic IR data is encoded into 3 bytes. The first two bytes are the +	 * upper 8 bit of the X/Y data, the 3rd byte contains the lower 2 bits +	 * of both. +	 * If data is packed, then the 3rd byte is put first and slightly +	 * reordered. This allows to interleave packed and non-packed data to +	 * have two IR sets in 5 bytes instead of 6. +	 * The resulting 10bit X/Y values are passed to the ABS_HATXY input dev. +	 */ + +	if (packed) { +		x = ir[1] << 2; +		y = ir[2] << 2; + +		x |= ir[0] & 0x3; +		y |= (ir[0] >> 2) & 0x3; +	} else { +		x = ir[0] << 2; +		y = ir[1] << 2; + +		x |= (ir[2] >> 4) & 0x3; +		y |= (ir[2] >> 6) & 0x3; +	} + +	input_report_abs(wdata->ir, xid, x); +	input_report_abs(wdata->ir, yid, y); +} + +static void handler_status(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); + +	/* on status reports the drm is reset so we need to resend the drm */ +	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL); + +	if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) { +		wdata->state.cmd_battery = payload[5]; +		wiimote_cmd_complete(wdata); +	} +} + +static void handler_data(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); +} + +static void handler_return(struct wiimote_data *wdata, const __u8 *payload) +{ +	__u8 err = payload[3]; +	__u8 cmd = payload[2]; + +	handler_keys(wdata, payload); + +	if (wiimote_cmd_pending(wdata, cmd, 0)) { +		wdata->state.cmd_err = err; +		wiimote_cmd_complete(wdata); +	} else if (err) { +		hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err, +									cmd); +	} +} + +static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); +	handler_accel(wdata, payload); +} + +static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); +} + +static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); +	handler_accel(wdata, payload); +	ir_to_input0(wdata, &payload[5], false); +	ir_to_input1(wdata, &payload[8], false); +	ir_to_input2(wdata, &payload[11], false); +	ir_to_input3(wdata, &payload[14], false); +	input_sync(wdata->ir); +} + +static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); +} + +static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); +	ir_to_input0(wdata, &payload[2], false); +	ir_to_input1(wdata, &payload[4], true); +	
ir_to_input2(wdata, &payload[7], false); +	ir_to_input3(wdata, &payload[9], true); +	input_sync(wdata->ir); +} + +static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); +	handler_accel(wdata, payload); +} + +static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); +	handler_accel(wdata, payload); +	ir_to_input0(wdata, &payload[5], false); +	ir_to_input1(wdata, &payload[7], true); +	ir_to_input2(wdata, &payload[10], false); +	ir_to_input3(wdata, &payload[12], true); +	input_sync(wdata->ir); +} + +static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload) +{ +} + +static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload) +{ +	handler_keys(wdata, payload); + +	wdata->state.accel_split[0] = payload[2]; +	wdata->state.accel_split[1] = (payload[0] >> 1) & (0x10 | 0x20); +	wdata->state.accel_split[1] |= (payload[1] << 1) & (0x40 | 0x80); + +	ir_to_input0(wdata, &payload[3], false); +	ir_to_input1(wdata, &payload[12], false); +	input_sync(wdata->ir); +} + +static void handler_drm_SKAI2(struct wiimote_data *wdata, const __u8 *payload) +{ +	__u8 buf[5]; + +	handler_keys(wdata, payload); + +	wdata->state.accel_split[1] |= (payload[0] >> 5) & (0x01 | 0x02); +	wdata->state.accel_split[1] |= (payload[1] >> 3) & (0x04 | 0x08); + +	buf[0] = 0; +	buf[1] = 0; +	buf[2] = wdata->state.accel_split[0]; +	buf[3] = payload[2]; +	buf[4] = wdata->state.accel_split[1]; +	handler_accel(wdata, buf); + +	ir_to_input2(wdata, &payload[3], false); +	ir_to_input3(wdata, &payload[12], false); +	input_sync(wdata->ir); +} +  struct wiiproto_handler {  	__u8 id;  	size_t size; @@ -288,7 +988,20 @@ struct wiiproto_handler {  };  static struct wiiproto_handler handlers[] = { +	{ .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status }, +	{ .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data }, +	{ .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },  	{ .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys }, +	{ .id = WIIPROTO_REQ_DRM_KA, .size = 5, .func = handler_drm_KA }, +	{ .id = WIIPROTO_REQ_DRM_KE, .size = 10, .func = handler_drm_KE }, +	{ .id = WIIPROTO_REQ_DRM_KAI, .size = 17, .func = handler_drm_KAI }, +	{ .id = WIIPROTO_REQ_DRM_KEE, .size = 21, .func = handler_drm_KEE }, +	{ .id = WIIPROTO_REQ_DRM_KAE, .size = 21, .func = handler_drm_KAE }, +	{ .id = WIIPROTO_REQ_DRM_KIE, .size = 21, .func = handler_drm_KIE }, +	{ .id = WIIPROTO_REQ_DRM_KAIE, .size = 21, .func = handler_drm_KAIE }, +	{ .id = WIIPROTO_REQ_DRM_E, .size = 21, .func = handler_drm_E }, +	{ .id = WIIPROTO_REQ_DRM_SKAI1, .size = 21, .func = handler_drm_SKAI1 }, +	{ .id = WIIPROTO_REQ_DRM_SKAI2, .size = 21, .func = handler_drm_SKAI2 },  	{ .id = 0 }  }; @@ -299,11 +1012,7 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,  	struct wiiproto_handler *h;  	int i;  	unsigned long flags; - -	if (!atomic_read(&wdata->ready)) -		return -EBUSY; -	/* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */ -	smp_rmb(); +	bool handled = false;  	if (size < 1)  		return -EINVAL; @@ -312,15 +1021,73 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,  	for (i = 0; handlers[i].id; ++i) {  		h = &handlers[i]; -		if (h->id == raw_data[0] && h->size < size) +		if (h->id == raw_data[0] && h->size < size) {  			h->func(wdata, &raw_data[1]); +			handled = true; +		}  	} +	if (!handled) +		hid_warn(hdev, "Unhandled report %hhu 
size %d\n", raw_data[0], +									size); +  	spin_unlock_irqrestore(&wdata->state.lock, flags);  	return 0;  } +static void wiimote_leds_destroy(struct wiimote_data *wdata) +{ +	int i; +	struct led_classdev *led; + +	for (i = 0; i < 4; ++i) { +		if (wdata->leds[i]) { +			led = wdata->leds[i]; +			wdata->leds[i] = NULL; +			led_classdev_unregister(led); +			kfree(led); +		} +	} +} + +static int wiimote_leds_create(struct wiimote_data *wdata) +{ +	int i, ret; +	struct device *dev = &wdata->hdev->dev; +	size_t namesz = strlen(dev_name(dev)) + 9; +	struct led_classdev *led; +	char *name; + +	for (i = 0; i < 4; ++i) { +		led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL); +		if (!led) { +			ret = -ENOMEM; +			goto err; +		} +		name = (void*)&led[1]; +		snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i); +		led->name = name; +		led->brightness = 0; +		led->max_brightness = 1; +		led->brightness_get = wiimote_leds_get; +		led->brightness_set = wiimote_leds_set; + +		ret = led_classdev_register(dev, led); +		if (ret) { +			kfree(led); +			goto err; +		} +		wdata->leds[i] = led; +	} + +	return 0; + +err: +	wiimote_leds_destroy(wdata); +	return ret; +} +  static struct wiimote_data *wiimote_create(struct hid_device *hdev)  {  	struct wiimote_data *wdata; @@ -331,16 +1098,15 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)  		return NULL;  	wdata->input = input_allocate_device(); -	if (!wdata->input) { -		kfree(wdata); -		return NULL; -	} +	if (!wdata->input) +		goto err;  	wdata->hdev = hdev;  	hid_set_drvdata(hdev, wdata);  	input_set_drvdata(wdata->input, wdata); -	wdata->input->event = wiimote_input_event; +	wdata->input->open = wiimote_input_open; +	wdata->input->close = wiimote_input_close;  	wdata->input->dev.parent = &wdata->hdev->dev;  	wdata->input->id.bustype = wdata->hdev->bus;  	wdata->input->id.vendor = wdata->hdev->vendor; @@ -352,16 +1118,93 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)  	for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)  		set_bit(wiiproto_keymap[i], wdata->input->keybit); +	set_bit(FF_RUMBLE, wdata->input->ffbit); +	if (input_ff_create_memless(wdata->input, NULL, wiimote_ff_play)) +		goto err_input; + +	wdata->accel = input_allocate_device(); +	if (!wdata->accel) +		goto err_input; + +	input_set_drvdata(wdata->accel, wdata); +	wdata->accel->open = wiimote_accel_open; +	wdata->accel->close = wiimote_accel_close; +	wdata->accel->dev.parent = &wdata->hdev->dev; +	wdata->accel->id.bustype = wdata->hdev->bus; +	wdata->accel->id.vendor = wdata->hdev->vendor; +	wdata->accel->id.product = wdata->hdev->product; +	wdata->accel->id.version = wdata->hdev->version; +	wdata->accel->name = WIIMOTE_NAME " Accelerometer"; + +	set_bit(EV_ABS, wdata->accel->evbit); +	set_bit(ABS_RX, wdata->accel->absbit); +	set_bit(ABS_RY, wdata->accel->absbit); +	set_bit(ABS_RZ, wdata->accel->absbit); +	input_set_abs_params(wdata->accel, ABS_RX, -500, 500, 2, 4); +	input_set_abs_params(wdata->accel, ABS_RY, -500, 500, 2, 4); +	input_set_abs_params(wdata->accel, ABS_RZ, -500, 500, 2, 4); + +	wdata->ir = input_allocate_device(); +	if (!wdata->ir) +		goto err_ir; + +	input_set_drvdata(wdata->ir, wdata); +	wdata->ir->open = wiimote_ir_open; +	wdata->ir->close = wiimote_ir_close; +	wdata->ir->dev.parent = &wdata->hdev->dev; +	wdata->ir->id.bustype = wdata->hdev->bus; +	wdata->ir->id.vendor = wdata->hdev->vendor; +	wdata->ir->id.product = wdata->hdev->product; +	wdata->ir->id.version = wdata->hdev->version; +	wdata->ir->name = WIIMOTE_NAME " IR"; + +	
set_bit(EV_ABS, wdata->ir->evbit); +	set_bit(ABS_HAT0X, wdata->ir->absbit); +	set_bit(ABS_HAT0Y, wdata->ir->absbit); +	set_bit(ABS_HAT1X, wdata->ir->absbit); +	set_bit(ABS_HAT1Y, wdata->ir->absbit); +	set_bit(ABS_HAT2X, wdata->ir->absbit); +	set_bit(ABS_HAT2Y, wdata->ir->absbit); +	set_bit(ABS_HAT3X, wdata->ir->absbit); +	set_bit(ABS_HAT3Y, wdata->ir->absbit); +	input_set_abs_params(wdata->ir, ABS_HAT0X, 0, 1023, 2, 4); +	input_set_abs_params(wdata->ir, ABS_HAT0Y, 0, 767, 2, 4); +	input_set_abs_params(wdata->ir, ABS_HAT1X, 0, 1023, 2, 4); +	input_set_abs_params(wdata->ir, ABS_HAT1Y, 0, 767, 2, 4); +	input_set_abs_params(wdata->ir, ABS_HAT2X, 0, 1023, 2, 4); +	input_set_abs_params(wdata->ir, ABS_HAT2Y, 0, 767, 2, 4); +	input_set_abs_params(wdata->ir, ABS_HAT3X, 0, 1023, 2, 4); +	input_set_abs_params(wdata->ir, ABS_HAT3Y, 0, 767, 2, 4); +  	spin_lock_init(&wdata->qlock);  	INIT_WORK(&wdata->worker, wiimote_worker);  	spin_lock_init(&wdata->state.lock); +	init_completion(&wdata->state.ready); +	mutex_init(&wdata->state.sync);  	return wdata; + +err_ir: +	input_free_device(wdata->accel); +err_input: +	input_free_device(wdata->input); +err: +	kfree(wdata); +	return NULL;  }  static void wiimote_destroy(struct wiimote_data *wdata)  { +	wiimote_leds_destroy(wdata); + +	power_supply_unregister(&wdata->battery); +	input_unregister_device(wdata->accel); +	input_unregister_device(wdata->ir); +	input_unregister_device(wdata->input); +	cancel_work_sync(&wdata->worker); +	hid_hw_stop(wdata->hdev); +  	kfree(wdata);  } @@ -377,19 +1220,6 @@ static int wiimote_hid_probe(struct hid_device *hdev,  		return -ENOMEM;  	} -	ret = device_create_file(&hdev->dev, &dev_attr_led1); -	if (ret) -		goto err; -	ret = device_create_file(&hdev->dev, &dev_attr_led2); -	if (ret) -		goto err; -	ret = device_create_file(&hdev->dev, &dev_attr_led3); -	if (ret) -		goto err; -	ret = device_create_file(&hdev->dev, &dev_attr_led4); -	if (ret) -		goto err; -  	ret = hid_parse(hdev);  	if (ret) {  		hid_err(hdev, "HID parse failed\n"); @@ -402,15 +1232,41 @@ static int wiimote_hid_probe(struct hid_device *hdev,  		goto err;  	} -	ret = input_register_device(wdata->input); +	ret = input_register_device(wdata->accel);  	if (ret) {  		hid_err(hdev, "Cannot register input device\n");  		goto err_stop;  	} -	/* smp_wmb: Write wdata->xy first before wdata->ready is set to 1 */ -	smp_wmb(); -	atomic_set(&wdata->ready, 1); +	ret = input_register_device(wdata->ir); +	if (ret) { +		hid_err(hdev, "Cannot register input device\n"); +		goto err_ir; +	} + +	ret = input_register_device(wdata->input); +	if (ret) { +		hid_err(hdev, "Cannot register input device\n"); +		goto err_input; +	} + +	wdata->battery.properties = wiimote_battery_props; +	wdata->battery.num_properties = ARRAY_SIZE(wiimote_battery_props); +	wdata->battery.get_property = wiimote_battery_get_property; +	wdata->battery.name = "wiimote_battery"; +	wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY; +	wdata->battery.use_for_apm = 0; + +	ret = power_supply_register(&wdata->hdev->dev, &wdata->battery); +	if (ret) { +		hid_err(hdev, "Cannot register battery device\n"); +		goto err_battery; +	} + +	ret = wiimote_leds_create(wdata); +	if (ret) +		goto err_free; +  	hid_info(hdev, "New device registered\n");  	/* by default set led1 after device initialization */ @@ -420,15 +1276,26 @@ static int wiimote_hid_probe(struct hid_device *hdev,  	return 0; +err_free: +	wiimote_destroy(wdata); +	return ret; + +err_battery: +	input_unregister_device(wdata->input); +	wdata->input = NULL; +err_input: 
+	input_unregister_device(wdata->ir); +	wdata->ir = NULL; +err_ir: +	input_unregister_device(wdata->accel); +	wdata->accel = NULL;  err_stop:  	hid_hw_stop(hdev);  err: +	input_free_device(wdata->ir); +	input_free_device(wdata->accel);  	input_free_device(wdata->input); -	device_remove_file(&hdev->dev, &dev_attr_led1); -	device_remove_file(&hdev->dev, &dev_attr_led2); -	device_remove_file(&hdev->dev, &dev_attr_led3); -	device_remove_file(&hdev->dev, &dev_attr_led4); -	wiimote_destroy(wdata); +	kfree(wdata);  	return ret;  } @@ -437,16 +1304,6 @@ static void wiimote_hid_remove(struct hid_device *hdev)  	struct wiimote_data *wdata = hid_get_drvdata(hdev);  	hid_info(hdev, "Device removed\n"); - -	device_remove_file(&hdev->dev, &dev_attr_led1); -	device_remove_file(&hdev->dev, &dev_attr_led2); -	device_remove_file(&hdev->dev, &dev_attr_led3); -	device_remove_file(&hdev->dev, &dev_attr_led4); - -	hid_hw_stop(hdev); -	input_unregister_device(wdata->input); - -	cancel_work_sync(&wdata->worker);  	wiimote_destroy(wdata);  } diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c index e90371508fd..1ad85f2257b 100644 --- a/drivers/hid/hid-zydacron.c +++ b/drivers/hid/hid-zydacron.c @@ -201,9 +201,7 @@ static void zc_remove(struct hid_device *hdev)  	struct zc_device *zc = hid_get_drvdata(hdev);  	hid_hw_stop(hdev); - -	if (NULL != zc) -		kfree(zc); +	kfree(zc);  }  static const struct hid_device_id zc_devices[] = { diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index c79578b5a78..cf7d6d58e79 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -259,7 +259,6 @@ static int hidraw_open(struct inode *inode, struct file *file)  	mutex_lock(&minors_lock);  	if (!hidraw_table[minor]) { -		kfree(list);  		err = -ENODEV;  		goto out_unlock;  	} @@ -272,8 +271,10 @@ static int hidraw_open(struct inode *inode, struct file *file)  	dev = hidraw_table[minor];  	if (!dev->open++) {  		err = hid_hw_power(dev->hid, PM_HINT_FULLON); -		if (err < 0) +		if (err < 0) { +			dev->open--;  			goto out_unlock; +		}  		err = hid_hw_open(dev->hid);  		if (err < 0) { @@ -285,6 +286,8 @@ static int hidraw_open(struct inode *inode, struct file *file)  out_unlock:  	mutex_unlock(&minors_lock);  out: +	if (err < 0) +		kfree(list);  	return err;  } @@ -510,13 +513,12 @@ void hidraw_disconnect(struct hid_device *hid)  {  	struct hidraw *hidraw = hid->hidraw; +	mutex_lock(&minors_lock);  	hidraw->exist = 0;  	device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); -	mutex_lock(&minors_lock);  	hidraw_table[hidraw->minor] = NULL; -	mutex_unlock(&minors_lock);  	if (hidraw->open) {  		hid_hw_close(hid); @@ -524,6 +526,7 @@ void hidraw_disconnect(struct hid_device *hid)  	} else {  		kfree(hidraw);  	} +	mutex_unlock(&minors_lock);  }  EXPORT_SYMBOL_GPL(hidraw_disconnect); diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index ad978f5748d..77e705c2209 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -1270,7 +1270,7 @@ static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)  static void hid_cease_io(struct usbhid_device *usbhid)  { -	del_timer(&usbhid->io_retry); +	del_timer_sync(&usbhid->io_retry);  	usb_kill_urb(usbhid->urbin);  	usb_kill_urb(usbhid->urbctrl);  	usb_kill_urb(usbhid->urbout); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 621959d5cc4..4ea464151c3 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -47,6 +47,7 @@ static const struct 
hid_blacklist {  	{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },  	{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, +	{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },  	{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },  	{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },  	{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, @@ -79,16 +80,15 @@ static const struct hid_blacklist {  	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },  	{ USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },  	{ USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH, HID_QUIRK_MULTI_INPUT }, -	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },  	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, -	{ USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },  	{ USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },  	{ USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },  	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },  	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, +	{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },  	{ 0, 0 }  }; diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 7c1188b53c3..4ef02b269a7 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -641,6 +641,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)  			struct usb_device *dev = hid_to_usb_dev(hid);  			struct usbhid_device *usbhid = hid->driver_data; +			memset(&dinfo, 0, sizeof(dinfo)); +  			dinfo.bustype = BUS_USB;  			dinfo.busnum = dev->bus->busnum;  			dinfo.devnum = dev->devnum; diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 59d83e83da7..93238378664 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -36,17 +36,25 @@  #include <linux/cpu.h>  #include <linux/pci.h>  #include <linux/smp.h> +#include <linux/moduleparam.h>  #include <asm/msr.h>  #include <asm/processor.h>  #define DRVNAME	"coretemp" +/* + * force_tjmax only matters when TjMax can't be read from the CPU itself. + * When set, it replaces the driver's suboptimal heuristic. + */ +static int force_tjmax; +module_param_named(tjmax, force_tjmax, int, 0444); +MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); +  #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */  #define NUM_REAL_CORES		16	/* Number of Real cores per cpu */  #define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */  #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */ -#define MAX_THRESH_ATTRS	3	/* Maximum no of Threshold attrs */ -#define TOTAL_ATTRS		(MAX_CORE_ATTRS + MAX_THRESH_ATTRS) +#define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)  #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)  #ifdef CONFIG_SMP @@ -69,8 +77,6 @@   *		This value is passed as "id" field to rdmsr/wrmsr functions.   
* @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,   *		from where the temperature values should be read. - * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT, - *		from where the thresholds are read.   * @attr_size:  Total number of pre-core attrs displayed in the sysfs.   * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.   *		Otherwise, temp_data holds coretemp data. @@ -79,13 +85,11 @@  struct temp_data {  	int temp;  	int ttarget; -	int tmin;  	int tjmax;  	unsigned long last_updated;  	unsigned int cpu;  	u32 cpu_core_id;  	u32 status_reg; -	u32 intrpt_reg;  	int attr_size;  	bool is_pkg_data;  	bool valid; @@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev,  	return sprintf(buf, "%d\n", (eax >> 5) & 1);  } -static ssize_t show_max_alarm(struct device *dev, -				struct device_attribute *devattr, char *buf) -{ -	u32 eax, edx; -	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -	struct platform_data *pdata = dev_get_drvdata(dev); -	struct temp_data *tdata = pdata->core_data[attr->index]; - -	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); - -	return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1)); -} -  static ssize_t show_tjmax(struct device *dev,  			struct device_attribute *devattr, char *buf)  { @@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev,  	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);  } -static ssize_t store_ttarget(struct device *dev, -				struct device_attribute *devattr, -				const char *buf, size_t count) -{ -	struct platform_data *pdata = dev_get_drvdata(dev); -	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -	struct temp_data *tdata = pdata->core_data[attr->index]; -	u32 eax, edx; -	unsigned long val; -	int diff; - -	if (strict_strtoul(buf, 10, &val)) -		return -EINVAL; - -	/* -	 * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms -	 * of milli degree celsius. Hence don't accept val > (127 * 1000) -	 */ -	if (val > tdata->tjmax || val > 127000) -		return -EINVAL; - -	diff = (tdata->tjmax - val) / 1000; - -	mutex_lock(&tdata->update_lock); -	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); -	eax = (eax & ~THERM_MASK_THRESHOLD1) | -				(diff << THERM_SHIFT_THRESHOLD1); -	wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); -	tdata->ttarget = val; -	mutex_unlock(&tdata->update_lock); - -	return count; -} - -static ssize_t show_tmin(struct device *dev, -			struct device_attribute *devattr, char *buf) -{ -	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -	struct platform_data *pdata = dev_get_drvdata(dev); - -	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin); -} - -static ssize_t store_tmin(struct device *dev, -				struct device_attribute *devattr, -				const char *buf, size_t count) -{ -	struct platform_data *pdata = dev_get_drvdata(dev); -	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -	struct temp_data *tdata = pdata->core_data[attr->index]; -	u32 eax, edx; -	unsigned long val; -	int diff; - -	if (strict_strtoul(buf, 10, &val)) -		return -EINVAL; - -	/* -	 * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms -	 * of milli degree celsius. 
Hence don't accept val > (127 * 1000) -	 */ -	if (val > tdata->tjmax || val > 127000) -		return -EINVAL; - -	diff = (tdata->tjmax - val) / 1000; - -	mutex_lock(&tdata->update_lock); -	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); -	eax = (eax & ~THERM_MASK_THRESHOLD0) | -				(diff << THERM_SHIFT_THRESHOLD0); -	wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); -	tdata->tmin = val; -	mutex_unlock(&tdata->update_lock); - -	return count; -} -  static ssize_t show_temp(struct device *dev,  			struct device_attribute *devattr, char *buf)  { @@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)  static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)  { -	/* The 100C is default for both mobile and non mobile CPUs */  	int err;  	u32 eax, edx;  	u32 val; @@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)  	 */  	err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);  	if (err) { -		dev_warn(dev, "Unable to read TjMax from CPU.\n"); +		if (c->x86_model > 0xe && c->x86_model != 0x1c) +			dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);  	} else {  		val = (eax >> 16) & 0xff;  		/* @@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)  		 * will be used  		 */  		if (val) { -			dev_info(dev, "TjMax is %d C.\n", val); +			dev_dbg(dev, "TjMax is %d degrees C\n", val);  			return val * 1000;  		}  	} +	if (force_tjmax) { +		dev_notice(dev, "TjMax forced to %d degrees C by user\n", +			   force_tjmax); +		return force_tjmax * 1000; +	} +  	/*  	 * An assumption is made for early CPUs and unreadable MSR.  	 * NOTE: the calculated value may not be correct. @@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx)  	rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);  } -static int get_pkg_tjmax(unsigned int cpu, struct device *dev) -{ -	int err; -	u32 eax, edx, val; - -	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); -	if (!err) { -		val = (eax >> 16) & 0xff; -		if (val) -			return val * 1000; -	} -	dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu); -	return 100000; /* Default TjMax: 100 degree celsius */ -} -  static int create_name_attr(struct platform_data *pdata, struct device *dev)  {  	sysfs_attr_init(&pdata->name_attr.attr); @@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,  				int attr_no)  {  	int err, i; -	static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev, +	static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,  			struct device_attribute *devattr, char *buf) = {  			show_label, show_crit_alarm, show_temp, show_tjmax, -			show_max_alarm, show_ttarget, show_tmin }; -	static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev, -			struct device_attribute *devattr, const char *buf, -			size_t count) = { NULL, NULL, NULL, NULL, NULL, -					store_ttarget, store_tmin }; -	static const char *names[TOTAL_ATTRS] = { +			show_ttarget }; +	static const char *const names[TOTAL_ATTRS] = {  					"temp%d_label", "temp%d_crit_alarm",  					"temp%d_input", "temp%d_crit", -					"temp%d_max_alarm", "temp%d_max", -					"temp%d_max_hyst" }; +					"temp%d_max" };  	for (i = 0; i < tdata->attr_size; i++) {  		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], @@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,  		sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);  		
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];  		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; -		if (rw_ptr[i]) { -			tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR; -			tdata->sd_attrs[i].dev_attr.store = rw_ptr[i]; -		}  		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];  		tdata->sd_attrs[i].index = attr_no;  		err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); @@ -481,9 +377,9 @@ exit_free:  } -static int __devinit chk_ucode_version(struct platform_device *pdev) +static int __cpuinit chk_ucode_version(unsigned int cpu)  { -	struct cpuinfo_x86 *c = &cpu_data(pdev->id); +	struct cpuinfo_x86 *c = &cpu_data(cpu);  	int err;  	u32 edx; @@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev)  	 */  	if (c->x86_model == 0xe && c->x86_mask < 0xc) {  		/* check for microcode update */ -		err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, +		err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,  					       &edx, 1);  		if (err) { -			dev_err(&pdev->dev, -				"Cannot determine microcode revision of " -				"CPU#%u (%d)!\n", pdev->id, err); +			pr_err("Cannot determine microcode revision of " +			       "CPU#%u (%d)!\n", cpu, err);  			return -ENODEV;  		} else if (edx < 0x39) { -			dev_err(&pdev->dev, -				"Errata AE18 not fixed, update BIOS or " -				"microcode of the CPU!\n"); +			pr_err("Errata AE18 not fixed, update BIOS or " +			       "microcode of the CPU!\n");  			return -ENODEV;  		}  	} @@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)  	tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :  							MSR_IA32_THERM_STATUS; -	tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT : -						MSR_IA32_THERM_INTERRUPT;  	tdata->is_pkg_data = pkg_flag;  	tdata->cpu = cpu;  	tdata->cpu_core_id = TO_CORE_ID(cpu); @@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)  	return tdata;  } -static int create_core_data(struct platform_data *pdata, -				struct platform_device *pdev, +static int create_core_data(struct platform_device *pdev,  				unsigned int cpu, int pkg_flag)  {  	struct temp_data *tdata; +	struct platform_data *pdata = platform_get_drvdata(pdev);  	struct cpuinfo_x86 *c = &cpu_data(cpu);  	u32 eax, edx;  	int err, attr_no; @@ -588,20 +480,21 @@ static int create_core_data(struct platform_data *pdata,  		goto exit_free;  	/* We can access status register. Get Critical Temperature */ -	if (pkg_flag) -		tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev); -	else -		tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); +	tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);  	/* -	 * Test if we can access the intrpt register. If so, increase the -	 * 'size' enough to have ttarget/tmin/max_alarm interfaces. -	 * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT +	 * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. +	 * The target temperature is available on older CPUs but not in this +	 * register. Atoms don't have the register at all.  	 
*/ -	err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); -	if (!err) { -		tdata->attr_size += MAX_THRESH_ATTRS; -		tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; +	if (c->x86_model > 0xe && c->x86_model != 0x1c) { +		err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, +					&eax, &edx); +		if (!err) { +			tdata->ttarget +			  = tdata->tjmax - ((eax >> 8) & 0xff) * 1000; +			tdata->attr_size++; +		}  	}  	pdata->core_data[attr_no] = tdata; @@ -613,22 +506,20 @@ static int create_core_data(struct platform_data *pdata,  	return 0;  exit_free: +	pdata->core_data[attr_no] = NULL;  	kfree(tdata);  	return err;  }  static void coretemp_add_core(unsigned int cpu, int pkg_flag)  { -	struct platform_data *pdata;  	struct platform_device *pdev = coretemp_get_pdev(cpu);  	int err;  	if (!pdev)  		return; -	pdata = platform_get_drvdata(pdev); - -	err = create_core_data(pdata, pdev, cpu, pkg_flag); +	err = create_core_data(pdev, cpu, pkg_flag);  	if (err)  		dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);  } @@ -652,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)  	struct platform_data *pdata;  	int err; -	/* Check the microcode version of the CPU */ -	err = chk_ucode_version(pdev); -	if (err) -		return err; -  	/* Initialize the per-package data structures */  	pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);  	if (!pdata) @@ -666,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev)  	if (err)  		goto exit_free; -	pdata->phys_proc_id = TO_PHYS_ID(pdev->id); +	pdata->phys_proc_id = pdev->id;  	platform_set_drvdata(pdev, pdata);  	pdata->hwmon_dev = hwmon_device_register(&pdev->dev); @@ -718,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)  	mutex_lock(&pdev_list_mutex); -	pdev = platform_device_alloc(DRVNAME, cpu); +	pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));  	if (!pdev) {  		err = -ENOMEM;  		pr_err("Device allocation failed\n"); @@ -738,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)  	}  	pdev_entry->pdev = pdev; -	pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); +	pdev_entry->phys_proc_id = pdev->id;  	list_add_tail(&pdev_entry->list, &pdev_list);  	mutex_unlock(&pdev_list_mutex); @@ -799,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu)  		return;  	if (!pdev) { +		/* Check the microcode version of the CPU */ +		if (chk_ucode_version(cpu)) +			return; +  		/*  		 * Alright, we have DTS support.  		 
* We are bringing the _first_ core in this pkg diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c index 257957c69d9..4f7c3fc40a8 100644 --- a/drivers/hwmon/ds620.c +++ b/drivers/hwmon/ds620.c @@ -72,7 +72,7 @@ struct ds620_data {  	char valid;		/* !=0 if following fields are valid */  	unsigned long last_updated;	/* In jiffies */ -	u16 temp[3];		/* Register values, word */ +	s16 temp[3];		/* Register values, word */  };  /* diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index c4c40be0edb..d22f241b6a6 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c @@ -114,7 +114,6 @@ struct i5k_amb_data {  	void __iomem *amb_mmio;  	struct i5k_device_attribute *attrs;  	unsigned int num_attrs; -	unsigned long chipset_id;  };  static ssize_t show_name(struct device *dev, struct device_attribute *devattr, @@ -444,8 +443,6 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data,  		goto out;  	} -	data->chipset_id = devid; -  	res = 0;  out:  	pci_dev_put(pcidev); @@ -478,23 +475,13 @@ out:  	return res;  } -static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data, -					unsigned long channel) -{ -	switch (data->chipset_id) { -	case PCI_DEVICE_ID_INTEL_5000_ERR: -		return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel; -	case PCI_DEVICE_ID_INTEL_5400_ERR: -		return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel; -	default: -		BUG(); -	} -} - -static unsigned long chipset_ids[] = { -	PCI_DEVICE_ID_INTEL_5000_ERR, -	PCI_DEVICE_ID_INTEL_5400_ERR, -	0 +static struct { +	unsigned long err; +	unsigned long fbd0; +} chipset_ids[] __devinitdata  = { +	{ PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 }, +	{ PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 }, +	{ 0, 0 }  };  #ifdef MODULE @@ -510,8 +497,7 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)  {  	struct i5k_amb_data *data;  	struct resource *reso; -	int i; -	int res = -ENODEV; +	int i, res;  	data = kzalloc(sizeof(*data), GFP_KERNEL);  	if (!data) @@ -520,22 +506,22 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)  	/* Figure out where the AMB registers live */  	i = 0;  	do { -		res = i5k_find_amb_registers(data, chipset_ids[i]); +		res = i5k_find_amb_registers(data, chipset_ids[i].err); +		if (res == 0) +			break;  		i++; -	} while (res && chipset_ids[i]); +	} while (chipset_ids[i].err);  	if (res)  		goto err;  	/* Copy the DIMM presence map for the first two channels */ -	res = i5k_channel_probe(&data->amb_present[0], -				i5k_channel_pci_id(data, 0)); +	res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0);  	if (res)  		goto err;  	/* Copy the DIMM presence map for the optional second two channels */ -	i5k_channel_probe(&data->amb_present[2], -			  i5k_channel_pci_id(data, 1)); +	i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1);  	/* Set up resource regions */  	reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c index d94a24fdf4b..dd2d7b9620c 100644 --- a/drivers/hwmon/max16065.c +++ b/drivers/hwmon/max16065.c @@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)  static inline int ADC_TO_CURR(int adc, int gain)  { -	return adc * 1400000 / gain * 255; +	return adc * 1400000 / (gain * 255);  }  /* diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index d7926f4336b..eab11615dce 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -211,8 +211,7 @@ static int 
lookup_comp(struct ntc_data *data,  	if (data->comp[mid].ohm <= ohm) {  		*i_low = mid;  		*i_high = mid - 1; -	} -	if (data->comp[mid].ohm > ohm) { +	} else {  		*i_low = mid + 1;  		*i_high = mid;  	} diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index a561c3a0e91..397fc59b568 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -978,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,  struct pmbus_limit_attr {  	u16 reg;		/* Limit register */  	bool update;		/* True if register needs updates */ +	bool low;		/* True if low limit; for limits with compare +				   functions only */  	const char *attr;	/* Attribute name */  	const char *alarm;	/* Alarm attribute name */  	u32 sbit;		/* Alarm attribute status bit */ @@ -1029,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,  				if (attr->compare) {  					pmbus_add_boolean_cmp(data, name,  						l->alarm, index, -						cbase, cindex, +						l->low ? cindex : cbase, +						l->low ? cbase : cindex,  						attr->sbase + page, l->sbit);  				} else {  					pmbus_add_boolean_reg(data, name, @@ -1366,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = {  static const struct pmbus_limit_attr temp_limit_attrs[] = {  	{  		.reg = PMBUS_UT_WARN_LIMIT, +		.low = true,  		.attr = "min",  		.alarm = "min_alarm",  		.sbit = PB_TEMP_UT_WARNING,  	}, {  		.reg = PMBUS_UT_FAULT_LIMIT, +		.low = true,  		.attr = "lcrit",  		.alarm = "lcrit_alarm",  		.sbit = PB_TEMP_UT_FAULT, @@ -1399,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {  static const struct pmbus_limit_attr temp_limit_attrs23[] = {  	{  		.reg = PMBUS_UT_WARN_LIMIT, +		.low = true,  		.attr = "min",  		.alarm = "min_alarm",  		.sbit = PB_TEMP_UT_WARNING,  	}, {  		.reg = PMBUS_UT_FAULT_LIMIT, +		.low = true,  		.attr = "lcrit",  		.alarm = "lcrit_alarm",  		.sbit = PB_TEMP_UT_FAULT, diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index ace1c731973..d0ddb60155c 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,  	block_buffer[ret] = '\0';  	dev_info(&client->dev, "Device ID %s\n", block_buffer); -	mid = NULL; -	for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) { -		mid = &ucd9000_id[i]; +	for (mid = ucd9000_id; mid->name[0]; mid++) {  		if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))  			break;  	} -	if (!mid || !strlen(mid->name)) { +	if (!mid->name[0]) {  		dev_err(&client->dev, "Unsupported device\n");  		return -ENODEV;  	} diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c index ffcc1cf3609..c65e9da707c 100644 --- a/drivers/hwmon/pmbus/ucd9200.c +++ b/drivers/hwmon/pmbus/ucd9200.c @@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,  	block_buffer[ret] = '\0';  	dev_info(&client->dev, "Device ID %s\n", block_buffer); -	mid = NULL; -	for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) { -		mid = &ucd9200_id[i]; +	for (mid = ucd9200_id; mid->name[0]; mid++) {  		if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))  			break;  	} -	if (!mid || !strlen(mid->name)) { +	if (!mid->name[0]) {  		dev_err(&client->dev, "Unsupported device\n");  		return -ENODEV;  	} diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index f2b377c56a3..36d7f270b14 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -390,7 +390,7 @@ 
temp_from_reg(u16 reg, s16 regval)  {  	if (is_word_sized(reg))  		return LM75_TEMP_FROM_REG(regval); -	return regval * 1000; +	return ((s8)regval) * 1000;  }  static inline u16 @@ -398,7 +398,8 @@ temp_to_reg(u16 reg, long temp)  {  	if (is_word_sized(reg))  		return LM75_TEMP_TO_REG(temp); -	return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000); +	return (s8)DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), +				     1000);  }  /* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */ @@ -1715,7 +1716,8 @@ static void w83627ehf_device_remove_files(struct device *dev)  }  /* Get the monitoring functions started */ -static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) +static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data, +						   enum kinds kind)  {  	int i;  	u8 tmp, diode; @@ -1746,10 +1748,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)  		w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);  	/* Get thermal sensor types */ -	diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE); +	switch (kind) { +	case w83627ehf: +		diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE); +		break; +	default: +		diode = 0x70; +	}  	for (i = 0; i < 3; i++) {  		if ((tmp & (0x02 << i))) -			data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2; +			data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;  		else  			data->temp_type[i] = 4; /* thermistor */  	} @@ -2016,7 +2024,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)  	}  	/* Initialize the chip */ -	w83627ehf_init_device(data); +	w83627ehf_init_device(data, sio_data->kind);  	data->vrm = vid_which_vrm();  	superio_enter(sio_data->sioreg); diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index 17cf1ab9552..8c2844e5691 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client,  			  struct i2c_board_info *info);  static int w83791d_remove(struct i2c_client *client); -static int w83791d_read(struct i2c_client *client, u8 register); -static int w83791d_write(struct i2c_client *client, u8 register, u8 value); +static int w83791d_read(struct i2c_client *client, u8 reg); +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);  static struct w83791d_data *w83791d_update_device(struct device *dev);  #ifdef DEBUG diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c index b7a51c43b18..1b42b50b599 100644 --- a/drivers/i2c/busses/i2c-designware.c +++ b/drivers/i2c/busses/i2c-designware.c @@ -390,7 +390,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)  	int tx_limit, rx_limit;  	u32 addr = msgs[dev->msg_write_idx].addr;  	u32 buf_len = dev->tx_buf_len; -	u8 *buf = dev->tx_buf;; +	u8 *buf = dev->tx_buf;  	intr_mask = DW_IC_INTR_DEFAULT_MASK; diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 0c731ca69f1..b228e09c5d0 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -146,6 +146,7 @@ struct i2c_nmk_client {   * @stop: stop condition   * @xfer_complete: acknowledge completion for a I2C message   * @result: controller propogated result + * @regulator: pointer to i2c regulator   * @busy: Busy doing transfer   */  struct nmk_i2c_dev { @@ -417,12 +418,12 @@ static int read_i2c(struct nmk_i2c_dev *dev)  	writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,  			dev->virtbase + I2C_IMSCR); -	timeout = 
wait_for_completion_interruptible_timeout( +	timeout = wait_for_completion_timeout(  		&dev->xfer_complete, dev->adap.timeout);  	if (timeout < 0) {  		dev_err(&dev->pdev->dev, -			"wait_for_completion_interruptible_timeout" +			"wait_for_completion_timeout"  			"returned %d waiting for event\n", timeout);  		status = timeout;  	} @@ -504,12 +505,12 @@ static int write_i2c(struct nmk_i2c_dev *dev)  	writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,  			dev->virtbase + I2C_IMSCR); -	timeout = wait_for_completion_interruptible_timeout( +	timeout = wait_for_completion_timeout(  		&dev->xfer_complete, dev->adap.timeout);  	if (timeout < 0) {  		dev_err(&dev->pdev->dev, -			"wait_for_completion_interruptible_timeout" +			"wait_for_completion_timeout "  			"returned %d waiting for event\n", timeout);  		status = timeout;  	} diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1a766cf74f6..2dfb6317685 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1139,41 +1139,12 @@ omap_i2c_remove(struct platform_device *pdev)  	return 0;  } -#ifdef CONFIG_SUSPEND -static int omap_i2c_suspend(struct device *dev) -{ -	if (!pm_runtime_suspended(dev)) -		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) -			dev->bus->pm->runtime_suspend(dev); - -	return 0; -} - -static int omap_i2c_resume(struct device *dev) -{ -	if (!pm_runtime_suspended(dev)) -		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) -			dev->bus->pm->runtime_resume(dev); - -	return 0; -} - -static struct dev_pm_ops omap_i2c_pm_ops = { -	.suspend = omap_i2c_suspend, -	.resume = omap_i2c_resume, -}; -#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) -#else -#define OMAP_I2C_PM_OPS NULL -#endif -  static struct platform_driver omap_i2c_driver = {  	.probe		= omap_i2c_probe,  	.remove		= omap_i2c_remove,  	.driver		= {  		.name	= "omap_i2c",  		.owner	= THIS_MODULE, -		.pm	= OMAP_I2C_PM_OPS,  	},  }; diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c index 6659d269b84..b73da6cd6f9 100644 --- a/drivers/i2c/busses/i2c-pxa-pci.c +++ b/drivers/i2c/busses/i2c-pxa-pci.c @@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,  		return -EINVAL;  	}  	sds = kzalloc(sizeof(*sds), GFP_KERNEL); -	if (!sds) +	if (!sds) { +		ret = -ENOMEM;  		goto err_mem; +	}  	for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {  		sds->pdev[i] = add_i2c_device(dev, i);  		if (IS_ERR(sds->pdev[i])) { +			ret = PTR_ERR(sds->pdev[i]);  			while (--i >= 0)  				platform_device_unregister(sds->pdev[i]);  			goto err_dev_add; diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 2440b741197..3c94c4a81a5 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)  	/* Rounds down to not include partial word at the end of buf */  	words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; -	if (words_to_transfer > tx_fifo_avail) -		words_to_transfer = tx_fifo_avail; -	i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); +	/* It's very common to have < 4 bytes, so optimize that case. */ +	if (words_to_transfer) { +		if (words_to_transfer > tx_fifo_avail) +			words_to_transfer = tx_fifo_avail; -	buf += words_to_transfer * BYTES_PER_FIFO_WORD; -	buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; -	tx_fifo_avail -= words_to_transfer; +		/* +		 * Update state before writing to FIFO.  
If this casues us +		 * to finish writing all bytes (AKA buf_remaining goes to 0) we +		 * have a potential for an interrupt (PACKET_XFER_COMPLETE is +		 * not maskable).  We need to make sure that the isr sees +		 * buf_remaining as 0 and doesn't call us back re-entrantly. +		 */ +		buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; +		tx_fifo_avail -= words_to_transfer; +		i2c_dev->msg_buf_remaining = buf_remaining; +		i2c_dev->msg_buf = buf + +			words_to_transfer * BYTES_PER_FIFO_WORD; +		barrier(); + +		i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); + +		buf += words_to_transfer * BYTES_PER_FIFO_WORD; +	}  	/*  	 * If there is a partial word at the end of buf, handle it manually to @@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)  	if (tx_fifo_avail > 0 && buf_remaining > 0) {  		BUG_ON(buf_remaining > 3);  		memcpy(&val, buf, buf_remaining); + +		/* Again update before writing to FIFO to make sure isr sees. */ +		i2c_dev->msg_buf_remaining = 0; +		i2c_dev->msg_buf = NULL; +		barrier(); +  		i2c_writel(i2c_dev, val, I2C_TX_FIFO); -		buf_remaining = 0; -		tx_fifo_avail--;  	} -	BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0); -	i2c_dev->msg_buf_remaining = buf_remaining; -	i2c_dev->msg_buf = buf;  	return 0;  } @@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)  			tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);  	} -	if ((status & I2C_INT_PACKET_XFER_COMPLETE) && -			!i2c_dev->msg_buf_remaining) +	if (status & I2C_INT_PACKET_XFER_COMPLETE) { +		BUG_ON(i2c_dev->msg_buf_remaining);  		complete(&i2c_dev->msg_complete); +	}  	i2c_writel(i2c_dev, status, I2C_INT_STATUS);  	if (i2c_dev->is_dvc) @@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],  static u32 tegra_i2c_func(struct i2c_adapter *adap)  { -	return I2C_FUNC_I2C; +	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;  }  static const struct i2c_algorithm tegra_i2c_algo = { @@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)  }  #endif +#if defined(CONFIG_OF) +/* Match table for of_platform binding */ +static const struct of_device_id tegra_i2c_of_match[] __devinitconst = { +	{ .compatible = "nvidia,tegra20-i2c", }, +	{}, +}; +MODULE_DEVICE_TABLE(of, tegra_i2c_of_match); +#else +#define tegra_i2c_of_match NULL +#endif +  static struct platform_driver tegra_i2c_driver = {  	.probe   = tegra_i2c_probe,  	.remove  = tegra_i2c_remove, @@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {  	.driver  = {  		.name  = "tegra-i2c",  		.owner = THIS_MODULE, +		.of_match_table = tegra_i2c_of_match,  	},  }; diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 9827c5e686c..76b6d98bd29 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -327,7 +327,7 @@ config BLK_DEV_OPTI621  	select BLK_DEV_IDEPCI  	help  	  This is a driver for the OPTi 82C621 EIDE controller. -	  Please read the comments at the top of <file:drivers/ide/pci/opti621.c>. +	  Please read the comments at the top of <file:drivers/ide/opti621.c>.  config BLK_DEV_RZ1000  	tristate "RZ1000 chipset bugfix/support" @@ -365,7 +365,7 @@ config BLK_DEV_ALI15X3  	  normal dual channel support.  	  Please read the comments at the top of -	  <file:drivers/ide/pci/alim15x3.c>. +	  <file:drivers/ide/alim15x3.c>.  	  If unsure, say N. @@ -528,7 +528,7 @@ config BLK_DEV_NS87415  	  This driver adds detection and support for the NS87415 chip  	  (used mainly on SPARC64 and PA-RISC machines). 
-	  Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>. +	  Please read the comments at the top of <file:drivers/ide/ns87415.c>.  config BLK_DEV_PDC202XX_OLD  	tristate "PROMISE PDC202{46|62|65|67} support" @@ -547,7 +547,7 @@ config BLK_DEV_PDC202XX_OLD  	  for more than one card.  	  Please read the comments at the top of -	  <file:drivers/ide/pci/pdc202xx_old.c>. +	  <file:drivers/ide/pdc202xx_old.c>.  	  If unsure, say N. @@ -593,7 +593,7 @@ config BLK_DEV_SIS5513  	  ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740,  	  SiS745, SiS750 -	  Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>. +	  Please read the comments at the top of <file:drivers/ide/sis5513.c>.  config BLK_DEV_SL82C105  	tristate "Winbond SL82c105 support" @@ -616,7 +616,7 @@ config BLK_DEV_SLC90E66  	  look-a-like to the PIIX4 it should be a nice addition.  	  Please read the comments at the top of -	  <file:drivers/ide/pci/slc90e66.c>. +	  <file:drivers/ide/slc90e66.c>.  config BLK_DEV_TRM290  	tristate "Tekram TRM290 chipset support" @@ -625,7 +625,7 @@ config BLK_DEV_TRM290  	  This driver adds support for bus master DMA transfers  	  using the Tekram TRM290 PCI IDE chip. Volunteers are  	  needed for further tweaking and development. -	  Please read the comments at the top of <file:drivers/ide/pci/trm290.c>. +	  Please read the comments at the top of <file:drivers/ide/trm290.c>.  config BLK_DEV_VIA82CXXX  	tristate "VIA82CXXX chipset support" @@ -681,7 +681,7 @@ config BLK_DEV_IDE_AU1XXX         select IDE_XFER_MODE  choice         prompt "IDE Mode for AMD Alchemy Au1200" -       default CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA +       default BLK_DEV_IDE_AU1XXX_PIO_DBDMA         depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX  config BLK_DEV_IDE_AU1XXX_PIO_DBDMA @@ -836,7 +836,7 @@ config BLK_DEV_ALI14XX  	  of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster  	  I/O speeds to be set as well.  	  See the files <file:Documentation/ide/ide.txt> and -	  <file:drivers/ide/legacy/ali14xx.c> for more info. +	  <file:drivers/ide/ali14xx.c> for more info.  config BLK_DEV_DTC2278  	tristate "DTC-2278 support" @@ -847,7 +847,7 @@ config BLK_DEV_DTC2278  	  boot parameter. It enables support for the secondary IDE interface  	  of the DTC-2278 card, and permits faster I/O speeds to be set as  	  well. See the <file:Documentation/ide/ide.txt> and -	  <file:drivers/ide/legacy/dtc2278.c> files for more info. +	  <file:drivers/ide/dtc2278.c> files for more info.  config BLK_DEV_HT6560B  	tristate "Holtek HT6560B support" @@ -858,7 +858,7 @@ config BLK_DEV_HT6560B  	  boot parameter. It enables support for the secondary IDE interface  	  of the Holtek card, and permits faster I/O speeds to be set as well.  	  See the <file:Documentation/ide/ide.txt> and -	  <file:drivers/ide/legacy/ht6560b.c> files for more info. +	  <file:drivers/ide/ht6560b.c> files for more info.  config BLK_DEV_QD65XX  	tristate "QDI QD65xx support" @@ -867,7 +867,7 @@ config BLK_DEV_QD65XX  	help  	  This driver is enabled at runtime using the "qd65xx.probe" kernel  	  boot parameter.  It permits faster I/O speeds to be set.  See the -	  <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> +	  <file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c>  	  for more info.  config BLK_DEV_UMC8672 @@ -879,7 +879,7 @@ config BLK_DEV_UMC8672  	  boot parameter. It enables support for the secondary IDE interface  	  of the UMC-8672, and permits faster I/O speeds to be set as well.  	  
See the files <file:Documentation/ide/ide.txt> and -	  <file:drivers/ide/legacy/umc8672.c> for more info. +	  <file:drivers/ide/umc8672.c> for more info.  endif diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 274798068a5..16f69be820c 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)  	if (!(rq->cmd_flags & REQ_FLUSH))  		return BLKPREP_OK; -	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); +	if (rq->special) { +		cmd = rq->special; +		memset(cmd, 0, sizeof(*cmd)); +	} else { +		cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); +	}  	/* FIXME: map struct ide_taskfile on rq->cmd[] */  	BUG_ON(cmd == NULL); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 17bf9d95463..6cd642aaa4d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -287,7 +287,7 @@ void __free_ep(struct kref *kref)  	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {  		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);  		dst_release(ep->dst); -		l2t_release(L2DATA(ep->com.tdev), ep->l2t); +		l2t_release(ep->com.tdev, ep->l2t);  	}  	kfree(ep);  } @@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)  		release_tid(ep->com.tdev, GET_TID(rpl), NULL);  	cxgb3_free_atid(ep->com.tdev, ep->atid);  	dst_release(ep->dst); -	l2t_release(L2DATA(ep->com.tdev), ep->l2t); +	l2t_release(ep->com.tdev, ep->l2t);  	put_ep(&ep->com);  	return CPL_RET_BUF_DONE;  } @@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)  	if (!child_ep) {  		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",  		       __func__); -		l2t_release(L2DATA(tdev), l2t); +		l2t_release(tdev, l2t);  		dst_release(dst);  		goto reject;  	} @@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)  	if (!err)  		goto out; -	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); +	l2t_release(h->rdev.t3cdev_p, ep->l2t);  fail4:  	dst_release(ep->dst);  fail3: @@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,  	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,  	     l2t);  	dst_hold(new); -	l2t_release(L2DATA(ep->com.tdev), ep->l2t); +	l2t_release(ep->com.tdev, ep->l2t);  	ep->l2t = l2t;  	dst_release(old);  	ep->dst = new; diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 9882971827e..358cd7ee905 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c @@ -139,7 +139,7 @@ struct analog_port {  #include <linux/i8253.h>  #define GET_TIME(x)	do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) -#define DELTA(x,y)	(cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? CLOCK_TICK_RATE / HZ : 0))) +#define DELTA(x,y)	(cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? 
PIT_TICK_RATE / HZ : 0)))  #define TIME_NAME	(cpu_has_tsc?"TSC":"PIT")  static unsigned int get_time_pit(void)  { diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index 7b404e5443e..e34eeb8ae37 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -668,4 +668,3 @@ module_exit(adp5588_exit);  MODULE_LICENSE("GPL");  MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");  MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); -MODULE_ALIAS("platform:adp5588-keys"); diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c index c8242dd190d..aa17e024d80 100644 --- a/drivers/input/keyboard/ep93xx_keypad.c +++ b/drivers/input/keyboard/ep93xx_keypad.c @@ -20,6 +20,7 @@   * flag.   */ +#include <linux/module.h>  #include <linux/platform_device.h>  #include <linux/interrupt.h>  #include <linux/clk.h> diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index f270447ba95..a5a77915c65 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c @@ -702,7 +702,7 @@ err_iounmap:  err_free_mem_region:  	release_mem_region(res->start, resource_size(res));  err_free_mem: -	input_free_device(kbc->idev); +	input_free_device(input_dev);  	kfree(kbc);  	return err; diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c index e21deb1baa8..025417d74ca 100644 --- a/drivers/input/misc/ad714x-i2c.c +++ b/drivers/input/misc/ad714x-i2c.c @@ -1,7 +1,7 @@  /*   * AD714X CapTouch Programmable Controller driver (I2C bus)   * - * Copyright 2009 Analog Devices Inc. + * Copyright 2009-2011 Analog Devices Inc.   *   * Licensed under the GPL-2 or later.   */ @@ -27,54 +27,49 @@ static int ad714x_i2c_resume(struct device *dev)  static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); -static int ad714x_i2c_write(struct device *dev, unsigned short reg, -				unsigned short data) +static int ad714x_i2c_write(struct ad714x_chip *chip, +			    unsigned short reg, unsigned short data)  { -	struct i2c_client *client = to_i2c_client(dev); -	int ret = 0; -	u8 *_reg = (u8 *)&reg; -	u8 *_data = (u8 *)&data; +	struct i2c_client *client = to_i2c_client(chip->dev); +	int error; -	u8 tx[4] = { -		_reg[1], -		_reg[0], -		_data[1], -		_data[0] -	}; +	chip->xfer_buf[0] = cpu_to_be16(reg); +	chip->xfer_buf[1] = cpu_to_be16(data); -	ret = i2c_master_send(client, tx, 4); -	if (ret < 0) -		dev_err(&client->dev, "I2C write error\n"); +	error = i2c_master_send(client, (u8 *)chip->xfer_buf, +				2 * sizeof(*chip->xfer_buf)); +	if (unlikely(error < 0)) { +		dev_err(&client->dev, "I2C write error: %d\n", error); +		return error; +	} -	return ret; +	return 0;  } -static int ad714x_i2c_read(struct device *dev, unsigned short reg, -				unsigned short *data) +static int ad714x_i2c_read(struct ad714x_chip *chip, +			   unsigned short reg, unsigned short *data, size_t len)  { -	struct i2c_client *client = to_i2c_client(dev); -	int ret = 0; -	u8 *_reg = (u8 *)&reg; -	u8 *_data = (u8 *)data; +	struct i2c_client *client = to_i2c_client(chip->dev); +	int i; +	int error; -	u8 tx[2] = { -		_reg[1], -		_reg[0] -	}; -	u8 rx[2]; +	chip->xfer_buf[0] = cpu_to_be16(reg); -	ret = i2c_master_send(client, tx, 2); -	if (ret >= 0) -		ret = i2c_master_recv(client, rx, 2); +	error = i2c_master_send(client, (u8 *)chip->xfer_buf, +				sizeof(*chip->xfer_buf)); +	if (error >= 0) +		error = i2c_master_recv(client, (u8 *)chip->xfer_buf, +					len * 
sizeof(*chip->xfer_buf)); -	if (unlikely(ret < 0)) { -		dev_err(&client->dev, "I2C read error\n"); -	} else { -		_data[0] = rx[1]; -		_data[1] = rx[0]; +	if (unlikely(error < 0)) { +		dev_err(&client->dev, "I2C read error: %d\n", error); +		return error;  	} -	return ret; +	for (i = 0; i < len; i++) +		data[i] = be16_to_cpu(chip->xfer_buf[i]); + +	return 0;  }  static int __devinit ad714x_i2c_probe(struct i2c_client *client, diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c index 4120dd54930..875b5081136 100644 --- a/drivers/input/misc/ad714x-spi.c +++ b/drivers/input/misc/ad714x-spi.c @@ -1,12 +1,12 @@  /*   * AD714X CapTouch Programmable Controller driver (SPI bus)   * - * Copyright 2009 Analog Devices Inc. + * Copyright 2009-2011 Analog Devices Inc.   *   * Licensed under the GPL-2 or later.   */ -#include <linux/input.h>	/* BUS_I2C */ +#include <linux/input.h>	/* BUS_SPI */  #include <linux/module.h>  #include <linux/spi/spi.h>  #include <linux/pm.h> @@ -30,30 +30,68 @@ static int ad714x_spi_resume(struct device *dev)  static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); -static int ad714x_spi_read(struct device *dev, unsigned short reg, -		unsigned short *data) +static int ad714x_spi_read(struct ad714x_chip *chip, +			   unsigned short reg, unsigned short *data, size_t len)  { -	struct spi_device *spi = to_spi_device(dev); -	unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg; +	struct spi_device *spi = to_spi_device(chip->dev); +	struct spi_message message; +	struct spi_transfer xfer[2]; +	int i; +	int error; -	return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2); +	spi_message_init(&message); +	memset(xfer, 0, sizeof(xfer)); + +	chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | +					AD714x_SPI_READ | reg); +	xfer[0].tx_buf = &chip->xfer_buf[0]; +	xfer[0].len = sizeof(chip->xfer_buf[0]); +	spi_message_add_tail(&xfer[0], &message); + +	xfer[1].rx_buf = &chip->xfer_buf[1]; +	xfer[1].len = sizeof(chip->xfer_buf[1]) * len; +	spi_message_add_tail(&xfer[1], &message); + +	error = spi_sync(spi, &message); +	if (unlikely(error)) { +		dev_err(chip->dev, "SPI read error: %d\n", error); +		return error; +	} + +	for (i = 0; i < len; i++) +		data[i] = be16_to_cpu(chip->xfer_buf[i + 1]); + +	return 0;  } -static int ad714x_spi_write(struct device *dev, unsigned short reg, -		unsigned short data) +static int ad714x_spi_write(struct ad714x_chip *chip, +			    unsigned short reg, unsigned short data)  { -	struct spi_device *spi = to_spi_device(dev); -	unsigned short tx[2] = { -		AD714x_SPI_CMD_PREFIX | reg, -		data -	}; +	struct spi_device *spi = to_spi_device(chip->dev); +	int error; + +	chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg); +	chip->xfer_buf[1] = cpu_to_be16(data); + +	error = spi_write(spi, (u8 *)chip->xfer_buf, +			  2 * sizeof(*chip->xfer_buf)); +	if (unlikely(error)) { +		dev_err(chip->dev, "SPI write error: %d\n", error); +		return error; +	} -	return spi_write(spi, (u8 *)tx, 4); +	return 0;  }  static int __devinit ad714x_spi_probe(struct spi_device *spi)  {  	struct ad714x_chip *chip; +	int err; + +	spi->bits_per_word = 8; +	err = spi_setup(spi); +	if (err < 0) +		return err;  	chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,  			    ad714x_spi_read, ad714x_spi_write); diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c index c3a62c42cd2..ca42c7d2a3c 100644 --- a/drivers/input/misc/ad714x.c +++ b/drivers/input/misc/ad714x.c @@ -1,7 +1,7 @@  /*   * AD714X CapTouch 
Programmable Controller driver supporting AD7142/3/7/8/7A   * - * Copyright 2009 Analog Devices Inc. + * Copyright 2009-2011 Analog Devices Inc.   *   * Licensed under the GPL-2 or later.   */ @@ -59,7 +59,6 @@  #define STAGE11_AMBIENT		0x27D  #define PER_STAGE_REG_NUM      36 -#define STAGE_NUM              12  #define STAGE_CFGREG_NUM       8  #define SYS_CFGREG_NUM         8 @@ -124,27 +123,6 @@ struct ad714x_driver_data {   * information to integrate all things which will be private data   * of spi/i2c device   */ -struct ad714x_chip { -	unsigned short h_state; -	unsigned short l_state; -	unsigned short c_state; -	unsigned short adc_reg[STAGE_NUM]; -	unsigned short amb_reg[STAGE_NUM]; -	unsigned short sensor_val[STAGE_NUM]; - -	struct ad714x_platform_data *hw; -	struct ad714x_driver_data *sw; - -	int irq; -	struct device *dev; -	ad714x_read_t read; -	ad714x_write_t write; - -	struct mutex mutex; - -	unsigned product; -	unsigned version; -};  static void ad714x_use_com_int(struct ad714x_chip *ad714x,  				int start_stage, int end_stage) @@ -154,13 +132,13 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x,  	mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); -	ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); +	ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);  	data |= 1 << end_stage; -	ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); +	ad714x->write(ad714x, STG_COM_INT_EN_REG, data); -	ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); +	ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);  	data &= ~mask; -	ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); +	ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);  }  static void ad714x_use_thr_int(struct ad714x_chip *ad714x, @@ -171,13 +149,13 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x,  	mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); -	ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); +	ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);  	data &= ~(1 << end_stage); -	ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); +	ad714x->write(ad714x, STG_COM_INT_EN_REG, data); -	ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); +	ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);  	data |= mask; -	ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); +	ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);  }  static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x, @@ -273,15 +251,16 @@ static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx)  	struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];  	int i; +	ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage, +			&ad714x->adc_reg[hw->start_stage], +			hw->end_stage - hw->start_stage + 1); +  	for (i = hw->start_stage; i <= hw->end_stage; i++) { -		ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, -			&ad714x->adc_reg[i]); -		ad714x->read(ad714x->dev, -				STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, -				&ad714x->amb_reg[i]); +		ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, +				&ad714x->amb_reg[i], 1); -		ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] - -				ad714x->amb_reg[i]); +		ad714x->sensor_val[i] = +			abs(ad714x->adc_reg[i] - ad714x->amb_reg[i]);  	}  } @@ -444,15 +423,16 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx)  	struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];  	int i; +	ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage, +			&ad714x->adc_reg[hw->start_stage], +			hw->end_stage - hw->start_stage + 1); +  	for (i = 
hw->start_stage; i <= hw->end_stage; i++) { -		ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, -			&ad714x->adc_reg[i]); -		ad714x->read(ad714x->dev, -				STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, -				&ad714x->amb_reg[i]); +		ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, +				&ad714x->amb_reg[i], 1);  		if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) -			ad714x->sensor_val[i] = ad714x->adc_reg[i] - -				ad714x->amb_reg[i]; +			ad714x->sensor_val[i] = +				ad714x->adc_reg[i] - ad714x->amb_reg[i];  		else  			ad714x->sensor_val[i] = 0;  	} @@ -597,15 +577,16 @@ static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx)  	struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];  	int i; +	ad714x->read(ad714x, CDC_RESULT_S0 + hw->x_start_stage, +			&ad714x->adc_reg[hw->x_start_stage], +			hw->x_end_stage - hw->x_start_stage + 1); +  	for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { -		ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, -				&ad714x->adc_reg[i]); -		ad714x->read(ad714x->dev, -				STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, -				&ad714x->amb_reg[i]); +		ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, +				&ad714x->amb_reg[i], 1);  		if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) -			ad714x->sensor_val[i] = ad714x->adc_reg[i] - -				ad714x->amb_reg[i]; +			ad714x->sensor_val[i] = +				ad714x->adc_reg[i] - ad714x->amb_reg[i];  		else  			ad714x->sensor_val[i] = 0;  	} @@ -891,7 +872,7 @@ static int ad714x_hw_detect(struct ad714x_chip *ad714x)  {  	unsigned short data; -	ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data); +	ad714x->read(ad714x, AD714X_PARTID_REG, &data, 1);  	switch (data & 0xFFF0) {  	case AD7142_PARTID:  		ad714x->product = 0x7142; @@ -940,23 +921,20 @@ static void ad714x_hw_init(struct ad714x_chip *ad714x)  	for (i = 0; i < STAGE_NUM; i++) {  		reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM;  		for (j = 0; j < STAGE_CFGREG_NUM; j++) -			ad714x->write(ad714x->dev, reg_base + j, +			ad714x->write(ad714x, reg_base + j,  					ad714x->hw->stage_cfg_reg[i][j]);  	}  	for (i = 0; i < SYS_CFGREG_NUM; i++) -		ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i, +		ad714x->write(ad714x, AD714X_SYSCFG_REG + i,  			ad714x->hw->sys_cfg_reg[i]);  	for (i = 0; i < SYS_CFGREG_NUM; i++) -		ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i, -			&data); +		ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data, 1); -	ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF); +	ad714x->write(ad714x, AD714X_STG_CAL_EN_REG, 0xFFF);  	/* clear all interrupts */ -	ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); -	ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data); -	ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data); +	ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);  }  static irqreturn_t ad714x_interrupt_thread(int irq, void *data) @@ -966,9 +944,7 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data)  	mutex_lock(&ad714x->mutex); -	ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state); -	ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state); -	ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state); +	ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);  	for (i = 0; i < ad714x->hw->button_num; i++)  		ad714x_button_state_machine(ad714x, i); @@ -1245,7 +1221,7 @@ int ad714x_disable(struct ad714x_chip *ad714x)  	mutex_lock(&ad714x->mutex);  	data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; -	ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data); +	
ad714x->write(ad714x, AD714X_PWR_CTRL, data);  	mutex_unlock(&ad714x->mutex); @@ -1255,24 +1231,20 @@ EXPORT_SYMBOL(ad714x_disable);  int ad714x_enable(struct ad714x_chip *ad714x)  { -	unsigned short data; -  	dev_dbg(ad714x->dev, "%s enter\n", __func__);  	mutex_lock(&ad714x->mutex);  	/* resume to non-shutdown mode */ -	ad714x->write(ad714x->dev, AD714X_PWR_CTRL, +	ad714x->write(ad714x, AD714X_PWR_CTRL,  			ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]);  	/* make sure the interrupt output line is not low level after resume,  	 * otherwise we will get no chance to enter falling-edge irq again  	 */ -	ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); -	ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data); -	ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data); +	ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);  	mutex_unlock(&ad714x->mutex); diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h index 45c54fb13f0..3c85455aa66 100644 --- a/drivers/input/misc/ad714x.h +++ b/drivers/input/misc/ad714x.h @@ -1,7 +1,7 @@  /*   * AD714X CapTouch Programmable Controller driver (bus interfaces)   * - * Copyright 2009 Analog Devices Inc. + * Copyright 2009-2011 Analog Devices Inc.   *   * Licensed under the GPL-2 or later.   */ @@ -11,11 +11,40 @@  #include <linux/types.h> +#define STAGE_NUM              12 +  struct device; +struct ad714x_platform_data; +struct ad714x_driver_data;  struct ad714x_chip; -typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *); -typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short); +typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *, size_t); +typedef int (*ad714x_write_t)(struct ad714x_chip *, unsigned short, unsigned short); + +struct ad714x_chip { +	unsigned short l_state; +	unsigned short h_state; +	unsigned short c_state; +	unsigned short adc_reg[STAGE_NUM]; +	unsigned short amb_reg[STAGE_NUM]; +	unsigned short sensor_val[STAGE_NUM]; + +	struct ad714x_platform_data *hw; +	struct ad714x_driver_data *sw; + +	int irq; +	struct device *dev; +	ad714x_read_t read; +	ad714x_write_t write; + +	struct mutex mutex; + +	unsigned product; +	unsigned version; + +	__be16 xfer_buf[16] ____cacheline_aligned; + +};  int ad714x_disable(struct ad714x_chip *ad714x);  int ad714x_enable(struct ad714x_chip *ad714x); diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index b09c7d12721..ab860511f01 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)  				le16_to_cpu(dev->ctl_req->wIndex),  				dev->ctl_data,  				USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); -	if (error && error != EINTR) +	if (error < 0 && error != -EINTR)  		err("%s: usb_control_msg() failed %d", __func__, error);  } diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 6c76cf79299..0794778295f 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c @@ -234,7 +234,7 @@ static const struct of_device_id mma8450_dt_ids[] = {  	{ .compatible = "fsl,mma8450", },  	{ /* sentinel */ }  }; -MODULE_DEVICE_TABLE(i2c, mma8450_dt_ids); +MODULE_DEVICE_TABLE(of, mma8450_dt_ids);  static struct i2c_driver mma8450_driver = {  	.driver = { diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c index b95fac15b2e..f71dc728da5 100644 --- a/drivers/input/misc/mpu3050.c +++ b/drivers/input/misc/mpu3050.c @@ -282,7 +282,7 @@ err_free_irq:  
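
The cm109 hunk above tightens the usb_control_msg() error check: the function returns the number of bytes transferred on success and a negative errno on failure, so the old test "if (error && error != EINTR)" compared against a positive constant that a negative return value can never equal. A minimal sketch of the corrected convention; the wrapper and its names are illustrative, not taken from the cm109 driver:

#include <linux/usb.h>
#include <linux/errno.h>

/* Illustrative only: report control-transfer failures, but stay quiet
 * when the transfer was merely interrupted.  usb_control_msg() returns
 * the number of bytes transferred (>= 0) on success or a negative errno.
 */
static void example_ctl_msg(struct usb_device *udev, unsigned int pipe,
			    u8 request, u8 requesttype, u16 value, u16 index,
			    void *data, u16 size)
{
	int ret = usb_control_msg(udev, pipe, request, requesttype,
				  value, index, data, size, 1000);

	if (ret < 0 && ret != -EINTR)
		dev_err(&udev->dev, "control message failed: %d\n", ret);
}
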
err_pm_set_suspended:  	pm_runtime_set_suspended(&client->dev);  err_free_mem: -	input_unregister_device(idev); +	input_free_device(idev);  	kfree(sensor);  	return error;  } diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c index 2c8b84dd9da..2be21694fac 100644 --- a/drivers/input/misc/rotary_encoder.c +++ b/drivers/input/misc/rotary_encoder.c @@ -7,7 +7,7 @@   * state machine code inspired by code from Tim Ruetz   *   * A generic driver for rotary encoders connected to GPIO lines. - * See file:Documentation/input/rotary_encoder.txt for more information + * See file:Documentation/input/rotary-encoder.txt for more information   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 3126983c004..5ec617e28f7 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -67,6 +67,18 @@  #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI	0x0245  #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO	0x0246  #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS	0x0247 +/* MacbookAir4,1 (unibody, July 2011) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI	0x0249 +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO	0x024a +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS	0x024b +/* MacbookAir4,2 (unibody, July 2011) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI	0x024c +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO	0x024d +#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS	0x024e +/* Macbook8,2 (unibody) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI	0x0252 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO	0x0253 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS	0x0254  #define BCM5974_DEVICE(prod) {					\  	.match_flags = (USB_DEVICE_ID_MATCH_DEVICE |		\ @@ -104,6 +116,18 @@ static const struct usb_device_id bcm5974_table[] = {  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), +	/* MacbookAir4,1 */ +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), +	/* MacbookAir4,2 */ +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), +	/* MacbookPro8,2 */ +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI), +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO), +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),  	/* Terminating entry */  	{}  }; @@ -294,6 +318,42 @@ static const struct bcm5974_config bcm5974_config_table[] = {  		{ DIM_X, DIM_X / SN_COORD, -4415, 5050 },  		{ DIM_Y, DIM_Y / SN_COORD, -55, 6680 }  	}, +	{ +		USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI, +		USB_DEVICE_ID_APPLE_WELLSPRING6_ISO, +		USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, +		HAS_INTEGRATED_BUTTON, +		0x84, sizeof(struct bt_data), +		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, +		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, +		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, +		{ DIM_X, DIM_X / SN_COORD, -4620, 5140 }, +		{ DIM_Y, DIM_Y / SN_COORD, -150, 6600 } +	}, +	{ +		USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI, +		USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO, +		USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, +		HAS_INTEGRATED_BUTTON, +		0x84, sizeof(struct bt_data), +		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 
+ SIZEOF_ALL_FINGERS, +		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, +		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, +		{ DIM_X, DIM_X / SN_COORD, -4750, 5280 }, +		{ DIM_Y, DIM_Y / SN_COORD, -150, 6730 } +	}, +	{ +		USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI, +		USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO, +		USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, +		HAS_INTEGRATED_BUTTON, +		0x84, sizeof(struct bt_data), +		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, +		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, +		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, +		{ DIM_X, DIM_X / SN_COORD, -4620, 5140 }, +		{ DIM_Y, DIM_Y / SN_COORD, -150, 6600 } +	},  	{}  }; diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 449c0a46dba..958b4eb6369 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -49,6 +49,7 @@ struct hid_descriptor {  #define USB_REQ_GET_REPORT	0x01  #define USB_REQ_SET_REPORT	0x09  #define WAC_HID_FEATURE_REPORT	0x03 +#define WAC_MSG_RETRIES		5  static int usb_get_report(struct usb_interface *intf, unsigned char type,  				unsigned char id, void *buf, int size) @@ -165,7 +166,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi  			report,  			hid_desc->wDescriptorLength,  			5000); /* 5 secs */ -	} while (result < 0 && limit++ < 5); +	} while (result < 0 && limit++ < WAC_MSG_RETRIES);  	/* No need to parse the Descriptor. It isn't an error though */  	if (result < 0) @@ -228,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi  							get_unaligned_le16(&report[i + 3]);  						i += 4;  					} -				} else if (usage == WCM_DIGITIZER) { -					/* max pressure isn't reported -					features->pressure_max = (unsigned short) -							(report[i+4] << 8  | report[i + 3]); -					*/ -					features->pressure_max = 255; -					i += 4;  				}  				break; @@ -290,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi  				pen = 1;  				i++;  				break; - -			case HID_USAGE_UNDEFINED: -				if (usage == WCM_DESKTOP && finger) /* capacity */ -					features->pressure_max = -						get_unaligned_le16(&report[i + 3]); -				i += 4; -				break;  			}  			break; @@ -319,24 +306,26 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat  	int limit = 0, report_id = 2;  	int error = -ENOMEM; -	rep_data = kmalloc(2, GFP_KERNEL); +	rep_data = kmalloc(4, GFP_KERNEL);  	if (!rep_data)  		return error; -	/* ask to report tablet data if it is 2FGT Tablet PC or +	/* ask to report tablet data if it is MT Tablet PC or  	 * not a Tablet PC */  	if (features->type == TABLETPC2FG) {  		do {  			rep_data[0] = 3;  			rep_data[1] = 4; +			rep_data[2] = 0; +			rep_data[3] = 0;  			report_id = 3;  			error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, -				report_id, rep_data, 2); +				report_id, rep_data, 4);  			if (error >= 0)  				error = usb_get_report(intf,  					WAC_HID_FEATURE_REPORT, report_id, -					rep_data, 3); -		} while ((error < 0 || rep_data[1] != 4) && limit++ < 5); +					rep_data, 4); +		} while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);  	} else if (features->type != TABLETPC) {  		do {  			rep_data[0] = 2; @@ -347,7 +336,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat  				error = usb_get_report(intf,  					WAC_HID_FEATURE_REPORT, report_id,  					rep_data, 2); -		} while ((error < 0 || rep_data[1] != 2) && limit++ < 5); +		} 
while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES);  	}  	kfree(rep_data); diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 03ebcc8b24b..9dea71849f4 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)  	int i;  	for (i = 0; i < 2; i++) { -		int p = data[9 * i + 2]; -		bool touch = p && !wacom->shared->stylus_in_proximity; +		int offset = (data[1] & 0x80) ? (8 * i) : (9 * i); +		bool touch = data[offset + 3] & 0x80; -		input_mt_slot(input, i); -		input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);  		/*  		 * Touch events need to be disabled while stylus is  		 * in proximity because user's hand is resting on touchpad  		 * and sending unwanted events.  User expects tablet buttons  		 * to continue working though.  		 */ +		touch = touch && !wacom->shared->stylus_in_proximity; + +		input_mt_slot(input, i); +		input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);  		if (touch) { -			int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff; -			int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff; +			int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff; +			int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff;  			if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {  				x <<= 5;  				y <<= 5;  			} -			input_report_abs(input, ABS_MT_PRESSURE, p);  			input_report_abs(input, ABS_MT_POSITION_X, x);  			input_report_abs(input, ABS_MT_POSITION_Y, y);  		} @@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  			     features->x_fuzz, 0);  	input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,  			     features->y_fuzz, 0); -	input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, -			     features->pressure_fuzz, 0);  	if (features->device_type == BTN_TOOL_PEN) { +		input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, +			     features->pressure_fuzz, 0); +  		/* penabled devices have fixed resolution for each model */  		input_abs_set_res(input_dev, ABS_X, features->x_resolution);  		input_abs_set_res(input_dev, ABS_Y, features->y_resolution); @@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  		__set_bit(BTN_TOOL_MOUSE, input_dev->keybit);  		__set_bit(BTN_STYLUS, input_dev->keybit);  		__set_bit(BTN_STYLUS2, input_dev->keybit); + +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit);  		break;  	case WACOM_21UX2: @@ -1120,12 +1124,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  		for (i = 0; i < 8; i++)  			__set_bit(BTN_0 + i, input_dev->keybit); -		if (wacom_wac->features.type != WACOM_21UX2) { -			input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); -			input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); -		} - +		input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); +		input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);  		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + +		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit); +  		wacom_setup_cintiq(wacom_wac);  		break; @@ -1150,6 +1154,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  		/* fall through */  	case INTUOS: +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit); +  		wacom_setup_intuos(wacom_wac);  		break; @@ -1165,6 +1171,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);  		
wacom_setup_intuos(wacom_wac); + +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit);  		break;  	case TABLETPC2FG: @@ -1183,26 +1191,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  	case TABLETPC:  		__clear_bit(ABS_MISC, input_dev->absbit); +		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit); +  		if (features->device_type != BTN_TOOL_PEN)  			break;  /* no need to process stylus stuff */  		/* fall through */  	case PL: -	case PTU:  	case DTU:  		__set_bit(BTN_TOOL_PEN, input_dev->keybit); +		__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);  		__set_bit(BTN_STYLUS, input_dev->keybit);  		__set_bit(BTN_STYLUS2, input_dev->keybit); + +		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit); +		break; + +	case PTU: +		__set_bit(BTN_STYLUS2, input_dev->keybit);  		/* fall through */  	case PENPARTNER: +		__set_bit(BTN_TOOL_PEN, input_dev->keybit);  		__set_bit(BTN_TOOL_RUBBER, input_dev->keybit); +		__set_bit(BTN_STYLUS, input_dev->keybit); + +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit);  		break;  	case BAMBOO_PT:  		__clear_bit(ABS_MISC, input_dev->absbit); +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit); +  		if (features->device_type == BTN_TOOL_DOUBLETAP) {  			__set_bit(BTN_LEFT, input_dev->keybit);  			__set_bit(BTN_FORWARD, input_dev->keybit); @@ -1460,6 +1482,9 @@ static const struct wacom_features wacom_features_0xD3 =  static const struct wacom_features wacom_features_0xD4 =  	{ "Wacom Bamboo Pen",     WACOM_PKGLEN_BBFUN,     14720,  9200, 1023,  	  63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0xD5 = +	{ "Wacom Bamboo Pen 6x8",     WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, +	  63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };  static const struct wacom_features wacom_features_0xD6 =  	{ "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN,   14720,  9200, 1023,  	  63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -1564,6 +1589,7 @@ const struct usb_device_id wacom_ids[] = {  	{ USB_DEVICE_WACOM(0xD2) },  	{ USB_DEVICE_WACOM(0xD3) },  	{ USB_DEVICE_WACOM(0xD4) }, +	{ USB_DEVICE_WACOM(0xD5) },  	{ USB_DEVICE_WACOM(0xD6) },  	{ USB_DEVICE_WACOM(0xD7) },  	{ USB_DEVICE_WACOM(0xD8) }, diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index ae00604a6a8..f5d66859f23 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -244,6 +244,7 @@ struct mxt_finger {  	int x;  	int y;  	int area; +	int pressure;  };  /* Each client has this additional data */ @@ -536,6 +537,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)  					finger[id].x);  			input_report_abs(input_dev, ABS_MT_POSITION_Y,  					finger[id].y); +			input_report_abs(input_dev, ABS_MT_PRESSURE, +					finger[id].pressure);  		} else {  			finger[id].status = 0;  		} @@ -546,6 +549,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)  	if (status != MXT_RELEASE) {  		input_report_abs(input_dev, ABS_X, finger[single_id].x);  		input_report_abs(input_dev, ABS_Y, finger[single_id].y); +		input_report_abs(input_dev, +				 ABS_PRESSURE, finger[single_id].pressure);  	}  	input_sync(input_dev); @@ -560,6 +565,7 @@ static void mxt_input_touchevent(struct mxt_data *data,  	int x;  	int y;  	int area; +	int pressure;  	/* Check the touch is present on the screen */  	if (!(status & MXT_DETECT)) { @@ -584,6 +590,7 @@ static void mxt_input_touchevent(struct mxt_data *data,  		y = y >> 2;  	area = message->message[4]; +	
pressure = message->message[5];  	dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,  		status & MXT_MOVE ? "moved" : "pressed", @@ -594,6 +601,7 @@ static void mxt_input_touchevent(struct mxt_data *data,  	finger[id].x = x;  	finger[id].y = y;  	finger[id].area = area; +	finger[id].pressure = pressure;  	mxt_input_report(data, id);  } @@ -1116,6 +1124,8 @@ static int __devinit mxt_probe(struct i2c_client *client,  			     0, data->max_x, 0, 0);  	input_set_abs_params(input_dev, ABS_Y,  			     0, data->max_y, 0, 0); +	input_set_abs_params(input_dev, ABS_PRESSURE, +			     0, 255, 0, 0);  	/* For multi touch */  	input_mt_init_slots(input_dev, MXT_MAX_FINGER); @@ -1125,6 +1135,8 @@ static int __devinit mxt_probe(struct i2c_client *client,  			     0, data->max_x, 0, 0);  	input_set_abs_params(input_dev, ABS_MT_POSITION_Y,  			     0, data->max_y, 0, 0); +	input_set_abs_params(input_dev, ABS_MT_PRESSURE, +			     0, 255, 0, 0);  	input_set_drvdata(input_dev, data);  	i2c_set_clientdata(client, data); diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c index 4f2713d9279..4627fe55b40 100644 --- a/drivers/input/touchscreen/max11801_ts.c +++ b/drivers/input/touchscreen/max11801_ts.c @@ -9,7 +9,8 @@   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License. + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.   */  /* diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c index 089b0a0f3d8..0e8f63e5b36 100644 --- a/drivers/input/touchscreen/tnetv107x-ts.c +++ b/drivers/input/touchscreen/tnetv107x-ts.c @@ -13,6 +13,7 @@   * GNU General Public License for more details.   */ +#include <linux/module.h>  #include <linux/kernel.h>  #include <linux/err.h>  #include <linux/errno.h> diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index c14412ef464..9941d39df43 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ b/drivers/input/touchscreen/wacom_w8001.c @@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001)  	dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);  	strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); +	__set_bit(INPUT_PROP_DIRECT, dev->propbit); +  	/* penabled? */  	error = w8001_command(w8001, W8001_CMD_QUERY, true);  	if (!error) { diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index a14f8dc2346..0e4227f457a 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)   * Writes the command to the IOMMUs command buffer and informs the   * hardware about the new command.   
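
The atmel_mxt_ts additions above follow the usual two-step pattern for a new absolute axis: declare its range once at probe time with input_set_abs_params(), then report a value for it from the message handler next to the existing position axes. A small sketch of the reporting side, using hypothetical names rather than the driver's own mxt_* helpers:

#include <linux/input.h>
#include <linux/input/mt.h>

/* Illustrative per-contact report: position plus a pressure axis.
 * Assumes the axes were declared at probe time, e.g.
 *	input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
 */
static void example_report_contact(struct input_dev *dev, int slot,
				   bool down, int x, int y, int pressure)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev, MT_TOOL_FINGER, down);

	if (down) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
		input_report_abs(dev, ABS_MT_PRESSURE, pressure);
	}

	input_sync(dev);
}
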
*/ -static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) +static int iommu_queue_command_sync(struct amd_iommu *iommu, +				    struct iommu_cmd *cmd, +				    bool sync)  {  	u32 left, tail, head, next_tail;  	unsigned long flags; @@ -639,13 +641,18 @@ again:  	copy_cmd_to_buffer(iommu, cmd, tail);  	/* We need to sync now to make sure all commands are processed */ -	iommu->need_sync = true; +	iommu->need_sync = sync;  	spin_unlock_irqrestore(&iommu->lock, flags);  	return 0;  } +static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) +{ +	return iommu_queue_command_sync(iommu, cmd, true); +} +  /*   * This function queues a completion wait command into the command   * buffer of an IOMMU @@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)  	build_completion_wait(&cmd, (u64)&sem); -	ret = iommu_queue_command(iommu, &cmd); +	ret = iommu_queue_command_sync(iommu, &cmd, false);  	if (ret)  		return ret; @@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)  static void domain_flush_devices(struct protection_domain *domain)  {  	struct iommu_dev_data *dev_data; -	unsigned long flags; - -	spin_lock_irqsave(&domain->lock, flags);  	list_for_each_entry(dev_data, &domain->dev_list, list)  		device_flush_dte(dev_data); - -	spin_unlock_irqrestore(&domain->lock, flags);  }  /**************************************************************************** diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 3dc9befa5ae..6dcc7e2d54d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)  		return ret;  	} -	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); +	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);  	if (ret)  		printk(KERN_ERR "IOMMU: can't request irq\n");  	return ret; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index c621c98c99d..a88f3cbb100 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)  	return (pte->val & 3) != 0;  } +static inline bool dma_pte_superpage(struct dma_pte *pte) +{ +	return (pte->val & (1 << 7)); +} +  static inline int first_pte_in_page(struct dma_pte *pte)  {  	return !((unsigned long)pte & ~VTD_PAGE_MASK); @@ -404,6 +409,9 @@ static int dmar_forcedac;  static int intel_iommu_strict;  static int intel_iommu_superpage = 1; +int intel_iommu_gfx_mapped; +EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); +  #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))  static DEFINE_SPINLOCK(device_domain_lock);  static LIST_HEAD(device_domain_list); @@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)  static void domain_update_iommu_superpage(struct dmar_domain *domain)  { -	int i, mask = 0xf; +	struct dmar_drhd_unit *drhd; +	struct intel_iommu *iommu = NULL; +	int mask = 0xf;  	if (!intel_iommu_superpage) {  		domain->iommu_superpage = 0;  		return;  	} -	domain->iommu_superpage = 4; /* 1TiB */ - -	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { -		mask |= cap_super_page_val(g_iommus[i]->cap); +	/* set iommu_superpage to the smallest common denominator */ +	for_each_active_iommu(iommu, drhd) { +		mask &= cap_super_page_val(iommu->cap);  		if (!mask) {  			break;  		} @@ -730,29 +739,23 @@ out:  }  static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, -				      
unsigned long pfn, int large_level) +				      unsigned long pfn, int target_level)  {  	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;  	struct dma_pte *parent, *pte = NULL;  	int level = agaw_to_level(domain->agaw); -	int offset, target_level; +	int offset;  	BUG_ON(!domain->pgd);  	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);  	parent = domain->pgd; -	/* Search pte */ -	if (!large_level) -		target_level = 1; -	else -		target_level = large_level; -  	while (level > 0) {  		void *tmp_page;  		offset = pfn_level_offset(pfn, level);  		pte = &parent[offset]; -		if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE)) +		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))  			break;  		if (level == target_level)  			break; @@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,  }  /* clear last level pte, a tlb flush should be followed */ -static void dma_pte_clear_range(struct dmar_domain *domain, +static int dma_pte_clear_range(struct dmar_domain *domain,  				unsigned long start_pfn,  				unsigned long last_pfn)  {  	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;  	unsigned int large_page = 1;  	struct dma_pte *first_pte, *pte; +	int order;  	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);  	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); @@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,  				   (void *)pte - (void *)first_pte);  	} while (start_pfn && start_pfn <= last_pfn); + +	order = (large_page - 1) * 9; +	return order;  }  /* free page table pages. last level pte should already be cleared */ @@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void)  		}  	} -	if (dmar_map_gfx) -		return; -  	for_each_drhd_unit(drhd) {  		int i;  		if (drhd->ignored || drhd->include_all) @@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void)  		for (i = 0; i < drhd->devices_cnt; i++)  			if (drhd->devices[i] && -				!IS_GFX_DEVICE(drhd->devices[i])) +			    !IS_GFX_DEVICE(drhd->devices[i]))  				break;  		if (i < drhd->devices_cnt)  			continue; -		/* bypass IOMMU if it is just for gfx devices */ -		drhd->ignored = 1; -		for (i = 0; i < drhd->devices_cnt; i++) { -			if (!drhd->devices[i]) -				continue; -			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; +		/* This IOMMU has *only* gfx devices. 
Either bypass it or +		   set the gfx_mapped flag, as appropriate */ +		if (dmar_map_gfx) { +			intel_iommu_gfx_mapped = 1; +		} else { +			drhd->ignored = 1; +			for (i = 0; i < drhd->devices_cnt; i++) { +				if (!drhd->devices[i]) +					continue; +				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; +			}  		}  	}  } @@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,  			found = 1;  	} +	spin_unlock_irqrestore(&device_domain_lock, flags); +  	if (found == 0) {  		unsigned long tmp_flags;  		spin_lock_irqsave(&domain->iommu_lock, tmp_flags); @@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,  			spin_unlock_irqrestore(&iommu->lock, tmp_flags);  		}  	} - -	spin_unlock_irqrestore(&device_domain_lock, flags);  }  static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) @@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)  		vm_domain_exit(dmar_domain);  		return -ENOMEM;  	} +	domain_update_iommu_cap(dmar_domain);  	domain->priv = dmar_domain;  	return 0; @@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,  {  	struct dmar_domain *dmar_domain = domain->priv;  	size_t size = PAGE_SIZE << gfp_order; +	int order; -	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, +	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,  			    (iova + size - 1) >> VTD_PAGE_SHIFT);  	if (dmar_domain->max_addr == iova + size)  		dmar_domain->max_addr = iova; -	return gfp_order; +	return order;  }  static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, @@ -3950,7 +3961,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)  	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {  		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");  		dmar_map_gfx = 0; -	} +	} else if (dmar_map_gfx) { +		/* we have to ensure the gfx device is idle before we flush */ +		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n"); +		intel_iommu_strict = 1; +       }  }  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 6ed82add6ff..6ddb795e31c 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -2308,11 +2308,11 @@ static int __init isdn_init(void)  	int i;  	char tmprev[50]; -	if (!(dev = vmalloc(sizeof(isdn_dev)))) { +	dev = vzalloc(sizeof(isdn_dev)); +	if (!dev) {  		printk(KERN_WARNING "isdn: Could not allocate device-struct.\n");  		return -EIO;  	} -	memset((char *) dev, 0, sizeof(isdn_dev));  	init_timer(&dev->timer);  	dev->timer.function = isdn_timer_funct;  	spin_lock_init(&dev->lock); diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 2877291a9ed..0c41553ce68 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -1052,12 +1052,11 @@ dspcreate(struct channel_req *crq)  	if (crq->protocol != ISDN_P_B_L2DSP  	 && crq->protocol != ISDN_P_B_L2DSPHDLC)  		return -EPROTONOSUPPORT; -	ndsp = vmalloc(sizeof(struct dsp)); +	ndsp = vzalloc(sizeof(struct dsp));  	if (!ndsp) {  		printk(KERN_ERR "%s: vmalloc struct dsp failed\n", __func__);  		return -ENOMEM;  	} -	memset(ndsp, 0, sizeof(struct dsp));  	if (dsp_debug & DEBUG_DSP_CTRL)  		printk(KERN_DEBUG "%s: creating new dsp instance\n", 
__func__); diff --git a/drivers/isdn/mISDN/l1oip_codec.c b/drivers/isdn/mISDN/l1oip_codec.c index bbfd1b863ed..5a89972624d 100644 --- a/drivers/isdn/mISDN/l1oip_codec.c +++ b/drivers/isdn/mISDN/l1oip_codec.c @@ -330,14 +330,12 @@ l1oip_4bit_alloc(int ulaw)  		return 0;  	/* alloc conversion tables */ -	table_com = vmalloc(65536); -	table_dec = vmalloc(512); +	table_com = vzalloc(65536); +	table_dec = vzalloc(512);  	if (!table_com || !table_dec) {  		l1oip_4bit_free();  		return -ENOMEM;  	} -	memset(table_com, 0, 65536); -	memset(table_dec, 0, 512);  	/* generate compression table */  	i1 = 0;  	while (i1 < 256) { diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index b591e726a6f..807c875f1c2 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -400,7 +400,7 @@ config LEDS_TRIGGER_TIMER  	  This allows LEDs to be controlled by a programmable timer  	  via sysfs. Some LED hardware can be programmed to start  	  blinking the LED without any further software interaction. -	  For more details read Documentation/leds-class.txt. +	  For more details read Documentation/leds/leds-class.txt.  	  If unsure, say Y. diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c index b9826032450..8c00937bf7e 100644 --- a/drivers/leds/leds-ams-delta.c +++ b/drivers/leds/leds-ams-delta.c @@ -8,6 +8,7 @@   * published by the Free Software Foundation.   */ +#include <linux/module.h>  #include <linux/kernel.h>  #include <linux/init.h>  #include <linux/platform_device.h> diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c index 3ebe3824662..ea2185531f8 100644 --- a/drivers/leds/leds-bd2802.c +++ b/drivers/leds/leds-bd2802.c @@ -662,6 +662,11 @@ failed_unregister_led1_R:  static void bd2802_unregister_led_classdev(struct bd2802_led *led)  {  	cancel_work_sync(&led->work); +	led_classdev_unregister(&led->cdev_led2b); +	led_classdev_unregister(&led->cdev_led2g); +	led_classdev_unregister(&led->cdev_led2r); +	led_classdev_unregister(&led->cdev_led1b); +	led_classdev_unregister(&led->cdev_led1g);  	led_classdev_unregister(&led->cdev_led1r);  } diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c index e4ce1fd4633..bcfbd3a60ea 100644 --- a/drivers/leds/leds-hp6xx.c +++ b/drivers/leds/leds-hp6xx.c @@ -10,6 +10,7 @@   * published by the Free Software Foundation.   
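
The isdn_common, dsp_core and l1oip_codec hunks above are straight conversions of the vmalloc()-then-memset() idiom to vzalloc(), which hands back virtually contiguous memory that is already zeroed. A minimal sketch of the pattern; the structure is illustrative:

#include <linux/vmalloc.h>

struct example_state {
	int counters[4096];	/* large enough that vmalloc space makes sense */
};

/* vzalloc(size) behaves like vmalloc(size) followed by memset(p, 0, size),
 * so there is no window in which the buffer holds stale data.  The result
 * is still freed with vfree().
 */
static struct example_state *example_alloc(void)
{
	return vzalloc(sizeof(struct example_state));
}
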
*/ +#include <linux/module.h>  #include <linux/kernel.h>  #include <linux/init.h>  #include <linux/platform_device.h> diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index d87c9d02f78..328c64c0841 100644 --- a/drivers/leds/ledtrig-timer.c +++ b/drivers/leds/ledtrig-timer.c @@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev,  	if (count == size) {  		led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); +		led_cdev->blink_delay_on = state;  		ret = count;  	} @@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev,  	if (count == size) {  		led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); +		led_cdev->blink_delay_off = state;  		ret = count;  	} diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 49da55c1528..8c2a000cf3f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)  	}  	ti->num_flush_requests = 1; +	ti->discard_zeroes_data_unsupported = 1; +  	return 0;  bad: diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 89f73ca22cf..f84c08029b2 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,  		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>  		 */  		if (!strcasecmp(arg_name, "corrupt_bio_byte")) { -			if (!argc) +			if (!argc) {  				ti->error = "Feature corrupt_bio_byte requires parameters"; +				return -EINVAL; +			}  			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);  			if (r) diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index f8214702963..32ac70861d6 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -628,6 +628,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,  	job->kc = kc;  	job->fn = fn;  	job->context = context; +	job->master_job = job;  	atomic_inc(&kc->nr_jobs); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index a002dd85db1..86df8b2cf92 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,  				rs->ti->error = "write_mostly option is only valid for RAID1";  				return -EINVAL;  			} -			if (value > rs->md.raid_disks) { +			if (value >= rs->md.raid_disks) {  				rs->ti->error = "Invalid write_mostly drive index given";  				return -EINVAL;  			} diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 986b8754bb0..bc04518e9d8 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)  		return;  	template_disk = dm_table_get_integrity_disk(t, true); -	if (!template_disk && -	    blk_integrity_is_initialized(dm_disk(t->md))) { +	if (template_disk) +		blk_integrity_register(dm_disk(t->md), +				       blk_get_integrity(template_disk)); +	else if (blk_integrity_is_initialized(dm_disk(t->md)))  		DMWARN("%s: device no longer has a valid integrity profile",  		       dm_device_name(t->md)); -		return; -	} -	blk_integrity_register(dm_disk(t->md), -			       blk_get_integrity(template_disk)); +	else +		DMWARN("%s: unable to establish an integrity profile", +		       dm_device_name(t->md));  }  static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, @@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)  	return 0;  } +static bool 
dm_table_discard_zeroes_data(struct dm_table *t) +{ +	struct dm_target *ti; +	unsigned i = 0; + +	/* Ensure that all targets supports discard_zeroes_data. */ +	while (i < dm_table_get_num_targets(t)) { +		ti = dm_table_get_target(t, i++); + +		if (ti->discard_zeroes_data_unsupported) +			return 0; +	} + +	return 1; +} +  void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,  			       struct queue_limits *limits)  { @@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,  	}  	blk_queue_flush(q, flush); +	if (!dm_table_discard_zeroes_data(t)) +		q->limits.discard_zeroes_data = 0; +  	dm_table_set_integrity(t);  	/* diff --git a/drivers/md/linear.h b/drivers/md/linear.h index 0ce29b61605..2f2da05b2ce 100644 --- a/drivers/md/linear.h +++ b/drivers/md/linear.h @@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;  struct linear_private_data  { +	struct rcu_head		rcu;  	sector_t		array_sectors;  	dev_info_t		disks[0]; -	struct rcu_head		rcu;  }; diff --git a/drivers/md/md.c b/drivers/md/md.c index 8e221a20f5d..5c95ccb5950 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -61,6 +61,11 @@  static void autostart_arrays(int part);  #endif +/* pers_list is a list of registered personalities protected + * by pers_lock. + * pers_lock does extra service to protect accesses to + * mddev->thread when the mutex cannot be held. + */  static LIST_HEAD(pers_list);  static DEFINE_SPINLOCK(pers_lock); @@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev)  	} else  		mutex_unlock(&mddev->reconfig_mutex); +	/* was we've dropped the mutex we need a spinlock to +	 * make sur the thread doesn't disappear +	 */ +	spin_lock(&pers_lock);  	md_wakeup_thread(mddev->thread); +	spin_unlock(&pers_lock);  }  static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) @@ -848,7 +858,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,  	bio->bi_end_io = super_written;  	atomic_inc(&mddev->pending_writes); -	submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio); +	submit_bio(WRITE_FLUSH_FUA, bio);  }  void md_super_wait(mddev_t *mddev) @@ -1138,8 +1148,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version  			ret = 0;  	}  	rdev->sectors = rdev->sb_start; +	/* Limit to 4TB as metadata cannot record more than that */ +	if (rdev->sectors >= (2ULL << 32)) +		rdev->sectors = (2ULL << 32) - 2; -	if (rdev->sectors < sb->size * 2 && sb->level > 1) +	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)  		/* "this cannot possibly happen" ... */  		ret = -EINVAL; @@ -1173,7 +1186,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)  		mddev->clevel[0] = 0;  		mddev->layout = sb->layout;  		mddev->raid_disks = sb->raid_disks; -		mddev->dev_sectors = sb->size * 2; +		mddev->dev_sectors = ((sector_t)sb->size) * 2;  		mddev->events = ev1;  		mddev->bitmap_info.offset = 0;  		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; @@ -1415,6 +1428,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)  	rdev->sb_start = calc_dev_sboffset(rdev);  	if (!num_sectors || num_sectors > rdev->sb_start)  		num_sectors = rdev->sb_start; +	/* Limit to 4TB as metadata cannot record more than that. +	 * 4TB == 2^32 KB, or 2*2^32 sectors. 
+	 */ +	if (num_sectors >= (2ULL << 32)) +		num_sectors = (2ULL << 32) - 2;  	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,  		       rdev->sb_page);  	md_super_wait(rdev->mddev); @@ -1738,6 +1756,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)  	sb->level = cpu_to_le32(mddev->level);  	sb->layout = cpu_to_le32(mddev->layout); +	if (test_bit(WriteMostly, &rdev->flags)) +		sb->devflags |= WriteMostly1; +	else +		sb->devflags &= ~WriteMostly1; +  	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {  		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);  		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); @@ -2561,7 +2584,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)  	int err = -EINVAL;  	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {  		md_error(rdev->mddev, rdev); -		err = 0; +		if (test_bit(Faulty, &rdev->flags)) +			err = 0; +		else +			err = -EBUSY;  	} else if (cmd_match(buf, "remove")) {  		if (rdev->raid_disk >= 0)  			err = -EBUSY; @@ -2584,7 +2610,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)  		err = 0;  	} else if (cmd_match(buf, "-blocked")) {  		if (!test_bit(Faulty, &rdev->flags) && -		    test_bit(BlockedBadBlocks, &rdev->flags)) { +		    rdev->badblocks.unacked_exist) {  			/* metadata handler doesn't understand badblocks,  			 * so we need to fail the device  			 */ @@ -5983,6 +6009,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)  		return -ENODEV;  	md_error(mddev, rdev); +	if (!test_bit(Faulty, &rdev->flags)) +		return -EBUSY;  	return 0;  } @@ -6411,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,  	return thread;  } -void md_unregister_thread(mdk_thread_t *thread) +void md_unregister_thread(mdk_thread_t **threadp)  { +	mdk_thread_t *thread = *threadp;  	if (!thread)  		return;  	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); +	/* Locking ensures that mddev_unlock does not wake_up a +	 * non-existent thread +	 */ +	spin_lock(&pers_lock); +	*threadp = NULL; +	spin_unlock(&pers_lock);  	kthread_stop(thread->tsk);  	kfree(thread); @@ -7322,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev)  	mdk_rdev_t *rdev;  	/* resync has finished, collect result */ -	md_unregister_thread(mddev->sync_thread); -	mddev->sync_thread = NULL; +	md_unregister_thread(&mddev->sync_thread);  	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&  	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {  		/* success...*/ diff --git a/drivers/md/md.h b/drivers/md/md.h index 1e586bb4452..0a309dc29b4 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);  extern int unregister_md_personality(struct mdk_personality *p);  extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),  				mddev_t *mddev, const char *name); -extern void md_unregister_thread(mdk_thread_t *thread); +extern void md_unregister_thread(mdk_thread_t **threadp);  extern void md_wakeup_thread(mdk_thread_t *thread);  extern void md_check_recovery(mddev_t *mddev);  extern void md_write_start(mddev_t *mddev, struct bio *bi); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 3535c23af28..d5b5fb30017 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)  {  	multipath_conf_t *conf = mddev->private; -	md_unregister_thread(mddev->thread); -	mddev->thread = NULL; +	
md_unregister_thread(&mddev->thread);  	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/  	mempool_destroy(conf->pool);  	kfree(conf->multipaths); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 32323f0afd8..d9587dffe53 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1099,12 +1099,11 @@ read_again:  		bio_list_add(&conf->pending_bio_list, mbio);  		spin_unlock_irqrestore(&conf->device_lock, flags);  	} -	r1_bio_write_done(r1_bio); - -	/* In case raid1d snuck in to freeze_array */ -	wake_up(&conf->wait_barrier); - +	/* Mustn't call r1_bio_write_done before this next test, +	 * as it could result in the bio being freed. +	 */  	if (sectors_handled < (bio->bi_size >> 9)) { +		r1_bio_write_done(r1_bio);  		/* We need another r1_bio.  It has already been counted  		 * in bio->bi_phys_segments  		 */ @@ -1117,6 +1116,11 @@ read_again:  		goto retry_write;  	} +	r1_bio_write_done(r1_bio); + +	/* In case raid1d snuck in to freeze_array */ +	wake_up(&conf->wait_barrier); +  	if (do_sync || !bitmap || !plugged)  		md_wakeup_thread(mddev->thread); @@ -2558,8 +2562,7 @@ static int stop(mddev_t *mddev)  	raise_barrier(conf);  	lower_barrier(conf); -	md_unregister_thread(mddev->thread); -	mddev->thread = NULL; +	md_unregister_thread(&mddev->thread);  	if (conf->r1bio_pool)  		mempool_destroy(conf->r1bio_pool);  	kfree(conf->mirrors); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8b29cd4f01c..0cd9672cf9c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)  	md_write_end(r10_bio->mddev);  } +static void one_write_done(r10bio_t *r10_bio) +{ +	if (atomic_dec_and_test(&r10_bio->remaining)) { +		if (test_bit(R10BIO_WriteError, &r10_bio->state)) +			reschedule_retry(r10_bio); +		else { +			close_write(r10_bio); +			if (test_bit(R10BIO_MadeGood, &r10_bio->state)) +				reschedule_retry(r10_bio); +			else +				raid_end_bio_io(r10_bio); +		} +	} +} +  static void raid10_end_write_request(struct bio *bio, int error)  {  	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); @@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)  	 * Let's see if all mirrored write operations have finished  	 * already.  	 */ -	if (atomic_dec_and_test(&r10_bio->remaining)) { -		if (test_bit(R10BIO_WriteError, &r10_bio->state)) -			reschedule_retry(r10_bio); -		else { -			close_write(r10_bio); -			if (test_bit(R10BIO_MadeGood, &r10_bio->state)) -				reschedule_retry(r10_bio); -			else -				raid_end_bio_io(r10_bio); -		} -	} +	one_write_done(r10_bio);  	if (dec_rdev)  		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);  } @@ -1127,20 +1132,12 @@ retry_write:  		spin_unlock_irqrestore(&conf->device_lock, flags);  	} -	if (atomic_dec_and_test(&r10_bio->remaining)) { -		/* This matches the end of raid10_end_write_request() */ -		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, -				r10_bio->sectors, -				!test_bit(R10BIO_Degraded, &r10_bio->state), -				0); -		md_write_end(mddev); -		raid_end_bio_io(r10_bio); -	} - -	/* In case raid10d snuck in to freeze_array */ -	wake_up(&conf->wait_barrier); +	/* Don't remove the bias on 'remaining' (one_write_done) until +	 * after checking if we need to go around again. +	 */  	if (sectors_handled < (bio->bi_size >> 9)) { +		one_write_done(r10_bio);  		/* We need another r10_bio.  It has already been counted  		 * in bio->bi_phys_segments.  		 
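
The md changes above replace the open-coded pair "md_unregister_thread(mddev->thread); mddev->thread = NULL;" with md_unregister_thread(&mddev->thread), which clears the caller's pointer under pers_lock before stopping the thread, so the md_wakeup_thread() call in mddev_unlock() only ever sees NULL or a live thread. A short sketch of the same idea with illustrative names, not the md code itself:

#include <linux/kthread.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Stop a worker whose pointer other code may still read under
 * example_lock.  Clearing the holder inside the lock first means a
 * concurrent waker either sees NULL or a thread that is still alive,
 * never one that has already been freed.
 */
static void example_stop_worker(struct task_struct **holder)
{
	struct task_struct *tsk;

	spin_lock(&example_lock);
	tsk = *holder;
	*holder = NULL;
	spin_unlock(&example_lock);

	if (tsk)
		kthread_stop(tsk);
}
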
*/ @@ -1154,6 +1151,10 @@ retry_write:  		r10_bio->state = 0;  		goto retry_write;  	} +	one_write_done(r10_bio); + +	/* In case raid10d snuck in to freeze_array */ +	wake_up(&conf->wait_barrier);  	if (do_sync || !mddev->bitmap || !plugged)  		md_wakeup_thread(mddev->thread); @@ -2954,7 +2955,7 @@ static int run(mddev_t *mddev)  	return 0;  out_free_conf: -	md_unregister_thread(mddev->thread); +	md_unregister_thread(&mddev->thread);  	if (conf->r10bio_pool)  		mempool_destroy(conf->r10bio_pool);  	safe_put_page(conf->tmppage); @@ -2972,8 +2973,7 @@ static int stop(mddev_t *mddev)  	raise_barrier(conf, 0);  	lower_barrier(conf); -	md_unregister_thread(mddev->thread); -	mddev->thread = NULL; +	md_unregister_thread(&mddev->thread);  	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/  	if (conf->r10bio_pool)  		mempool_destroy(conf->r10bio_pool); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index dbae459fb02..ac5e8b57e50 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh)  finish:  	/* wait for this device to become unblocked */ -	if (unlikely(s.blocked_rdev)) +	if (conf->mddev->external && unlikely(s.blocked_rdev))  		md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);  	if (s.handle_bad_blocks) @@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev)  	return 0;  abort: -	md_unregister_thread(mddev->thread); -	mddev->thread = NULL; +	md_unregister_thread(&mddev->thread);  	if (conf) {  		print_raid5_conf(conf);  		free_conf(conf); @@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev)  {  	raid5_conf_t *conf = mddev->private; -	md_unregister_thread(mddev->thread); -	mddev->thread = NULL; +	md_unregister_thread(&mddev->thread);  	if (mddev->queue)  		mddev->queue->backing_dev_info.congested_fn = NULL;  	free_conf(conf); diff --git a/drivers/media/dvb/dvb-usb/af9005-remote.c b/drivers/media/dvb/dvb-usb/af9005-remote.c index c3bc64ed405..7e3961d0db6 100644 --- a/drivers/media/dvb/dvb-usb/af9005-remote.c +++ b/drivers/media/dvb/dvb-usb/af9005-remote.c @@ -21,7 +21,7 @@   * along with this program; if not, write to the Free Software   * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.   * - * see Documentation/dvb/REDME.dvb-usb for more information + * see Documentation/dvb/README.dvb-usb for more information   */  #include "af9005.h"  /* debug */ diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c index 51f6439dcfd..0351c0e52dd 100644 --- a/drivers/media/dvb/dvb-usb/af9005.c +++ b/drivers/media/dvb/dvb-usb/af9005.c @@ -19,7 +19,7 @@   * along with this program; if not, write to the Free Software   * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.   
* - * see Documentation/dvb/REDME.dvb-usb for more information + * see Documentation/dvb/README.dvb-usb for more information   */  #include "af9005.h" diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c index 3db89e3cb0b..536c16c943b 100644 --- a/drivers/media/dvb/dvb-usb/vp7045.c +++ b/drivers/media/dvb/dvb-usb/vp7045.c @@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;  static int vp7045_usb_probe(struct usb_interface *intf,  		const struct usb_device_id *id)  { -	struct dvb_usb_device *d; -	int ret = dvb_usb_device_init(intf, &vp7045_properties, -				   THIS_MODULE, &d, adapter_nr); -	if (ret) -		return ret; - -	d->priv = kmalloc(20, GFP_KERNEL); -	if (!d->priv) { -		dvb_usb_device_exit(intf); -		return -ENOMEM; -	} - -	return ret; -} - -static void vp7045_usb_disconnect(struct usb_interface *intf) -{ -	struct dvb_usb_device *d = usb_get_intfdata(intf); -	kfree(d->priv); -	dvb_usb_device_exit(intf); +	return dvb_usb_device_init(intf, &vp7045_properties, +				   THIS_MODULE, NULL, adapter_nr);  }  static struct usb_device_id vp7045_usb_table [] = { @@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);  static struct dvb_usb_device_properties vp7045_properties = {  	.usb_ctrl = CYPRESS_FX2,  	.firmware = "dvb-usb-vp7045-01.fw", -	.size_of_priv = sizeof(u8 *), +	.size_of_priv = 20,  	.num_adapters = 1,  	.adapter = { @@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = {  static struct usb_driver vp7045_usb_driver = {  	.name		= "dvb_usb_vp7045",  	.probe		= vp7045_usb_probe, -	.disconnect	= vp7045_usb_disconnect, +	.disconnect	= dvb_usb_device_exit,  	.id_table	= vp7045_usb_table,  }; diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h index ba917359fa6..404f63a6f26 100644 --- a/drivers/media/dvb/frontends/dib3000.h +++ b/drivers/media/dvb/frontends/dib3000.h @@ -17,7 +17,7 @@   *  Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver   *  sources, on which this driver (and the dvb-dibusb) are based.   * - * see Documentation/dvb/README.dibusb for more information + * see Documentation/dvb/README.dvb-usb for more information   *   */ diff --git a/drivers/media/dvb/frontends/dib3000mb.c b/drivers/media/dvb/frontends/dib3000mb.c index e80c5979636..437904cbf3e 100644 --- a/drivers/media/dvb/frontends/dib3000mb.c +++ b/drivers/media/dvb/frontends/dib3000mb.c @@ -17,7 +17,7 @@   *  Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver   *  sources, on which this driver (and the dvb-dibusb) are based.   
* - * see Documentation/dvb/README.dibusb for more information + * see Documentation/dvb/README.dvb-usb for more information   *   */ diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c index 8c0e1927697..ec1d52f3890 100644 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c @@ -402,7 +402,7 @@ static int fm_v4l2_vidioc_s_hw_freq_seek(struct file *file, void *priv,  static int fm_v4l2_vidioc_g_modulator(struct file *file, void *priv,  		struct v4l2_modulator *mod)  { -	struct fmdev *fmdev = video_drvdata(file);; +	struct fmdev *fmdev = video_drvdata(file);  	if (mod->index != 0)  		return -EINVAL; diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index eae05b50047..144f3f55d76 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c @@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)  static void nvt_process_rx_ir_data(struct nvt_dev *nvt)  {  	DEFINE_IR_RAW_EVENT(rawir); -	unsigned int count;  	u32 carrier;  	u8 sample;  	int i; @@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)  	if (nvt->carrier_detect_enabled)  		carrier = nvt_rx_carrier_detect(nvt); -	count = nvt->pkts; -	nvt_dbg_verbose("Processing buffer of len %d", count); +	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);  	init_ir_raw_event(&rawir); -	for (i = 0; i < count; i++) { -		nvt->pkts--; +	for (i = 0; i < nvt->pkts; i++) {  		sample = nvt->buf[i];  		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);  		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)  					  * SAMPLE_PERIOD); -		if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { -			if (nvt->rawir.pulse == rawir.pulse) -				nvt->rawir.duration += rawir.duration; -			else { -				nvt->rawir.duration = rawir.duration; -				nvt->rawir.pulse = rawir.pulse; -			} -			continue; -		} - -		rawir.duration += nvt->rawir.duration; +		nvt_dbg("Storing %s with duration %d", +			rawir.pulse ? "pulse" : "space", rawir.duration); -		init_ir_raw_event(&nvt->rawir); -		nvt->rawir.duration = 0; -		nvt->rawir.pulse = rawir.pulse; - -		if (sample == BUF_PULSE_BIT) -			rawir.pulse = false; - -		if (rawir.duration) { -			nvt_dbg("Storing %s with duration %d", -				rawir.pulse ? "pulse" : "space", -				rawir.duration); - -			ir_raw_event_store_with_filter(nvt->rdev, &rawir); -		} +		ir_raw_event_store_with_filter(nvt->rdev, &rawir);  		/*  		 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE  		 * indicates end of IR signal, but new data incoming. In both  		 * cases, it means we're ready to call ir_raw_event_handle  		 */ -		if ((sample == BUF_PULSE_BIT) && nvt->pkts) { +		if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {  			nvt_dbg("Calling ir_raw_event_handle (signal end)\n");  			ir_raw_event_handle(nvt->rdev);  		}  	} +	nvt->pkts = 0; +  	nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");  	ir_raw_event_handle(nvt->rdev); -	if (nvt->pkts) { -		nvt_dbg("Odd, pkts should be 0 now... 
(its %u)", nvt->pkts); -		nvt->pkts = 0; -	} -  	nvt_dbg_verbose("%s done", __func__);  } @@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)  	spin_lock_init(&nvt->nvt_lock);  	spin_lock_init(&nvt->tx.lock); -	init_ir_raw_event(&nvt->rawir);  	ret = -EBUSY;  	/* now claim resources */ diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index 1241fc89a36..0d5e0872a2e 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ b/drivers/media/rc/nuvoton-cir.h @@ -67,7 +67,6 @@ static int debug;  struct nvt_dev {  	struct pnp_dev *pdev;  	struct rc_dev *rdev; -	struct ir_raw_event rawir;  	spinlock_t nvt_lock; diff --git a/drivers/media/video/cx18/cx18-mailbox.h b/drivers/media/video/cx18/cx18-mailbox.h index 05fe6bdbe06..b63fdfaac49 100644 --- a/drivers/media/video/cx18/cx18-mailbox.h +++ b/drivers/media/video/cx18/cx18-mailbox.h @@ -69,7 +69,7 @@ struct cx18_mailbox {      /* Each command can have up to 6 arguments */      u32       args[MAX_MB_ARGUMENTS];      /* The return code can be one of the codes in the file cx23418.h. If the -       command is completed successfuly, the error will be ERR_SYS_SUCCESS. +       command is completed successfully, the error will be ERR_SYS_SUCCESS.         If it is pending, the code is ERR_SYS_PENDING. If it failed, the error         code would indicate the task from which the error originated and will         be one of the errors in cx23418.h. In that case, the following diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c index 0800433b209..18305c89083 100644 --- a/drivers/media/video/gspca/ov519.c +++ b/drivers/media/video/gspca/ov519.c @@ -2858,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd)  			case 0x60:  				PDEBUG(D_PROBE, "Sensor is a OV7660");  				sd->sensor = SEN_OV7660; -				sd->invert_led = 0;  				break;  			default:  				PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low); @@ -3337,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev,  	case BRIDGE_OV519:  		cam->cam_mode = ov519_vga_mode;  		cam->nmodes = ARRAY_SIZE(ov519_vga_mode); -		sd->invert_led = !sd->invert_led;  		break;  	case BRIDGE_OVFX2:  		cam->cam_mode = ov519_vga_mode; @@ -5005,24 +5003,24 @@ static const struct sd_desc sd_desc = {  /* -- module initialisation -- */  static const struct usb_device_id device_table[] = {  	{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, -	{USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 }, -	{USB_DEVICE(0x041e, 0x405f), +	{USB_DEVICE(0x041e, 0x4052),  		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, +	{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },  	{USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },  	{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, -	{USB_DEVICE(0x041e, 0x4064), -		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, +	{USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },  	{USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, -	{USB_DEVICE(0x041e, 0x4068), +	{USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 }, +	{USB_DEVICE(0x045e, 0x028c),  		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, -	{USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },  	{USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, -	{USB_DEVICE(0x054c, 0x0155), -		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, +	{USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },  	{USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },  	{USB_DEVICE(0x05a9, 
0x0518), .driver_info = BRIDGE_OV518 }, -	{USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, -	{USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 }, +	{USB_DEVICE(0x05a9, 0x0519), +		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, +	{USB_DEVICE(0x05a9, 0x0530), +		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },  	{USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 },  	{USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },  	{USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index 81b8a600783..c477ad11f10 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c @@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev)  		reg_w1(gspca_dev, 0x01, 0x22);  		msleep(100);  		reg01 = SCL_SEL_OD | S_PDN_INV; -		reg17 &= MCK_SIZE_MASK; +		reg17 &= ~MCK_SIZE_MASK;  		reg17 |= 0x04;		/* clock / 4 */  		break;  	} @@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev)  		if (!mode) {			/* if 640x480 */  			reg17 &= ~MCK_SIZE_MASK;  			reg17 |= 0x04;		/* clock / 4 */ +		} else { +			reg01 &= ~SYS_SEL_48M;	/* clk 24Mz */ +			reg17 &= ~MCK_SIZE_MASK; +			reg17 |= 0x02;		/* clock / 2 */  		}  		break;  	case SENSOR_OV7630: diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index b5ef3622244..b3a5ecdb33a 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c @@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev)  					"'%s' Display already enabled\n",  					def_display->name);  			} -			/* set the update mode */ -			if (def_display->caps & -					OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { -				if (dssdrv->enable_te) -					dssdrv->enable_te(def_display, 0); -				if (dssdrv->set_update_mode) -					dssdrv->set_update_mode(def_display, -							OMAP_DSS_UPDATE_MANUAL); -			} else { -				if (dssdrv->set_update_mode) -					dssdrv->set_update_mode(def_display, -							OMAP_DSS_UPDATE_AUTO); -			}  		}  	} diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c index 9d3459de04b..80796eb0c53 100644 --- a/drivers/media/video/omap3isp/ispccdc.c +++ b/drivers/media/video/omap3isp/ispccdc.c @@ -31,6 +31,7 @@  #include <linux/dma-mapping.h>  #include <linux/mm.h>  #include <linux/sched.h> +#include <linux/slab.h>  #include <media/v4l2-event.h>  #include "isp.h" diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c index e9a0e94b999..8c70e64444e 100644 --- a/drivers/media/video/pwc/pwc-v4l.c +++ b/drivers/media/video/pwc/pwc-v4l.c @@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev)  	if (pdev->restore_factory)  		pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE; -	if (!pdev->features & FEATURE_MOTOR_PANTILT) +	if (!(pdev->features & FEATURE_MOTOR_PANTILT))  		return hdl->error;  	/* Motor pan / tilt / reset */ diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index d29f9c2d085..e4100b1f68d 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c @@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)  	list_for_each_entry(stream, &dev->streams, list) {  		if (stream->intf == intf) -			return uvc_video_resume(stream); +			return uvc_video_resume(stream, reset);  	}  	uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " diff --git 
a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c index 48fea373c25..29e239911d0 100644 --- a/drivers/media/video/uvc/uvc_entity.c +++ b/drivers/media/video/uvc/uvc_entity.c @@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,  		if (remote == NULL)  			return -EINVAL; -		source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) +		source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)  		       ? (remote->vdev ? &remote->vdev->entity : NULL)  		       : &remote->subdev.entity;  		if (source == NULL) diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 8244167c891..ffd1158628b 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c @@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream)   * buffers, making sure userspace applications are notified of the problem   * instead of waiting forever.   */ -int uvc_video_resume(struct uvc_streaming *stream) +int uvc_video_resume(struct uvc_streaming *stream, int reset)  {  	int ret; +	/* If the bus has been reset on resume, set the alternate setting to 0. +	 * This should be the default value, but some devices crash or otherwise +	 * misbehave if they don't receive a SET_INTERFACE request before any +	 * other video control request. +	 */ +	if (reset) +		usb_set_interface(stream->dev->udev, stream->intfnum, 0); +  	stream->frozen = 0;  	ret = uvc_commit_video(stream, &stream->ctrl); diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index df32a43ca86..cbdd49bf8b6 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h @@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);  /* Video */  extern int uvc_video_init(struct uvc_streaming *stream);  extern int uvc_video_suspend(struct uvc_streaming *stream); -extern int uvc_video_resume(struct uvc_streaming *stream); +extern int uvc_video_resume(struct uvc_streaming *stream, int reset);  extern int uvc_video_enable(struct uvc_streaming *stream, int enable);  extern int uvc_probe_video(struct uvc_streaming *stream,  		struct uvc_streaming_control *probe); diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 06f14008b34..a5c9ed128b9 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd)  		media_device_unregister_entity(&vdev->entity);  #endif +	/* Do not call v4l2_device_put if there is no release callback set. +	 * Drivers that have no v4l2_device release callback might free the +	 * v4l2_dev instance in the video_device release callback below, so we +	 * must perform this check here. +	 * +	 * TODO: In the long run all drivers that use v4l2_device should use the +	 * v4l2_device release callback. This check will then be unnecessary. +	 */ +	if (v4l2_dev && v4l2_dev->release == NULL) +		v4l2_dev = NULL; +  	/* Release video_device and perform other  	   cleanups as needed. 
*/  	vdev->release(vdev); diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index c72856c4143..e6a2c3b302d 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c @@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)  	mutex_init(&v4l2_dev->ioctl_lock);  	v4l2_prio_init(&v4l2_dev->prio);  	kref_init(&v4l2_dev->ref); +	get_device(dev);  	v4l2_dev->dev = dev;  	if (dev == NULL) {  		/* If dev == NULL, then name must be filled in by the caller */ @@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)  	if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)  		dev_set_drvdata(v4l2_dev->dev, NULL); +	put_device(v4l2_dev->dev);  	v4l2_dev->dev = NULL;  }  EXPORT_SYMBOL_GPL(v4l2_device_disconnect); diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c index 85d3048c1d6..bb7f17f2a33 100644 --- a/drivers/media/video/via-camera.c +++ b/drivers/media/video/via-camera.c @@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void)  	struct pci_bus *pbus = pci_find_bus(0, 0);  	u8 cbyte; +	if (!pbus) +		return false;  	pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,  			VIACAM_SERIAL_CREG, &cbyte);  	if ((cbyte & VIACAM_SERIAL_BIT) == 0) diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 21131c7b0f1..563654c9b19 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c @@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)  	ct->regs.ack = JZ_REG_ADC_STATUS;  	ct->chip.irq_mask = irq_gc_mask_set_bit;  	ct->chip.irq_unmask = irq_gc_mask_clr_bit; -	ct->chip.irq_ack = irq_gc_ack; +	ct->chip.irq_ack = irq_gc_ack_set_bit;  	irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c index 5d1fca0277e..f83103b8970 100644 --- a/drivers/mfd/max8997.c +++ b/drivers/mfd/max8997.c @@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c,  	max8997->dev = &i2c->dev;  	max8997->i2c = i2c;  	max8997->type = id->driver_data; +	max8997->irq = i2c->irq;  	if (!pdata)  		goto err; +	max8997->irq_base = pdata->irq_base; +	max8997->ono = pdata->ono;  	max8997->wakeup = pdata->wakeup;  	mutex_init(&max8997->iolock); @@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c,  	pm_runtime_set_active(max8997->dev); +	max8997_irq_init(max8997); +  	mfd_add_devices(max8997->dev, -1, max8997_devs,  			ARRAY_SIZE(max8997_devs),  			NULL, 0); diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 29601e7d606..86e14583a08 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -17,6 +17,7 @@   * along with this program.  If not, see <http://www.gnu.org/licenses/>.   
*/  #include <linux/kernel.h> +#include <linux/module.h>  #include <linux/types.h>  #include <linux/slab.h>  #include <linux/delay.h> @@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)  				| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF  				| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); -			reg |= (1 << (i + 1));  		} else  			continue; diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c index 2bfad5c86cc..a56be931551 100644 --- a/drivers/mfd/tps65910-irq.c +++ b/drivers/mfd/tps65910-irq.c @@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,  	switch (tps65910_chip_id(tps65910)) {  	case TPS65910:  		tps65910->irq_num = TPS65910_NUM_IRQ; +		break;  	case TPS65911:  		tps65910->irq_num = TPS65911_NUM_IRQ; +		break;  	}  	/* Register with genirq */ diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c index b5d598c3aa7..7cbf2aa9e64 100644 --- a/drivers/mfd/twl4030-madc.c +++ b/drivers/mfd/twl4030-madc.c @@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)  	u8 ch_msb, ch_lsb;  	int ret; -	if (!req) +	if (!req || !twl4030_madc)  		return -EINVAL; +  	mutex_lock(&twl4030_madc->lock);  	if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {  		ret = -EINVAL; @@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)  	if (!madc)  		return -ENOMEM; +	madc->dev = &pdev->dev; +  	/*  	 * Phoenix provides 2 interrupt lines. The first one is connected to  	 * the OMAP. The other one can be connected to the other processor such diff --git a/drivers/mfd/wm8350-gpio.c b/drivers/mfd/wm8350-gpio.c index ebf99bef392..d584f6b4d6e 100644 --- a/drivers/mfd/wm8350-gpio.c +++ b/drivers/mfd/wm8350-gpio.c @@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)  	return ret;  } -static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) +static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)  {  	if (db == WM8350_GPIO_DEBOUNCE_ON)  		return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, @@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,  		goto err;  	if (gpio_set_polarity(wm8350, gpio, pol))  		goto err; -	if (gpio_set_debounce(wm8350, gpio, debounce)) +	if (wm8350_gpio_set_debounce(wm8350, gpio, debounce))  		goto err;  	if (gpio_set_dir(wm8350, gpio, dir))  		goto err; diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 0a4d86c6c4a..2d6423c2d19 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -146,6 +146,7 @@ config PHANTOM  config INTEL_MID_PTI  	tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" +	depends on PCI  	default n  	help  	  The PTI (Parallel Trace Interface) driver directs diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c index 54e3d05b63c..35903154ca2 100644 --- a/drivers/misc/ab8500-pwm.c +++ b/drivers/misc/ab8500-pwm.c @@ -164,5 +164,5 @@ subsys_initcall(ab8500_pwm_init);  module_exit(ab8500_pwm_exit);  MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>");  MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); -MODULE_ALIAS("AB8500 PWM driver"); +MODULE_ALIAS("platform:ab8500-pwm");  MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c index 5325a7e70dc..27dc0d21aaf 100644 --- a/drivers/misc/fsa9480.c +++ b/drivers/misc/fsa9480.c @@ -455,7 +455,7 @@ static int __devinit fsa9480_probe(struct i2c_client *client,  fail2:  	if 
(client->irq) -		free_irq(client->irq, NULL); +		free_irq(client->irq, usbsw);  fail1:  	i2c_set_clientdata(client, NULL);  	kfree(usbsw); @@ -466,7 +466,7 @@ static int __devexit fsa9480_remove(struct i2c_client *client)  {  	struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);  	if (client->irq) -		free_irq(client->irq, NULL); +		free_irq(client->irq, usbsw);  	i2c_set_clientdata(client, NULL);  	sysfs_remove_group(&client->dev.kobj, &fsa9480_group); diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index b928bc14e97..8b51cd62d06 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)  	 *      both have been read. So the value read will always be correct.  	 * Set BOOT bit to refresh factory tuning values.  	 */ -	lis3->read(lis3, CTRL_REG2, &reg); -	if (lis3->whoami ==  WAI_12B) -		reg |= CTRL2_BDU | CTRL2_BOOT; -	else -		reg |= CTRL2_BOOT_8B; -	lis3->write(lis3, CTRL_REG2, reg); +	if (lis3->pdata) { +		lis3->read(lis3, CTRL_REG2, &reg); +		if (lis3->whoami ==  WAI_12B) +			reg |= CTRL2_BDU | CTRL2_BOOT; +		else +			reg |= CTRL2_BOOT_8B; +		lis3->write(lis3, CTRL_REG2, reg); +	}  	/* LIS3 power on delay is quite long */  	msleep(lis3->pwron_delay / lis3lv02d_get_odr()); diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index 8653bd0b1a3..0b56e3f4357 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -33,6 +33,8 @@  #include <linux/mutex.h>  #include <linux/miscdevice.h>  #include <linux/pti.h> +#include <linux/slab.h> +#include <linux/uaccess.h>  #define DRIVERNAME		"pti"  #define PCINAME			"pciPTI" @@ -163,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc,  static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,  					     const char *thread_name)  { +	/* +	 * Since we access the comm member in current's task_struct, we only +	 * need to be as large as what 'comm' in that structure is. +	 */ +	char comm[TASK_COMM_LEN];  	struct pti_masterchannel mccontrol = {.master = CONTROL_ID,  					      .channel = 0};  	const char *thread_name_p; @@ -170,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,  	u8 control_frame[CONTROL_FRAME_LEN];  	if (!thread_name) { -		/* -		 * Since we access the comm member in current's task_struct, -		 * we only need to be as large as what 'comm' in that -		 * structure is. -		 */ -		char comm[TASK_COMM_LEN]; -  		if (!in_interrupt())  			get_task_comm(comm, current);  		else diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 54c91ffe4a9..ba168a7d54d 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -338,6 +338,12 @@ void st_int_recv(void *disc_data,  			/* Unknow packet? 
*/  		default:  			type = *ptr; +			if (st_gdata->list[type] == NULL) { +				pr_err("chip/interface misbehavior dropping" +					" frame starting with 0x%02x", type); +				goto done; + +			}  			st_gdata->rx_skb = alloc_skb(  					st_gdata->list[type]->max_frame_size,  					GFP_ATOMIC); @@ -354,6 +360,7 @@ void st_int_recv(void *disc_data,  		ptr++;  		count--;  	} +done:  	spin_unlock_irqrestore(&st_gdata->lock, flags);  	pr_debug("done %s", __func__);  	return; @@ -717,9 +724,10 @@ static void st_tty_close(struct tty_struct *tty)  	 */  	spin_lock_irqsave(&st_gdata->lock, flags);  	for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { -		if (st_gdata->list[i] != NULL) +		if (st_gdata->is_registered[i] == true)  			pr_err("%d not un-registered", i);  		st_gdata->list[i] = NULL; +		st_gdata->is_registered[i] = false;  	}  	st_gdata->protos_registered = 0;  	spin_unlock_irqrestore(&st_gdata->lock, flags); diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 38fd2f04c07..3a3580566df 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c @@ -68,6 +68,7 @@ void validate_firmware_response(struct kim_data_s *kim_gdata)  	if (unlikely(skb->data[5] != 0)) {  		pr_err("no proper response during fw download");  		pr_err("data6 %x", skb->data[5]); +		kfree_skb(skb);  		return;		/* keep waiting for the proper response */  	}  	/* becos of all the script being downloaded */ @@ -210,6 +211,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)  		pr_err(" waiting for ver info- timed out ");  		return -ETIMEDOUT;  	} +	INIT_COMPLETION(kim_gdata->kim_rcvd);  	version =  		MAKEWORD(kim_gdata->resp_buffer[13], @@ -298,6 +300,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)  		switch (((struct bts_action *)ptr)->type) {  		case ACTION_SEND_COMMAND:	/* action send */ +			pr_debug("S");  			action_ptr = &(((struct bts_action *)ptr)->data[0]);  			if (unlikely  			    (((struct hci_command *)action_ptr)->opcode == @@ -335,6 +338,10 @@ static long download_firmware(struct kim_data_s *kim_gdata)  				release_firmware(kim_gdata->fw_entry);  				return -ETIMEDOUT;  			} +			/* reinit completion before sending for the +			 * relevant wait +			 */ +			INIT_COMPLETION(kim_gdata->kim_rcvd);  			/*  			 * Free space found in uart buffer, call st_int_write @@ -361,6 +368,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)  			}  			break;  		case ACTION_WAIT_EVENT:  /* wait */ +			pr_debug("W");  			if (!wait_for_completion_timeout  					(&kim_gdata->kim_rcvd,  					 msecs_to_jiffies(CMD_RESP_TIME))) { @@ -434,11 +442,17 @@ long st_kim_start(void *kim_data)  {  	long err = 0;  	long retry = POR_RETRY_COUNT; +	struct ti_st_plat_data	*pdata;  	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;  	pr_info(" %s", __func__); +	pdata = kim_gdata->kim_pdev->dev.platform_data;  	do { +		/* platform specific enabling code here */ +		if (pdata->chip_enable) +			pdata->chip_enable(kim_gdata); +  		/* Configure BT nShutdown to HIGH state */  		gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);  		mdelay(5);	/* FIXME: a proper toggle */ @@ -460,6 +474,12 @@ long st_kim_start(void *kim_data)  			pr_info("ldisc_install = 0");  			sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,  					NULL, "install"); +			/* the following wait is never going to be completed, +			 * since the ldisc was never installed, hence serving +			 * as a mdelay of LDISC_TIME msecs */ +			err = wait_for_completion_timeout +				(&kim_gdata->ldisc_installed, +				 
msecs_to_jiffies(LDISC_TIME));  			err = -ETIMEDOUT;  			continue;  		} else { @@ -472,6 +492,13 @@ long st_kim_start(void *kim_data)  				pr_info("ldisc_install = 0");  				sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,  						NULL, "install"); +				/* this wait might be completed, though in the +				 * tty_close() since the ldisc is already +				 * installed */ +				err = wait_for_completion_timeout +					(&kim_gdata->ldisc_installed, +					 msecs_to_jiffies(LDISC_TIME)); +				err = -EINVAL;  				continue;  			} else {	/* on success don't retry */  				break; @@ -489,6 +516,8 @@ long st_kim_stop(void *kim_data)  {  	long err = 0;  	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data; +	struct ti_st_plat_data	*pdata = +		kim_gdata->kim_pdev->dev.platform_data;  	INIT_COMPLETION(kim_gdata->ldisc_installed); @@ -515,6 +544,10 @@ long st_kim_stop(void *kim_data)  	gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);  	mdelay(1);  	gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); + +	/* platform specific disable */ +	if (pdata->chip_disable) +		pdata->chip_disable(kim_gdata);  	return err;  } diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c index 3f249513885..1ff460a8e9c 100644 --- a/drivers/misc/ti-st/st_ll.c +++ b/drivers/misc/ti-st/st_ll.c @@ -22,6 +22,7 @@  #define pr_fmt(fmt) "(stll) :" fmt  #include <linux/skbuff.h>  #include <linux/module.h> +#include <linux/platform_device.h>  #include <linux/ti_wilink_st.h>  /**********************************************************************/ @@ -37,6 +38,9 @@ static void send_ll_cmd(struct st_data_s *st_data,  static void ll_device_want_to_sleep(struct st_data_s *st_data)  { +	struct kim_data_s	*kim_data; +	struct ti_st_plat_data	*pdata; +  	pr_debug("%s", __func__);  	/* sanity check */  	if (st_data->ll_state != ST_LL_AWAKE) @@ -46,10 +50,19 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)  	send_ll_cmd(st_data, LL_SLEEP_ACK);  	/* update state */  	st_data->ll_state = ST_LL_ASLEEP; + +	/* communicate to platform about chip asleep */ +	kim_data = st_data->kim_data; +	pdata = kim_data->kim_pdev->dev.platform_data; +	if (pdata->chip_asleep) +		pdata->chip_asleep(NULL);  }  static void ll_device_want_to_wakeup(struct st_data_s *st_data)  { +	struct kim_data_s	*kim_data; +	struct ti_st_plat_data	*pdata; +  	/* diff actions in diff states */  	switch (st_data->ll_state) {  	case ST_LL_ASLEEP: @@ -70,6 +83,12 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)  	}  	/* update state */  	st_data->ll_state = ST_LL_AWAKE; + +	/* communicate to platform about chip wakeup */ +	kim_data = st_data->kim_data; +	pdata = kim_data->kim_pdev->dev.platform_data; +	if (pdata->chip_asleep) +		pdata->chip_awake(NULL);  }  /**********************************************************************/ diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 1ff5486213f..4c1a648d00f 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,  	/*  	 * Reliable writes are used to implement Forced Unit Access and  	 * REQ_META accesses, and are supported only on MMCs. +	 * +	 * XXX: this really needs a good explanation of why REQ_META +	 * is treated special.  	 
*/  	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||  			  (req->cmd_flags & REQ_META)) && diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 91a0a7460eb..b27b94078c2 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)  		if (mrq->done)  			mrq->done(mrq); -		mmc_host_clk_gate(host); +		mmc_host_clk_release(host);  	}  } @@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)  			mrq->stop->mrq = mrq;  		}  	} -	mmc_host_clk_ungate(host); +	mmc_host_clk_hold(host);  	led_trigger_event(host->led, LED_FULL);  	host->ops->request(host, mrq);  } @@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)   */  void mmc_set_chip_select(struct mmc_host *host, int mode)  { +	mmc_host_clk_hold(host);  	host->ios.chip_select = mode;  	mmc_set_ios(host); +	mmc_host_clk_release(host);  }  /*   * Sets the host clock to the highest possible frequency that   * is below "hz".   */ -void mmc_set_clock(struct mmc_host *host, unsigned int hz) +static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)  {  	WARN_ON(hz < host->f_min); @@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)  	mmc_set_ios(host);  } +void mmc_set_clock(struct mmc_host *host, unsigned int hz) +{ +	mmc_host_clk_hold(host); +	__mmc_set_clock(host, hz); +	mmc_host_clk_release(host); +} +  #ifdef CONFIG_MMC_CLKGATE  /*   * This gates the clock by setting it to 0 Hz. @@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)  	if (host->clk_old) {  		BUG_ON(host->ios.clock);  		/* This call will also set host->clk_gated to false */ -		mmc_set_clock(host, host->clk_old); +		__mmc_set_clock(host, host->clk_old);  	}  } @@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)   */  void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)  { +	mmc_host_clk_hold(host);  	host->ios.bus_mode = mode;  	mmc_set_ios(host); +	mmc_host_clk_release(host);  }  /* @@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)   */  void mmc_set_bus_width(struct mmc_host *host, unsigned int width)  { +	mmc_host_clk_hold(host);  	host->ios.bus_width = width;  	mmc_set_ios(host); +	mmc_host_clk_release(host);  }  /** @@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)  		ocr &= 3 << bit; +		mmc_host_clk_hold(host);  		host->ios.vdd = bit;  		mmc_set_ios(host); +		mmc_host_clk_release(host);  	} else {  		pr_warning("%s: host doesn't support card's voltages\n",  				mmc_hostname(host)); @@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11   */  void mmc_set_timing(struct mmc_host *host, unsigned int timing)  { +	mmc_host_clk_hold(host);  	host->ios.timing = timing;  	mmc_set_ios(host); +	mmc_host_clk_release(host);  }  /* @@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)   */  void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)  { +	mmc_host_clk_hold(host);  	host->ios.drv_type = drv_type;  	mmc_set_ios(host); +	mmc_host_clk_release(host);  }  /* @@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)  {  	int bit; +	mmc_host_clk_hold(host); +  	/* If ocr is set, we use it */  	if (host->ocr)  		bit = ffs(host->ocr) - 1; @@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)  	 * time required to reach a stable voltage.  	 
*/  	mmc_delay(10); + +	mmc_host_clk_release(host);  }  static void mmc_power_off(struct mmc_host *host)  { +	mmc_host_clk_hold(host); +  	host->ios.clock = 0;  	host->ios.vdd = 0; @@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)  	host->ios.bus_width = MMC_BUS_WIDTH_1;  	host->ios.timing = MMC_TIMING_LEGACY;  	mmc_set_ios(host); + +	mmc_host_clk_release(host);  }  /* diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index b29d3e8fd3a..793d0a0dad8 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)  }  /** - *	mmc_host_clk_ungate - ungate hardware MCI clocks + *	mmc_host_clk_hold - ungate hardware MCI clocks   *	@host: host to ungate.   *   *	Makes sure the host ios.clock is restored to a non-zero value   *	past this call.	Increase clock reference count and ungate clock   *	if we're the first user.   */ -void mmc_host_clk_ungate(struct mmc_host *host) +void mmc_host_clk_hold(struct mmc_host *host)  {  	unsigned long flags; @@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)  }  /** - *	mmc_host_clk_gate - gate off hardware MCI clocks + *	mmc_host_clk_release - gate off hardware MCI clocks   *	@host: host to gate.   *   *	Calls the host driver with ios.clock set to zero as often as possible   *	in order to gate off hardware MCI clocks. Decrease clock reference   *	count and schedule disabling of clock.   */ -void mmc_host_clk_gate(struct mmc_host *host) +void mmc_host_clk_release(struct mmc_host *host)  {  	unsigned long flags; @@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)  	host->clk_requests--;  	if (mmc_host_may_gate_card(host->card) &&  	    !host->clk_requests) -		schedule_work(&host->clk_gate_work); +		queue_work(system_nrt_wq, &host->clk_gate_work);  	spin_unlock_irqrestore(&host->clk_lock, flags);  } @@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)  	if (cancel_work_sync(&host->clk_gate_work))  		mmc_host_clk_gate_delayed(host);  	if (host->clk_gated) -		mmc_host_clk_ungate(host); +		mmc_host_clk_hold(host);  	/* There should be only one user now */  	WARN_ON(host->clk_requests > 1);  } diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index de199f91192..fb8a5cd2e4a 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -16,16 +16,16 @@ int mmc_register_host_class(void);  void mmc_unregister_host_class(void);  #ifdef CONFIG_MMC_CLKGATE -void mmc_host_clk_ungate(struct mmc_host *host); -void mmc_host_clk_gate(struct mmc_host *host); +void mmc_host_clk_hold(struct mmc_host *host); +void mmc_host_clk_release(struct mmc_host *host);  unsigned int mmc_host_clk_rate(struct mmc_host *host);  #else -static inline void mmc_host_clk_ungate(struct mmc_host *host) +static inline void mmc_host_clk_hold(struct mmc_host *host)  {  } -static inline void mmc_host_clk_gate(struct mmc_host *host) +static inline void mmc_host_clk_release(struct mmc_host *host)  {  } diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 633975ff2bb..0370e03e314 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)  	return 0;  } -static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) +static void sd_update_bus_speed_mode(struct mmc_card *card)  { -	unsigned int bus_speed = 0, timing = 0; -	int err; -  	/*  	 * If the host doesn't support any of the UHS-I modes, 
fallback on  	 * default speed.  	 */  	if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | -	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) -		return 0; +	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) { +		card->sd_bus_speed = 0; +		return; +	}  	if ((card->host->caps & MMC_CAP_UHS_SDR104) &&  	    (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { -			bus_speed = UHS_SDR104_BUS_SPEED; -			timing = MMC_TIMING_UHS_SDR104; -			card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; +			card->sd_bus_speed = UHS_SDR104_BUS_SPEED;  	} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&  		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { -			bus_speed = UHS_DDR50_BUS_SPEED; -			timing = MMC_TIMING_UHS_DDR50; -			card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; +			card->sd_bus_speed = UHS_DDR50_BUS_SPEED;  	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |  		    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &  		    SD_MODE_UHS_SDR50)) { -			bus_speed = UHS_SDR50_BUS_SPEED; -			timing = MMC_TIMING_UHS_SDR50; -			card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; +			card->sd_bus_speed = UHS_SDR50_BUS_SPEED;  	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |  		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&  		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { -			bus_speed = UHS_SDR25_BUS_SPEED; -			timing = MMC_TIMING_UHS_SDR25; -			card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; +			card->sd_bus_speed = UHS_SDR25_BUS_SPEED;  	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |  		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |  		    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &  		    SD_MODE_UHS_SDR12)) { -			bus_speed = UHS_SDR12_BUS_SPEED; -			timing = MMC_TIMING_UHS_SDR12; -			card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; +			card->sd_bus_speed = UHS_SDR12_BUS_SPEED; +	} +} + +static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) +{ +	int err; +	unsigned int timing = 0; + +	switch (card->sd_bus_speed) { +	case UHS_SDR104_BUS_SPEED: +		timing = MMC_TIMING_UHS_SDR104; +		card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; +		break; +	case UHS_DDR50_BUS_SPEED: +		timing = MMC_TIMING_UHS_DDR50; +		card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; +		break; +	case UHS_SDR50_BUS_SPEED: +		timing = MMC_TIMING_UHS_SDR50; +		card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; +		break; +	case UHS_SDR25_BUS_SPEED: +		timing = MMC_TIMING_UHS_SDR25; +		card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; +		break; +	case UHS_SDR12_BUS_SPEED: +		timing = MMC_TIMING_UHS_SDR12; +		card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; +		break; +	default: +		return 0;  	} -	card->sd_bus_speed = bus_speed; -	err = mmc_sd_switch(card, 1, 0, bus_speed, status); +	err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);  	if (err)  		return err; -	if ((status[16] & 0xF) != bus_speed) +	if ((status[16] & 0xF) != card->sd_bus_speed)  		printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",  			mmc_hostname(card->host));  	else { @@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)  		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);  	} +	/* +	 * Select the bus speed mode depending on host +	 * and card capability. 
+	 */ +	sd_update_bus_speed_mode(card); +  	/* Set the driver strength for the card */  	err = sd_select_driver_type(card, status);  	if (err)  		goto out; -	/* Set bus speed mode of the card */ -	err = sd_set_bus_speed_mode(card, status); +	/* Set current limit for the card */ +	err = sd_set_current_limit(card, status);  	if (err)  		goto out; -	/* Set current limit for the card */ -	err = sd_set_current_limit(card, status); +	/* Set bus speed mode of the card */ +	err = sd_set_bus_speed_mode(card, status);  	if (err)  		goto out; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 0e9780f5a4a..4dc0028086a 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -16,6 +16,7 @@  #include <linux/err.h>  #include <linux/clk.h>  #include <linux/gpio.h> +#include <linux/module.h>  #include <linux/slab.h>  #include <linux/mmc/host.h>  #include <linux/mmc/mmc.h> diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 2bd7bf4fece..fe886d6c474 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)  		ctrl &= ~SDHCI_CTRL_8BITBUS;  		break;  	default: +		ctrl &= ~SDHCI_CTRL_4BITBUS; +		ctrl &= ~SDHCI_CTRL_8BITBUS;  		break;  	} diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 774f6439d7c..0c4a672f5db 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c @@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)  	mmc_data->hclk = clk_get_rate(priv->clk);  	mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;  	mmc_data->get_cd = sh_mobile_sdhi_get_cd; -	if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) -		mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;  	mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;  	if (p) {  		mmc_data->flags = p->tmio_flags; +		if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) +			mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;  		mmc_data->ocr_mask = p->tmio_ocr_mask;  		mmc_data->capabilities |= p->tmio_caps; diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 4be8373d43e..66b616ebe53 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -142,7 +142,7 @@ config MTD_OF_PARTS  	help  	  This provides a partition parsing function which derives  	  the partition map from the children of the flash node, -	  as described in Documentation/powerpc/booting-without-of.txt. +	  as described in Documentation/devicetree/booting-without-of.txt.  config MTD_AR7_PARTS  	tristate "TI AR7 partitioning support" diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 65b5b76cc37..64fbb002182 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h @@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)  #define ubi_dbg_msg(fmt, ...) do {                                           \  	if (0)                                                               \ -		pr_debug(fmt "\n", ##__VA_ARGS__);                           \ +		printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);                  \  } while (0)  #define dbg_msg(fmt, ...)  
ubi_dbg_msg(fmt, ##__VA_ARGS__) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 8d0314dbd94..ddd63e7cf4a 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1063,8 +1063,7 @@ config SMSC911X  	  Say Y here if you want support for SMSC LAN911x and LAN921x families  	  of ethernet controllers. -	  To compile this driver as a module, choose M here and read -	  <file:Documentation/networking/net-modules.txt>. The module +	  To compile this driver as a module, choose M here. The module  	  will be called smsc911x.  config SMSC911X_ARCH_HOOKS @@ -2535,7 +2534,7 @@ config S6GMAC  source "drivers/net/stmmac/Kconfig"  config PCH_GBE -	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" +	tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"  	depends on PCI  	select MII  	---help--- @@ -2548,10 +2547,11 @@ config PCH_GBE  	  This driver enables Gigabit Ethernet function.  	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ -	  Output Hub), ML7223. -	  ML7223 IOH is for MP(Media Phone) use. -	  ML7223 is companion chip for Intel Atom E6xx series. -	  ML7223 is completely compatible for Intel EG20T PCH. +	  Output Hub), ML7223/ML7831. +	  ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general +	  purpose use. +	  ML7223/ML7831 is companion chip for Intel Atom E6xx series. +	  ML7223/ML7831 is completely compatible for Intel EG20T PCH.  config FTGMAC100  	tristate "Faraday FTGMAC100 Gigabit Ethernet support" diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 52fe21e1e2c..3b1416e3d21 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)  	struct net_device *dev = (struct net_device *)data;  	struct dev_priv *priv = netdev_priv(dev);  	unsigned int lnkstat, carrier; +	unsigned long flags; +	spin_lock_irqsave(&priv->chip_lock, flags);  	lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; +	spin_unlock_irqrestore(&priv->chip_lock, flags);  	carrier = netif_carrier_ok(dev);  	if (lnkstat && !carrier) { diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c index c827a6097d0..8d0baeceaaa 100644 --- a/drivers/net/arm/ks8695net.c +++ b/drivers/net/arm/ks8695net.c @@ -414,7 +414,7 @@ ks8695_tx_irq(int irq, void *dev_id)   *    Interrupt Status Register (Offset 0xF208)   *        Bit29: WAN MAC Receive Status   *        Bit16: LAN MAC Receive Status - *    So, this Rx interrrupt enable/status bit number is equal + *    So, this Rx interrupt enable/status bit number is equal   *    as Rx IRQ number.   
*/  static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp) diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 97e6954304e..9f3e5306ef7 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -858,7 +858,7 @@ static s32 atl1_init_hw(struct atl1_hw *hw)  	atl1_init_flash_opcode(hw);  	if (!hw->phy_configured) { -		/* enable GPHY LinkChange Interrrupt */ +		/* enable GPHY LinkChange Interrupt */  		ret_val = atl1_write_phy_reg(hw, 18, 0xC00);  		if (ret_val)  			return ret_val; diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index fc50d4267df..99d31a7d6aa 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -5617,7 +5617,7 @@ struct l2_fhdr {  #define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)  #define BNX2_TXP_CPU_STATE_SOFT_HALTED			 (1L<<10)  #define BNX2_TXP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11) -#define BNX2_TXP_CPU_STATE_INTERRRUPT			 (1L<<12) +#define BNX2_TXP_CPU_STATE_INTERRUPT			 (1L<<12)  #define BNX2_TXP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)  #define BNX2_TXP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)  #define BNX2_TXP_CPU_STATE_BLOCKED_READ			 (1L<<31) @@ -5712,7 +5712,7 @@ struct l2_fhdr {  #define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)  #define BNX2_TPAT_CPU_STATE_SOFT_HALTED			 (1L<<10)  #define BNX2_TPAT_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11) -#define BNX2_TPAT_CPU_STATE_INTERRRUPT			 (1L<<12) +#define BNX2_TPAT_CPU_STATE_INTERRUPT			 (1L<<12)  #define BNX2_TPAT_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)  #define BNX2_TPAT_CPU_STATE_INST_FETCH_STALL		 (1L<<15)  #define BNX2_TPAT_CPU_STATE_BLOCKED_READ		 (1L<<31) @@ -5807,7 +5807,7 @@ struct l2_fhdr {  #define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)  #define BNX2_RXP_CPU_STATE_SOFT_HALTED			 (1L<<10)  #define BNX2_RXP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11) -#define BNX2_RXP_CPU_STATE_INTERRRUPT			 (1L<<12) +#define BNX2_RXP_CPU_STATE_INTERRUPT			 (1L<<12)  #define BNX2_RXP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)  #define BNX2_RXP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)  #define BNX2_RXP_CPU_STATE_BLOCKED_READ			 (1L<<31) @@ -5953,7 +5953,7 @@ struct l2_fhdr {  #define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)  #define BNX2_COM_CPU_STATE_SOFT_HALTED			 (1L<<10)  #define BNX2_COM_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11) -#define BNX2_COM_CPU_STATE_INTERRRUPT			 (1L<<12) +#define BNX2_COM_CPU_STATE_INTERRUPT			 (1L<<12)  #define BNX2_COM_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)  #define BNX2_COM_CPU_STATE_INST_FETCH_STALL		 (1L<<15)  #define BNX2_COM_CPU_STATE_BLOCKED_READ			 (1L<<31) @@ -6119,7 +6119,7 @@ struct l2_fhdr {  #define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)  #define BNX2_CP_CPU_STATE_SOFT_HALTED			 (1L<<10)  #define BNX2_CP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11) -#define BNX2_CP_CPU_STATE_INTERRRUPT			 (1L<<12) +#define BNX2_CP_CPU_STATE_INTERRUPT			 (1L<<12)  #define BNX2_CP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)  #define BNX2_CP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)  #define BNX2_CP_CPU_STATE_BLOCKED_READ			 (1L<<31) @@ -6291,7 +6291,7 @@ struct l2_fhdr {  #define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)  #define BNX2_MCP_CPU_STATE_SOFT_HALTED			 (1L<<10)  #define BNX2_MCP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11) -#define BNX2_MCP_CPU_STATE_INTERRRUPT			 (1L<<12) +#define BNX2_MCP_CPU_STATE_INTERRUPT			 (1L<<12)  #define BNX2_MCP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)  #define BNX2_MCP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)  #define BNX2_MCP_CPU_STATE_BLOCKED_READ			 (1L<<31) diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 
c423504a755..9a7eb3b36cf 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp);   *  FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X   *   */ -/* iSCSI L2 */ -#define BNX2X_ISCSI_ETH_CL_ID_IDX	1 -#define BNX2X_ISCSI_ETH_CID		49 +enum { +	BNX2X_ISCSI_ETH_CL_ID_IDX, +	BNX2X_FCOE_ETH_CL_ID_IDX, +	BNX2X_MAX_CNIC_ETH_CL_ID_IDX, +}; -/* FCoE L2 */ -#define BNX2X_FCOE_ETH_CL_ID_IDX	2 -#define BNX2X_FCOE_ETH_CID		50 +#define BNX2X_CNIC_START_ETH_CID	48 +enum { +	/* iSCSI L2 */ +	BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, +	/* FCoE L2 */ +	BNX2X_FCOE_ETH_CID, +};  /** Additional rings budgeting */  #ifdef BCM_CNIC @@ -315,6 +321,14 @@ union db_prod {  	u32		raw;  }; +/* dropless fc FW/HW related params */ +#define BRB_SIZE(bp)		(CHIP_IS_E3(bp) ? 1024 : 512) +#define MAX_AGG_QS(bp)		(CHIP_IS_E1(bp) ? \ +					ETH_MAX_AGGREGATION_QUEUES_E1 :\ +					ETH_MAX_AGGREGATION_QUEUES_E1H_E2) +#define FW_DROP_LEVEL(bp)	(3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) +#define FW_PREFETCH_CNT		16 +#define DROPLESS_FC_HEADROOM	100  /* MC hsi */  #define BCM_PAGE_SHIFT		12 @@ -331,15 +345,35 @@ union db_prod {  /* SGE ring related macros */  #define NUM_RX_SGE_PAGES	2  #define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) -#define MAX_RX_SGE_CNT		(RX_SGE_CNT - 2) +#define NEXT_PAGE_SGE_DESC_CNT	2 +#define MAX_RX_SGE_CNT		(RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)  /* RX_SGE_CNT is promised to be a power of 2 */  #define RX_SGE_MASK		(RX_SGE_CNT - 1)  #define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)  #define MAX_RX_SGE		(NUM_RX_SGE - 1)  #define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \ -				  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) +				  (MAX_RX_SGE_CNT - 1)) ? \ +					(x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ +					(x) + 1)  #define RX_SGE(x)		((x) & MAX_RX_SGE) +/* + * Number of required  SGEs is the sum of two: + * 1. Number of possible opened aggregations (next packet for + *    these aggregations will probably consume SGE immidiatelly) + * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only + *    after placement on BD for new TPA aggregation) + * + * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page + */ +#define NUM_SGE_REQ		(MAX_AGG_QS(bp) + \ +					(BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) +#define NUM_SGE_PG_REQ		((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ +						MAX_RX_SGE_CNT) +#define SGE_TH_LO(bp)		(NUM_SGE_REQ + \ +				 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) +#define SGE_TH_HI(bp)		(SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) +  /* Manipulate a bit vector defined as an array of u64 */  /* Number of bits in one sge_mask array element */ @@ -551,24 +585,43 @@ struct bnx2x_fastpath {  #define NUM_TX_RINGS		16  #define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) -#define MAX_TX_DESC_CNT		(TX_DESC_CNT - 1) +#define NEXT_PAGE_TX_DESC_CNT	1 +#define MAX_TX_DESC_CNT		(TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)  #define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)  #define MAX_TX_BD		(NUM_TX_BD - 1)  #define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)  #define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \ -				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) +				  (MAX_TX_DESC_CNT - 1)) ? 
\ +					(x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ +					(x) + 1)  #define TX_BD(x)		((x) & MAX_TX_BD)  #define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)  /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */  #define NUM_RX_RINGS		8  #define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) -#define MAX_RX_DESC_CNT		(RX_DESC_CNT - 2) +#define NEXT_PAGE_RX_DESC_CNT	2 +#define MAX_RX_DESC_CNT		(RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)  #define RX_DESC_MASK		(RX_DESC_CNT - 1)  #define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)  #define MAX_RX_BD		(NUM_RX_BD - 1)  #define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) -#define MIN_RX_AVAIL		128 + +/* dropless fc calculations for BDs + * + * Number of BDs should as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT + * "next" elements on each page + */ +#define NUM_BD_REQ		BRB_SIZE(bp) +#define NUM_BD_PG_REQ		((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ +					      MAX_RX_DESC_CNT) +#define BD_TH_LO(bp)		(NUM_BD_REQ + \ +				 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ +				 FW_DROP_LEVEL(bp)) +#define BD_TH_HI(bp)		(BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) + +#define MIN_RX_AVAIL		((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)  #define MIN_RX_SIZE_TPA_HW	(CHIP_IS_E1(bp) ? \  					ETH_MIN_RX_CQES_WITH_TPA_E1 : \ @@ -579,7 +632,9 @@ struct bnx2x_fastpath {  								MIN_RX_AVAIL))  #define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \ -				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) +				  (MAX_RX_DESC_CNT - 1)) ? \ +					(x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ +					(x) + 1)  #define RX_BD(x)		((x) & MAX_RX_BD)  /* @@ -589,14 +644,31 @@ struct bnx2x_fastpath {  #define CQE_BD_REL	(sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))  #define NUM_RCQ_RINGS		(NUM_RX_RINGS * CQE_BD_REL)  #define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) -#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1) +#define NEXT_PAGE_RCQ_DESC_CNT	1 +#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)  #define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)  #define MAX_RCQ_BD		(NUM_RCQ_BD - 1)  #define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)  #define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \ -				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) +				  (MAX_RCQ_DESC_CNT - 1)) ? 
\ +					(x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ +					(x) + 1)  #define RCQ_BD(x)		((x) & MAX_RCQ_BD) +/* dropless fc calculations for RCQs + * + * Number of RCQs should be as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT + * "next" elements on each page + */ +#define NUM_RCQ_REQ		BRB_SIZE(bp) +#define NUM_RCQ_PG_REQ		((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ +					      MAX_RCQ_DESC_CNT) +#define RCQ_TH_LO(bp)		(NUM_RCQ_REQ + \ +				 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ +				 FW_DROP_LEVEL(bp)) +#define RCQ_TH_HI(bp)		(RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) +  /* This is needed for determining of last_max */  #define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b)) @@ -685,24 +757,17 @@ struct bnx2x_fastpath {  #define FP_CSB_FUNC_OFF	\  			offsetof(struct cstorm_status_block_c, func) -#define HC_INDEX_TOE_RX_CQ_CONS		0 /* Formerly Ustorm TOE CQ index */ -					  /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */ -#define HC_INDEX_ETH_RX_CQ_CONS		1 /* Formerly Ustorm ETH CQ index */ -					  /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */ -#define HC_INDEX_ETH_RX_BD_CONS		2 /* Formerly Ustorm ETH BD index */ -					  /* (HC_INDEX_U_ETH_RX_BD_CONS)  */ +#define HC_INDEX_ETH_RX_CQ_CONS		1 -#define HC_INDEX_TOE_TX_CQ_CONS		4 /* Formerly Cstorm TOE CQ index   */ -					  /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5 /* Formerly Cstorm ETH CQ index   */ -					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6 /* Formerly Cstorm ETH CQ index   */ -					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7 /* Formerly Cstorm ETH CQ index   */ -					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */ +#define HC_INDEX_OOO_TX_CQ_CONS		4 -#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0 +#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6 +#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7 + +#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0  #define BNX2X_RX_SB_INDEX \  	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) @@ -1100,11 +1165,12 @@ struct bnx2x {  #define BP_PORT(bp)			(bp->pfid & 1)  #define BP_FUNC(bp)			(bp->pfid)  #define BP_ABS_FUNC(bp)			(bp->pf_num) -#define BP_E1HVN(bp)			(bp->pfid >> 1) -#define BP_VN(bp)			(BP_E1HVN(bp)) /*remove when approved*/ -#define BP_L_ID(bp)			(BP_E1HVN(bp) << 2) -#define BP_FW_MB_IDX(bp)		(BP_PORT(bp) +\ -	  BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1)) +#define BP_VN(bp)			((bp)->pfid >> 1) +#define BP_MAX_VN_NUM(bp)		(CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) +#define BP_L_ID(bp)			(BP_VN(bp) << 2) +#define BP_FW_MB_IDX_VN(bp, vn)		(BP_PORT(bp) +\ +	  (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 
2  : 1)) +#define BP_FW_MB_IDX(bp)		BP_FW_MB_IDX_VN(bp, BP_VN(bp))  	struct net_device	*dev;  	struct pci_dev		*pdev; @@ -1767,7 +1833,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,  #define MAX_DMAE_C_PER_PORT		8  #define INIT_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ -					 BP_E1HVN(bp)) +					 BP_VN(bp))  #define PMF_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \  					 E1HVN_MAX) @@ -1793,7 +1859,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,  /* must be used on a CID before placing it on a HW ring */  #define HW_CID(bp, x)			((BP_PORT(bp) << 23) | \ -					 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ +					 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \  					 (x))  #define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe)) diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 37e5790681a..c4cbf973641 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)  void bnx2x_init_rx_rings(struct bnx2x *bp)  {  	int func = BP_FUNC(bp); -	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : -					      ETH_MAX_AGGREGATION_QUEUES_E1H_E2;  	u16 ring_prod;  	int i, j; @@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)  		if (!fp->disable_tpa) {  			/* Fill the per-aggregtion pool */ -			for (i = 0; i < max_agg_queues; i++) { +			for (i = 0; i < MAX_AGG_QS(bp); i++) {  				struct bnx2x_agg_info *tpa_info =  					&fp->tpa_info[i];  				struct sw_rx_bd *first_buf = @@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)  					bnx2x_free_rx_sge_range(bp, fp,  								ring_prod);  					bnx2x_free_tpa_pool(bp, fp, -							    max_agg_queues); +							    MAX_AGG_QS(bp));  					fp->disable_tpa = 1;  					ring_prod = 0;  					break; @@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)  		bnx2x_free_rx_bds(fp);  		if (!fp->disable_tpa) -			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? -					    ETH_MAX_AGGREGATION_QUEUES_E1 : -					    ETH_MAX_AGGREGATION_QUEUES_E1H_E2); +			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));  	}  } @@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)  	struct bnx2x_fastpath *fp = &bp->fp[index];  	int ring_size = 0;  	u8 cos; +	int rx_ring_size = 0;  	/* if rx_ring_size specified - use it */ -	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : -			   MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); +	if (!bp->rx_ring_size) { -	/* allocate at least number of buffers required by FW */ -	rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : -						    MIN_RX_SIZE_TPA, -				  rx_ring_size); +		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); + +		/* allocate at least number of buffers required by FW */ +		rx_ring_size = max_t(int, bp->disable_tpa ? 
MIN_RX_SIZE_NONTPA : +				     MIN_RX_SIZE_TPA, rx_ring_size); + +		bp->rx_ring_size = rx_ring_size; +	} else +		rx_ring_size = bp->rx_ring_size;  	/* Common */  	sb = &bnx2x_fp(bp, index, status_blk); diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 223bfeebc59..2dc1199239d 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h @@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,  static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)  {  	return bp->cnic_base_cl_id + cl_idx + -		(bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; +		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;  }  static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index a1e004a82f7..0b4acf67e0c 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c @@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)  			break;  		case DCB_CAP_ATTR_DCBX:  			*cap = BNX2X_DCBX_CAPS; +			break;  		default:  			rval = -EINVAL;  			break; diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 221863059da..cf3e47914dd 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c @@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)  		}  		/* advertise the requested speed and duplex if supported */ -		cmd->advertising &= bp->port.supported[cfg_idx]; +		if (cmd->advertising & ~(bp->port.supported[cfg_idx])) { +			DP(NETIF_MSG_LINK, "Advertisement parameters " +					   "are not supported\n"); +			return -EINVAL; +		}  		bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; -		bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; -		bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | +		bp->link_params.req_duplex[cfg_idx] = cmd->duplex; +		bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |  					 cmd->advertising); +		if (cmd->advertising) { + +			bp->link_params.speed_cap_mask[cfg_idx] = 0; +			if (cmd->advertising & ADVERTISED_10baseT_Half) { +				bp->link_params.speed_cap_mask[cfg_idx] |= +				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; +			} +			if (cmd->advertising & ADVERTISED_10baseT_Full) +				bp->link_params.speed_cap_mask[cfg_idx] |= +				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; + +			if (cmd->advertising & ADVERTISED_100baseT_Full) +				bp->link_params.speed_cap_mask[cfg_idx] |= +				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; +			if (cmd->advertising & ADVERTISED_100baseT_Half) { +				bp->link_params.speed_cap_mask[cfg_idx] |= +				     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; +			} +			if (cmd->advertising & ADVERTISED_1000baseT_Half) { +				bp->link_params.speed_cap_mask[cfg_idx] |= +					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; +			} +			if (cmd->advertising & (ADVERTISED_1000baseT_Full | +						ADVERTISED_1000baseKX_Full)) +				bp->link_params.speed_cap_mask[cfg_idx] |= +					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; + +			if (cmd->advertising & (ADVERTISED_10000baseT_Full | +						ADVERTISED_10000baseKX4_Full | +						ADVERTISED_10000baseKR_Full)) +				bp->link_params.speed_cap_mask[cfg_idx] |= +					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; +		}  	} else { /* forced speed */  		/* advertise the requested speed and duplex if supported */  		switch (speed) { @@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,  	if (bp->rx_ring_size)  		ering->rx_pending = bp->rx_ring_size;  	
else -		if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) -			ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; -		else -			ering->rx_pending = MAX_RX_AVAIL; +		ering->rx_pending = MAX_RX_AVAIL;  	ering->rx_mini_pending = 0;  	ering->rx_jumbo_pending = 0; diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index d45b1555a60..ba15bdc5a1a 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,  {  	u32 nig_reg_adress_crd_weight = 0;  	u32 pbf_reg_adress_crd_weight = 0; -	/* Calculate and set BW for this COS*/ -	const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; -	const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; +	/* Calculate and set BW for this COS - use 1 instead of 0 for BW */ +	const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; +	const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;  	switch (cos_entry) {  	case 0: @@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(  	/* Calculate total BW requested */  	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {  		if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { - -			if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { -				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" -						   "was set to 0\n"); -			return -EINVAL; +			*total_bw += +				ets_params->cos[cos_idx].params.bw_params.bw;  		} -		*total_bw += -		    ets_params->cos[cos_idx].params.bw_params.bw; -	    }  	} -	/*Check taotl BW is valid */ +	/* Check total BW is valid */  	if ((100 != *total_bw) || (0 == *total_bw)) {  		if (0 == *total_bw) {  			DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" @@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,  	/* Check loopback mode */  	if (lb) -		val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; +		val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;  	REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);  	bnx2x_set_xumac_nig(params,  			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); @@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,  	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,  			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); +	/* Advertised and set FEC (Forward Error Correction) */ +	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, +			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, +			 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | +			  MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); +  	/* Enable CL37 BAM */  	if (REG_RD(bp, params->shmem_base +  		   offsetof(struct shmem_region, dev_info. @@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,  					(tmp | EMAC_LED_OVERRIDE));  				/*  				 * return here without enabling traffic -				 * LED blink andsetting rate in ON mode. +				 * LED blink and setting rate in ON mode.  				 * In oper mode, enabling LED blink  				 * and setting rate is needed.  				 
*/ @@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params,  			 * This is a work-around for HW issue found when link  			 * is up in CL73  			 */ -			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); +			if ((!CHIP_IS_E3(bp)) || +			    (CHIP_IS_E3(bp) && +			     mode == LED_MODE_ON)) +				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); +  			if (CHIP_IS_E1x(bp) ||  			    CHIP_IS_E2(bp) ||  			    (mode == LED_MODE_ON)) @@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {  	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,  	.addr		= 0xff,  	.def_md_devad	= 0, -	.flags		= (FLAGS_HW_LOCK_REQUIRED | -			   FLAGS_TX_ERROR_CHECK), +	.flags		= FLAGS_HW_LOCK_REQUIRED,  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.mdio_ctrl	= 0, @@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {  	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,  	.addr		= 0xff,  	.def_md_devad	= 0, -	.flags		= (FLAGS_INIT_XGXS_FIRST | -			   FLAGS_TX_ERROR_CHECK), +	.flags		= FLAGS_INIT_XGXS_FIRST,  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.mdio_ctrl	= 0, @@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {  	.addr		= 0xff,  	.def_md_devad	= 0,  	.flags		= (FLAGS_HW_LOCK_REQUIRED | -			   FLAGS_INIT_XGXS_FIRST | -			   FLAGS_TX_ERROR_CHECK), +			   FLAGS_INIT_XGXS_FIRST),  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.mdio_ctrl	= 0, @@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {  	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,  	.addr		= 0xff,  	.def_md_devad	= 0, -	.flags		= (FLAGS_FAN_FAILURE_DET_REQ | -			   FLAGS_TX_ERROR_CHECK), +	.flags		= FLAGS_FAN_FAILURE_DET_REQ,  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.mdio_ctrl	= 0, diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index f74582a22c6..15f800085bb 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,  	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);  	opcode |= (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); -	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | -		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); +	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | +		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));  	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);  #ifdef __BIG_ENDIAN @@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)  	if (!CHIP_IS_E1(bp)) {  		/* init leading/trailing edge */  		if (IS_MF(bp)) { -			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); +			val = (0xee0f | (1 << (BP_VN(bp) + 4)));  			if (bp->port.pmf)  				/* enable nig and gpio3 attention */  				val |= 0x1100; @@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)  	/* init leading/trailing edge */  	if (IS_MF(bp)) { -		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); +		val = (0xee0f | (1 << (BP_VN(bp) + 4)));  		if (bp->port.pmf)  			/* enable nig and gpio3 attention */  			val |= 0x1100; @@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)  	int vn;  	bp->vn_weight_sum = 0; -	for (vn = VN_0; vn < E1HVN_MAX; vn++) { +	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {  		u32 vn_cfg = bp->mf_config[vn];  		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>  				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100; @@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)  					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;  } +/* returns func by VN for current port */ +static inline int func_by_vn(struct bnx2x *bp, int vn) +{ +	return 2 * vn + BP_PORT(bp); +} +  static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)  {  	struct rate_shaping_vars_per_vn m_rs_vn;  	struct fairness_vars_per_vn m_fair_vn;  	u32 vn_cfg = bp->mf_config[vn]; -	int func = 2*vn + BP_PORT(bp); +	int func = func_by_vn(bp, vn);  	u16 vn_min_rate, vn_max_rate;  	int i; @@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)  	 *  	 *      and there are 2 functions per port  	 */ -	for (vn = VN_0; vn < E1HVN_MAX; vn++) { +	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {  		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);  		if (func >= E1H_FUNC_MAX) @@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)  		/* calculate and set min-max rate for each vn */  		if (bp->port.pmf) -			for (vn = VN_0; vn < E1HVN_MAX; vn++) +			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)  				bnx2x_init_vn_minmax(bp, vn);  		/* always enable rate shaping and fairness */ @@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)  static inline void bnx2x_link_sync_notify(struct bnx2x *bp)  { -	int port = BP_PORT(bp);  	int func;  	int vn;  	/* Set the attention towards other drivers on the same port */ -	for (vn = VN_0; vn < E1HVN_MAX; vn++) { -		if (vn == BP_E1HVN(bp)) +	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { +		if (vn == BP_VN(bp))  			continue; -		func = ((vn << 1) | port); +		func = func_by_vn(bp, vn);  		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +  		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);  	} @@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)  	bnx2x_dcbx_pmf_update(bp);  	/* enable nig attention */ -	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); +	val = (0xff0f | (1 << (BP_VN(bp) + 4)));  	if (bp->common.int_block == INT_BLOCK_HC) {  		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);  		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); @@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,  	u16 tpa_agg_size = 
0;  	if (!fp->disable_tpa) { -		pause->sge_th_hi = 250; -		pause->sge_th_lo = 150; +		pause->sge_th_lo = SGE_TH_LO(bp); +		pause->sge_th_hi = SGE_TH_HI(bp); + +		/* validate SGE ring has enough to cross high threshold */ +		WARN_ON(bp->dropless_fc && +				pause->sge_th_hi + FW_PREFETCH_CNT > +				MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); +  		tpa_agg_size = min_t(u32,  			(min_t(u32, 8, MAX_SKB_FRAGS) *  			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); @@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,  	/* pause - not for e1 */  	if (!CHIP_IS_E1(bp)) { -		pause->bd_th_hi = 350; -		pause->bd_th_lo = 250; -		pause->rcq_th_hi = 350; -		pause->rcq_th_lo = 250; +		pause->bd_th_lo = BD_TH_LO(bp); +		pause->bd_th_hi = BD_TH_HI(bp); + +		pause->rcq_th_lo = RCQ_TH_LO(bp); +		pause->rcq_th_hi = RCQ_TH_HI(bp); +		/* +		 * validate that rings have enough entries to cross +		 * high thresholds +		 */ +		WARN_ON(bp->dropless_fc && +				pause->bd_th_hi + FW_PREFETCH_CNT > +				bp->rx_ring_size); +		WARN_ON(bp->dropless_fc && +				pause->rcq_th_hi + FW_PREFETCH_CNT > +				NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);  		pause->pri_map = 1;  	} @@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,  	 * For PF Clients it should be the maximum avaliable number.  	 * VF driver(s) may want to define it to a smaller value.  	 */ -	rxq_init->max_tpa_queues = -		(CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : -		ETH_MAX_AGGREGATION_QUEUES_E1H_E2); +	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);  	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;  	rxq_init->fw_sb_id = fp->fw_sb_id; @@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,  	hc_sm->time_to_expire = 0xFFFFFFFF;  } + +/* allocates state machine ids. */ +static inline +void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) +{ +	/* zero out state machine indices */ +	/* rx indices */ +	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + +	/* tx indices */ +	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; + +	/* map indices */ +	/* rx indices */ +	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= +		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + +	/* tx indices */ +	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +} +  static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,  			  u8 vf_valid, int fw_sb_id, int igu_sb_id)  { @@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,  		hc_sm_p = sb_data_e2.common.state_machine;  		sb_data_p = (u32 *)&sb_data_e2;  		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); +		bnx2x_map_sb_state_machines(sb_data_e2.index_data);  	} else {  		memset(&sb_data_e1x, 0,  		       sizeof(struct hc_status_block_data_e1x)); @@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,  		hc_sm_p = sb_data_e1x.common.state_machine;  		sb_data_p = (u32 *)&sb_data_e1x;  		data_size = sizeof(struct 
hc_status_block_data_e1x)/sizeof(u32); +		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);  	}  	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], @@ -4890,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)  	int igu_seg_id;  	int port = BP_PORT(bp);  	int func = BP_FUNC(bp); -	int reg_offset; +	int reg_offset, reg_offset_en5;  	u64 section;  	int index;  	struct hc_sp_status_block_data sp_sb_data; @@ -4913,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)  	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :  			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); +	reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : +				 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);  	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {  		int sindex;  		/* take care of sig[0]..sig[4] */ @@ -4927,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)  			 * and not 16 between the different groups  			 */  			bp->attn_group[index].sig[4] = REG_RD(bp, -					reg_offset + 0x10 + 0x4*index); +					reg_offset_en5 + 0x4*index);  		else  			bp->attn_group[index].sig[4] = 0;  	} @@ -5802,7 +5857,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)  	 * take the UNDI lock to protect undi_unload flow from accessing  	 * registers while we're resetting the chip  	 */ -	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); +	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);  	bnx2x_reset_common(bp);  	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); @@ -5814,7 +5869,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)  	}  	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); -	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); +	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);  	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); @@ -6671,12 +6726,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)  			if (CHIP_MODE_IS_4_PORT(bp))  				dsb_idx = BP_FUNC(bp);  			else -				dsb_idx = BP_E1HVN(bp); +				dsb_idx = BP_VN(bp);  			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?  				       IGU_BC_BASE_DSB_PROD + dsb_idx :  				       IGU_NORM_BASE_DSB_PROD + dsb_idx); +			/* +			 * igu prods come in chunks of E1HVN_MAX (4) - +			 * does not matters what is the current chip mode +			 */  			for (i = 0; i < (num_segs * E1HVN_MAX);  			     i += E1HVN_MAX) {  				addr = IGU_REG_PROD_CONS_MEMORY + @@ -7568,9 +7627,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)  		u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0;  		u8 *mac_addr = bp->dev->dev_addr;  		u32 val; +		u16 pmc; +  		/* The mac address is written to entries 1-4 to -		   preserve entry 0 which is used by the PMF */ -		u8 entry = (BP_E1HVN(bp) + 1)*8; +		 * preserve entry 0 which is used by the PMF +		 */ +		u8 entry = (BP_VN(bp) + 1)*8;  		val = (mac_addr[0] << 8) | mac_addr[1];  		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); @@ -7579,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)  		      (mac_addr[4] << 8) | mac_addr[5];  		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); +		/* Enable the PME and clear the status */ +		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); +		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; +		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); +  		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;  	} else @@ -8546,10 +8613,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)  	/* Check if there is any driver already loaded */  	val = REG_RD(bp, MISC_REG_UNPREPARED);  	if (val == 0x1) { -		/* Check if it is the UNDI driver + +		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); +		/* +		 * Check if it is the UNDI driver  		 * UNDI driver initializes CID offset for normal bell to 0x7  		 */ -		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);  		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);  		if (val == 0x7) {  			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; @@ -8587,9 +8656,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)  				bnx2x_fw_command(bp, reset_code, 0);  			} -			/* now it's safe to release the lock */ -			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); -  			bnx2x_undi_int_disable(bp);  			port = BP_PORT(bp); @@ -8639,8 +8705,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)  			bp->fw_seq =  			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &  				DRV_MSG_SEQ_NUMBER_MASK); -		} else -			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); +		} + +		/* now it's safe to release the lock */ +		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);  	}  } @@ -8777,13 +8845,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)  static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)  {  	int pfid = BP_FUNC(bp); -	int vn = BP_E1HVN(bp);  	int igu_sb_id;  	u32 val;  	u8 fid, igu_sb_cnt = 0;  	bp->igu_base_sb = 0xff;  	if (CHIP_INT_MODE_IS_BC(bp)) { +		int vn = BP_VN(bp);  		igu_sb_cnt = bp->igu_sb_cnt;  		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? 
pfid : vn) *  			FP_SB_MAX_E1x; @@ -9416,6 +9484,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)  		bp->igu_base_sb = 0;  	} else {  		bp->common.int_block = INT_BLOCK_IGU; + +		/* do not allow device reset during IGU info preocessing */ +		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); +  		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);  		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { @@ -9447,6 +9519,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)  		bnx2x_get_igu_cam_info(bp); +		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);  	}  	/* @@ -9473,7 +9546,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)  	bp->mf_ov = 0;  	bp->mf_mode = 0; -	vn = BP_E1HVN(bp); +	vn = BP_VN(bp);  	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {  		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", @@ -9593,13 +9666,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)  	/* port info */  	bnx2x_get_port_hwinfo(bp); -	if (!BP_NOMCP(bp)) { -		bp->fw_seq = -			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & -			 DRV_MSG_SEQ_NUMBER_MASK); -		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); -	} -  	/* Get MAC addresses */  	bnx2x_get_mac_hwinfo(bp); @@ -9765,6 +9831,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)  	if (!BP_NOMCP(bp))  		bnx2x_undi_unload(bp); +	/* init fw_seq after undi_unload! */ +	if (!BP_NOMCP(bp)) { +		bp->fw_seq = +			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & +			 DRV_MSG_SEQ_NUMBER_MASK); +		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); +	} +  	if (CHIP_REV_IS_FPGA(bp))  		dev_err(&bp->pdev->dev, "FPGA detected\n"); @@ -10259,17 +10333,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,  	/* clean indirect addresses */  	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,  			       PCICFG_VENDOR_ID_OFFSET); -	/* Clean the following indirect addresses for all functions since it +	/* +	 * Clean the following indirect addresses for all functions since it  	 * is not used by the driver.  	 */  	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);  	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);  	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);  	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); -	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); -	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); -	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); -	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + +	if (CHIP_IS_E1x(bp)) { +		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); +		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); +		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); +		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); +	}  	/*  	 * Enable internal target-read (in case we are probed after PF FLR). diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 40266c14e6d..fc7bd0f23c0 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -1384,6 +1384,18 @@     Latched ump_tx_parity; [31] MCP Latched scpad_parity; */  #define MISC_REG_AEU_ENABLE4_PXP_0				 0xa108  #define MISC_REG_AEU_ENABLE4_PXP_1				 0xa1a8 +/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; */ +#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0			 0xa688 +/* [RW 32] Fifth 32b for enabling the output for function 1 output0. 
Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; */ +#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0			 0xa6b0  /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu     128 bit vector */  #define MISC_REG_AEU_GENERAL_ATTN_0				 0xa000 @@ -5320,7 +5332,7 @@  #define XCM_REG_XX_OVFL_EVNT_ID 				 0x20058  #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS	 (0x1<<0)  #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS	 (0x1<<1) -#define XMAC_CTRL_REG_CORE_LOCAL_LPBK				 (0x1<<3) +#define XMAC_CTRL_REG_LINE_LOCAL_LPBK				 (0x1<<2)  #define XMAC_CTRL_REG_RX_EN					 (0x1<<1)  #define XMAC_CTRL_REG_SOFT_RESET				 (0x1<<6)  #define XMAC_CTRL_REG_TX_EN					 (0x1<<0) @@ -5766,7 +5778,7 @@  #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0			 8  #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1			 9  #define HW_LOCK_RESOURCE_SPIO					 2 -#define HW_LOCK_RESOURCE_UNDI					 5 +#define HW_LOCK_RESOURCE_RESET					 5  #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT			 (0x1<<4)  #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR			 (0x1<<5)  #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR			 (0x1<<18) @@ -6853,6 +6865,9 @@ Theotherbitsarereservedandshouldbezero*/  #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP			0x7  #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0	0x10  #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1	0x11 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2	0x12 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY	0x4000 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ		0x8000  #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150  0x96  #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL		0x8000  #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1		0x800e diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index 771f6803b23..9908f2bbcf7 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)  		break;  	case MAC_TYPE_NONE: /* unreached */ -		BNX2X_ERR("stats updated by DMAE but no MAC active\n"); +		DP(BNX2X_MSG_STATS, +		   "stats updated by DMAE but no MAC active\n");  		return -1;  	default: /* unreached */ @@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)  static void bnx2x_func_stats_base_init(struct bnx2x *bp)  { -	int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; +	int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;  	u32 func_stx;  	/* sanity */ @@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)  	func_stx = bp->func_stx;  	for (vn = VN_0; vn < vn_max; vn++) { -		int mb_idx = CHIP_IS_E1x(bp) ? 
2*vn + BP_PORT(bp) : vn; +		int mb_idx = BP_FW_MB_IDX_VN(bp, vn);  		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);  		bnx2x_func_stats_init(bp); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a047eb973e3..47b928ed08f 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)  	}  re_arm: -	queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); +	if (!bond->kill_timers) +		queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);  out:  	read_unlock(&bond->lock);  } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7f8b20a34ee..d4fbd2e6261 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)  	}  re_arm: -	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); +	if (!bond->kill_timers) +		queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);  out:  	read_unlock(&bond->lock);  } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 38a83acd502..de3d351ccb6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)  	read_lock(&bond->lock); +	if (bond->kill_timers) +		goto out; +  	/* rejoin all groups on bond device */  	__bond_resend_igmp_join_requests(bond->dev); @@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)  			__bond_resend_igmp_join_requests(vlan_dev);  	} -	if (--bond->igmp_retrans > 0) +	if ((--bond->igmp_retrans > 0) && !bond->kill_timers)  		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); - +out:  	read_unlock(&bond->lock);  } @@ -1432,6 +1435,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)  	struct sk_buff *skb = *pskb;  	struct slave *slave;  	struct bonding *bond; +	void (*recv_probe)(struct sk_buff *, struct bonding *, +				struct slave *);  	skb = skb_share_check(skb, GFP_ATOMIC);  	if (unlikely(!skb)) @@ -1445,11 +1450,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)  	if (bond->params.arp_interval)  		slave->dev->last_rx = jiffies; -	if (bond->recv_probe) { +	recv_probe = ACCESS_ONCE(bond->recv_probe); +	if (recv_probe) {  		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);  		if (likely(nskb)) { -			bond->recv_probe(nskb, bond, slave); +			recv_probe(nskb, bond, slave);  			dev_kfree_skb(nskb);  		}  	} @@ -2538,7 +2544,7 @@ void bond_mii_monitor(struct work_struct *work)  	}  re_arm: -	if (bond->params.miimon) +	if (bond->params.miimon && !bond->kill_timers)  		queue_delayed_work(bond->wq, &bond->mii_work,  				   msecs_to_jiffies(bond->params.miimon));  out: @@ -2886,7 +2892,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)  	}  re_arm: -	if (bond->params.arp_interval) +	if (bond->params.arp_interval && !bond->kill_timers)  		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);  out:  	read_unlock(&bond->lock); @@ -3154,7 +3160,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)  	bond_ab_arp_probe(bond);  re_arm: -	if (bond->params.arp_interval) +	if (bond->params.arp_interval && !bond->kill_timers)  		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);  out:  	read_unlock(&bond->lock); @@ -3419,9 +3425,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)  
static int bond_open(struct net_device *bond_dev)  {  	struct bonding *bond = netdev_priv(bond_dev); +	struct slave *slave; +	int i;  	bond->kill_timers = 0; +	/* reset slave->backup and slave->inactive */ +	read_lock(&bond->lock); +	if (bond->slave_cnt > 0) { +		read_lock(&bond->curr_slave_lock); +		bond_for_each_slave(bond, slave, i) { +			if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) +				&& (slave != bond->curr_active_slave)) { +				bond_set_slave_inactive_flags(slave); +			} else { +				bond_set_slave_active_flags(slave); +			} +		} +		read_unlock(&bond->curr_slave_lock); +	} +	read_unlock(&bond->lock); +  	INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);  	if (bond_is_lb(bond)) { diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 92feac68b66..4cc6f44c2ba 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)  		void __iomem *data = &regs->tx.dsr1_0;  		u16 *payload = (u16 *)frame->data; -		/* It is safe to write into dsr[dlc+1] */ -		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { +		for (i = 0; i < frame->can_dlc / 2; i++) {  			out_be16(data, *payload++);  			data += 2 + _MSCAN_RESERVED_DSR_SIZE;  		} +		/* write remaining byte if necessary */ +		if (frame->can_dlc & 1) +			out_8(data, frame->data[frame->can_dlc - 1]);  	}  	out_8(&regs->tx.dlr, frame->can_dlc); @@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)  		void __iomem *data = &regs->rx.dsr1_0;  		u16 *payload = (u16 *)frame->data; -		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { +		for (i = 0; i < frame->can_dlc / 2; i++) {  			*payload++ = in_be16(data);  			data += 2 + _MSCAN_RESERVED_DSR_SIZE;  		} +		/* read remaining byte if necessary */ +		if (frame->can_dlc & 1) +			frame->data[frame->can_dlc - 1] = in_8(data);  	}  	out_8(&regs->canrflg, MSCAN_RXF); diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 231385b8e08..c7f3d4ea116 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -408,7 +408,7 @@ static void plx_pci_del_card(struct pci_dev *pdev)  	struct sja1000_priv *priv;  	int i = 0; -	for (i = 0; i < card->channels; i++) { +	for (i = 0; i < PLX_PCI_MAX_CHAN; i++) {  		dev = card->net_dev[i];  		if (!dev)  			continue; @@ -536,7 +536,6 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,  			if (err) {  				dev_err(&pdev->dev, "Registering device failed "  					"(err=%d)\n", err); -				free_sja1000dev(dev);  				goto failure_cleanup;  			} @@ -549,6 +548,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,  			dev_err(&pdev->dev, "Channel #%d not detected\n",  				i + 1);  			free_sja1000dev(dev); +			card->net_dev[i] = NULL;  		}  	} diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index cee6ba2b8b5..c3dd9d09be5 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -29,7 +29,7 @@   *           nxp,external-clock-frequency = <16000000>;   *   };   * - * See "Documentation/powerpc/dts-bindings/can/sja1000.txt" for further + * See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further   * information.   
*/ diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index f7bbde9eb2c..2adc294f512 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -46,6 +46,7 @@  #include <linux/skbuff.h>  #include <linux/platform_device.h>  #include <linux/clk.h> +#include <linux/io.h>  #include <linux/can/dev.h>  #include <linux/can/error.h> @@ -503,9 +504,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)  	spin_unlock_irqrestore(&priv->mbx_lock, flags);  	/* Prepare mailbox for transmission */ +	data = cf->can_dlc | (get_tx_head_prio(priv) << 8);  	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */  		data |= HECC_CANMCF_RTR; -	data |= get_tx_head_prio(priv) << 8;  	hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);  	if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ @@ -923,6 +924,7 @@ static int ti_hecc_probe(struct platform_device *pdev)  	priv->can.do_get_state = ti_hecc_get_state;  	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; +	spin_lock_init(&priv->mbx_lock);  	ndev->irq = irq->start;  	ndev->flags |= IFF_ECHO;  	platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 646c86bcc54..fdb7a175640 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -2452,14 +2452,13 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)  	struct net_device *dev = dev_id;  	struct cas *cp = netdev_priv(dev);  	unsigned long flags; -	int ring; +	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;  	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));  	/* check for shared irq */  	if (status == 0)  		return IRQ_NONE; -	ring = (irq == cp->pci_irq_INTC) ? 2 : 3;  	spin_lock_irqsave(&cp->lock, flags);  	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */  #ifdef USE_NAPI diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 805076c54f1..da5a5d9b8af 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)  		if (te && te->ctx && te->client && te->client->redirect) {  			update_tcb = te->client->redirect(te->ctx, old, new, e);  			if (update_tcb) { +				rcu_read_lock();  				l2t_hold(L2DATA(tdev), e); +				rcu_read_unlock();  				set_l2t_ix(tdev, tid, e);  			}  		}  	} -	l2t_release(L2DATA(tdev), e); +	l2t_release(tdev, e);  }  /* @@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter)  		goto out_free;  	err = -ENOMEM; -	L2DATA(dev) = t3_init_l2t(l2t_capacity); +	RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));  	if (!L2DATA(dev))  		goto out_free; @@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter)  out_free_l2t:  	t3_free_l2t(L2DATA(dev)); -	L2DATA(dev) = NULL; +	rcu_assign_pointer(dev->l2opt, NULL);  out_free:  	kfree(t);  	return err;  } +static void clean_l2_data(struct rcu_head *head) +{ +	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); +	t3_free_l2t(d); +} + +  void cxgb3_offload_deactivate(struct adapter *adapter)  {  	struct t3cdev *tdev = &adapter->tdev;  	struct t3c_data *t = T3C_DATA(tdev); +	struct l2t_data *d;  	remove_adapter(adapter);  	if (list_empty(&adapter_list)) @@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter)  	free_tid_maps(&t->tid_maps);  	T3C_DATA(tdev) = NULL; -	t3_free_l2t(L2DATA(tdev)); -	L2DATA(tdev) = NULL; +	rcu_read_lock(); +	d = L2DATA(tdev); +	rcu_read_unlock(); +	
rcu_assign_pointer(tdev->l2opt, NULL); +	call_rcu(&d->rcu_head, clean_l2_data);  	if (t->nofail_skb)  		kfree_skb(t->nofail_skb);  	kfree(t); diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c index f452c400325..41540978a17 100644 --- a/drivers/net/cxgb3/l2t.c +++ b/drivers/net/cxgb3/l2t.c @@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)  struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,  			     struct net_device *dev)  { -	struct l2t_entry *e; -	struct l2t_data *d = L2DATA(cdev); +	struct l2t_entry *e = NULL; +	struct l2t_data *d; +	int hash;  	u32 addr = *(u32 *) neigh->primary_key;  	int ifidx = neigh->dev->ifindex; -	int hash = arp_hash(addr, ifidx, d);  	struct port_info *p = netdev_priv(dev);  	int smt_idx = p->port_id; +	rcu_read_lock(); +	d = L2DATA(cdev); +	if (!d) +		goto done_rcu; + +	hash = arp_hash(addr, ifidx, d); +  	write_lock_bh(&d->lock);  	for (e = d->l2tab[hash].first; e; e = e->next)  		if (e->addr == addr && e->ifindex == ifidx && @@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,  	}  done:  	write_unlock_bh(&d->lock); +done_rcu: +	rcu_read_unlock();  	return e;  } diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h index 7a12d52ed4f..c5f54796e2c 100644 --- a/drivers/net/cxgb3/l2t.h +++ b/drivers/net/cxgb3/l2t.h @@ -76,6 +76,7 @@ struct l2t_data {  	atomic_t nfree;		/* number of free entries */  	rwlock_t lock;  	struct l2t_entry l2tab[0]; +	struct rcu_head rcu_head;	/* to handle rcu cleanup */  };  typedef void (*arp_failure_handler_func)(struct t3cdev * dev, @@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,  /*   * Getting to the L2 data from an offload device.   
*/ -#define L2DATA(dev) ((dev)->l2opt) +#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))  #define W_TCB_L2T_IX    0  #define S_TCB_L2T_IX    7 @@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,  	return t3_l2t_send_slow(dev, skb, e);  } -static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) +static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)  { -	if (atomic_dec_and_test(&e->refcnt)) +	struct l2t_data *d; + +	rcu_read_lock(); +	d = L2DATA(t); + +	if (atomic_dec_and_test(&e->refcnt) && d)  		t3_l2e_free(d, e); + +	rcu_read_unlock();  }  static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)  { -	if (atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */ +	if (d && atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */  		atomic_dec(&d->nfree);  } diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index c9957b7f17b..b4efa292fd6 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev,  		setup_debugfs(adapter);  	} +	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */ +	pdev->needs_freset = 1; +  	if (is_offload(adapter))  		attach_ulds(adapter); diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 8545c7aa93e..a5a89ecb6f3 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)  		checksum += eeprom_data;  	} +#ifdef CONFIG_PARISC +	/* This is a signature and not a checksum on HP c8000 */ +	if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) +		return E1000_SUCCESS; + +#endif  	if (checksum == (u16) EEPROM_SUM)  		return E1000_SUCCESS;  	else { diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 35916f48502..8533ad7f355 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -155,6 +155,9 @@ struct e1000_info;  #define HV_M_STATUS_SPEED_1000            0x0200  #define HV_M_STATUS_LINK_UP               0x0040 +#define E1000_ICH_FWSM_PCIM2PCI		0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_PCIM2PCI_COUNT	2000 +  /* Time to wait before putting the device into D3 if there's no link (in ms). 
*/  #define LINK_TIMEOUT		100 @@ -454,6 +457,7 @@ struct e1000_info {  #define FLAG2_DISABLE_AIM                 (1 << 8)  #define FLAG2_CHECK_PHY_HANG              (1 << 9)  #define FLAG2_NO_DISABLE_RX               (1 << 10) +#define FLAG2_PCIM2PCI_ARBITER_WA         (1 << 11)  #define E1000_RX_DESC_PS(R, i)	    \  	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 4e36978b8fd..54add27c8f7 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -137,8 +137,9 @@  #define HV_PM_CTRL		PHY_REG(770, 17)  /* PHY Low Power Idle Control */ -#define I82579_LPI_CTRL			PHY_REG(772, 20) -#define I82579_LPI_CTRL_ENABLE_MASK	0x6000 +#define I82579_LPI_CTRL				PHY_REG(772, 20) +#define I82579_LPI_CTRL_ENABLE_MASK		0x6000 +#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT	0x80  /* EMI Registers */  #define I82579_EMI_ADDR         0x10 @@ -163,6 +164,11 @@  #define HV_KMRN_MODE_CTRL      PHY_REG(769, 16)  #define HV_KMRN_MDIO_SLOW      0x0400 +/* KMRN FIFO Control and Status */ +#define HV_KMRN_FIFO_CTRLSTA                  PHY_REG(770, 16) +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK    0x7000 +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT   12 +  /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */  /* Offset 04h HSFSTS */  union ich8_hws_flash_status { @@ -657,6 +663,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  	struct e1000_mac_info *mac = &hw->mac;  	s32 ret_val;  	bool link; +	u16 phy_reg;  	/*  	 * We only want to go out to the PHY registers to see if Auto-Neg @@ -689,16 +696,35 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  	mac->get_link_status = false; -	if (hw->phy.type == e1000_phy_82578) { -		ret_val = e1000_link_stall_workaround_hv(hw); -		if (ret_val) -			goto out; -	} - -	if (hw->mac.type == e1000_pch2lan) { +	switch (hw->mac.type) { +	case e1000_pch2lan:  		ret_val = e1000_k1_workaround_lv(hw);  		if (ret_val)  			goto out; +		/* fall-thru */ +	case e1000_pchlan: +		if (hw->phy.type == e1000_phy_82578) { +			ret_val = e1000_link_stall_workaround_hv(hw); +			if (ret_val) +				goto out; +		} + +		/* +		 * Workaround for PCHx parts in half-duplex: +		 * Set the number of preambles removed from the packet +		 * when it is passed from the PHY to the MAC to prevent +		 * the MAC from misinterpreting the packet type. 
+		 */ +		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); +		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; + +		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) +			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + +		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); +		break; +	default: +		break;  	}  	/* @@ -788,6 +814,11 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)  	    (adapter->hw.phy.type == e1000_phy_igp_3))  		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; +	/* Enable workaround for 82579 w/ ME enabled */ +	if ((adapter->hw.mac.type == e1000_pch2lan) && +	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) +		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; +  	/* Disable EEE by default until IEEE802.3az spec is finalized */  	if (adapter->flags2 & FLAG2_HAS_EEE)  		adapter->hw.dev_spec.ich8lan.eee_disable = true; @@ -1355,7 +1386,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)  			return ret_val;  		/* Preamble tuning for SSC */ -		ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); +		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);  		if (ret_val)  			return ret_val;  	} @@ -1645,6 +1676,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)  	s32 ret_val = 0;  	u16 status_reg = 0;  	u32 mac_reg; +	u16 phy_reg;  	if (hw->mac.type != e1000_pch2lan)  		goto out; @@ -1659,12 +1691,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)  		mac_reg = er32(FEXTNVM4);  		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; -		if (status_reg & HV_M_STATUS_SPEED_1000) +		ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); +		if (ret_val) +			goto out; + +		if (status_reg & HV_M_STATUS_SPEED_1000) {  			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; -		else +			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; +		} else {  			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; - +			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; +		}  		ew32(FEXTNVM4, mac_reg); +		ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);  	}  out: diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 362f70382cd..2198e615f24 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -519,6 +519,63 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,  }  /** + * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() + * @hw: pointer to the HW structure + * @tail: address of tail descriptor register + * @i: value to write to tail descriptor register + * + * When updating the tail register, the ME could be accessing Host CSR + * registers at the same time.  Normally, this is handled in h/w by an + * arbiter but on some parts there is a bug that acknowledges Host accesses + * later than it should which could result in the descriptor register to + * have an incorrect value.  Workaround this by checking the FWSM register + * which has bit 24 set while ME is accessing Host CSR registers, wait + * if it is set and try again a number of times. 
+ **/ +static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, +					unsigned int i) +{ +	unsigned int j = 0; + +	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && +	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) +		udelay(50); + +	writel(i, tail); + +	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) +		return E1000_ERR_SWFW_SYNC; + +	return 0; +} + +static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) +{ +	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); +	struct e1000_hw *hw = &adapter->hw; + +	if (e1000e_update_tail_wa(hw, tail, i)) { +		u32 rctl = er32(RCTL); +		ew32(RCTL, rctl & ~E1000_RCTL_EN); +		e_err("ME firmware caused invalid RDT - resetting\n"); +		schedule_work(&adapter->reset_task); +	} +} + +static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) +{ +	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); +	struct e1000_hw *hw = &adapter->hw; + +	if (e1000e_update_tail_wa(hw, tail, i)) { +		u32 tctl = er32(TCTL); +		ew32(TCTL, tctl & ~E1000_TCTL_EN); +		e_err("ME firmware caused invalid TDT - resetting\n"); +		schedule_work(&adapter->reset_task); +	} +} + +/**   * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended   * @adapter: address of board private structure   **/ @@ -573,7 +630,10 @@ map_skb:  			 * such as IA-64).  			 */  			wmb(); -			writel(i, adapter->hw.hw_addr + rx_ring->tail); +			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) +				e1000e_update_rdt_wa(adapter, i); +			else +				writel(i, adapter->hw.hw_addr + rx_ring->tail);  		}  		i++;  		if (i == rx_ring->count) @@ -673,7 +733,11 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,  			 * such as IA-64).  			 */  			wmb(); -			writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); +			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) +				e1000e_update_rdt_wa(adapter, i << 1); +			else +				writel(i << 1, +				       adapter->hw.hw_addr + rx_ring->tail);  		}  		i++; @@ -756,7 +820,10 @@ check_page:  		 * applicable for weak-ordered memory model archs,  		 * such as IA-64). 
*/  		wmb(); -		writel(i, adapter->hw.hw_addr + rx_ring->tail); +		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) +			e1000e_update_rdt_wa(adapter, i); +		else +			writel(i, adapter->hw.hw_addr + rx_ring->tail);  	}  } @@ -4689,7 +4756,12 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,  	wmb();  	tx_ring->next_to_use = i; -	writel(i, adapter->hw.hw_addr + tx_ring->tail); + +	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) +		e1000e_update_tdt_wa(adapter, i); +	else +		writel(i, adapter->hw.hw_addr + tx_ring->tail); +  	/*  	 * we need this if more than one processor can write to our tail  	 * at a time, it synchronizes IO on IA64/Altix systems diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index e55df308a3a..6d5fbd4d425 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5615,7 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i  		goto out_error;  	} -	nv_vlan_mode(dev, dev->features); +	if (id->driver_data & DEV_HAS_VLAN) +		nv_vlan_mode(dev, dev->features);  	netif_carrier_off(dev); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 2659daad783..31d5c574e5a 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -2710,8 +2710,13 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,  	/* Tell the skb what kind of packet this is */  	skb->protocol = eth_type_trans(skb, dev); -	/* Set vlan tag */ -	if (fcb->flags & RXFCB_VLN) +	/* +	 * There's need to check for NETIF_F_HW_VLAN_RX here. +	 * Even if vlan rx accel is disabled, on some chips +	 * RXFCB_VLN is pseudo randomly set. +	 */ +	if (dev->features & NETIF_F_HW_VLAN_RX && +	    fcb->flags & RXFCB_VLN)  		__vlan_hwaccel_put_tag(skb, fcb->vlctl);  	/* Send the packet up the stack */ diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 6e350692d11..0caf3c323ec 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -686,10 +686,21 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u  {  	unsigned int last_rule_idx = priv->cur_filer_idx;  	unsigned int cmp_rqfpr; -	unsigned int local_rqfpr[MAX_FILER_IDX + 1]; -	unsigned int local_rqfcr[MAX_FILER_IDX + 1]; +	unsigned int *local_rqfpr; +	unsigned int *local_rqfcr;  	int i = 0x0, k = 0x0;  	int j = MAX_FILER_IDX, l = 0x0; +	int ret = 1; + +	local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), +		GFP_KERNEL); +	local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), +		GFP_KERNEL); +	if (!local_rqfpr || !local_rqfcr) { +		pr_err("Out of memory\n"); +		ret = 0; +		goto err; +	}  	switch (class) {  	case TCP_V4_FLOW: @@ -706,7 +717,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u  		break;  	default:  		pr_err("Right now this class is not supported\n"); -		return 0; +		ret = 0; +		goto err;  	}  	for (i = 0; i < MAX_FILER_IDX + 1; i++) { @@ -721,7 +733,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u  	if (i == MAX_FILER_IDX + 1) {  		pr_err("No parse rule found, can't create hash rules\n"); -		return 0; +		ret = 0; +		goto err;  	}  	/* If a match was found, then it begins the starting of a cluster rule @@ -765,7 +778,10 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u  		priv->cur_filer_idx = priv->cur_filer_idx - 1;  	} -	return 1; +err: +	kfree(local_rqfcr); +	kfree(local_rqfpr); +	return ret;  }  static int gfar_set_hash_opts(struct 
gfar_private *priv, struct ethtool_rxnfc *cmd) @@ -1653,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv,  	u32 i = 0;  	list_for_each_entry(comp, &priv->rx_list.list, list) { -		if (i <= cmd->rule_cnt) { -			rule_locs[i] = comp->fs.location; -			i++; -		} +		if (i == cmd->rule_cnt) +			return -EMSGSIZE; +		rule_locs[i] = comp->fs.location; +		i++;  	}  	cmd->data = MAX_FILER_IDX; diff --git a/drivers/net/greth.c b/drivers/net/greth.c index 16ce45c1193..52a39000c42 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c @@ -428,6 +428,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)  	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);  	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); +	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;  	/* Wrap around descriptor ring */  	if (greth->tx_next == GRETH_TXBD_NUM_MASK) { @@ -490,7 +491,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)  	if (nr_frags != 0)  		status = GRETH_TXBD_MORE; -	status |= GRETH_TXBD_CSALL; +	if (skb->ip_summed == CHECKSUM_PARTIAL) +		status |= GRETH_TXBD_CSALL;  	status |= skb_headlen(skb) & GRETH_BD_LEN;  	if (greth->tx_next == GRETH_TXBD_NUM_MASK)  		status |= GRETH_BD_WR; @@ -513,7 +515,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)  		greth->tx_skbuff[curr_tx] = NULL;  		bdp = greth->tx_bd_base + curr_tx; -		status = GRETH_TXBD_CSALL | GRETH_BD_EN; +		status = GRETH_BD_EN; +		if (skb->ip_summed == CHECKSUM_PARTIAL) +			status |= GRETH_TXBD_CSALL;  		status |= frag->size & GRETH_BD_LEN;  		/* Wrap around descriptor ring */ @@ -641,6 +645,7 @@ static void greth_clean_tx(struct net_device *dev)  				dev->stats.tx_fifo_errors++;  		}  		dev->stats.tx_packets++; +		dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];  		greth->tx_last = NEXT_TX(greth->tx_last);  		greth->tx_free++;  	} @@ -695,6 +700,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)  		greth->tx_skbuff[greth->tx_last] = NULL;  		greth_update_tx_stats(dev, stat); +		dev->stats.tx_bytes += skb->len;  		bdp = greth->tx_bd_base + greth->tx_last; @@ -796,6 +802,7 @@ static int greth_rx(struct net_device *dev, int limit)  				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);  				skb->protocol = eth_type_trans(skb, dev); +				dev->stats.rx_bytes += pkt_len;  				dev->stats.rx_packets++;  				netif_receive_skb(skb);  			} @@ -910,6 +917,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)  				skb->protocol = eth_type_trans(skb, dev);  				dev->stats.rx_packets++; +				dev->stats.rx_bytes += pkt_len;  				netif_receive_skb(skb);  				greth->rx_skbuff[greth->rx_cur] = newskb; diff --git a/drivers/net/greth.h b/drivers/net/greth.h index 9a0040dee4d..232a622a85b 100644 --- a/drivers/net/greth.h +++ b/drivers/net/greth.h @@ -103,6 +103,7 @@ struct greth_private {  	unsigned char *tx_bufs[GRETH_TXBD_NUM];  	unsigned char *rx_bufs[GRETH_RXBD_NUM]; +	u16 tx_bufs_length[GRETH_TXBD_NUM];  	u16 tx_next;  	u16 tx_last; diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index ba99af05bf6..d393f1e764e 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -395,7 +395,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada  }  /* recycle the current buffer on the rx queue */ -static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) +static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)  {  	u32 q_index = 
adapter->rx_queue.index;  	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; @@ -403,6 +403,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)  	unsigned int index = correlator & 0xffffffffUL;  	union ibmveth_buf_desc desc;  	unsigned long lpar_rc; +	int ret = 1;  	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);  	BUG_ON(index >= adapter->rx_buff_pool[pool].size); @@ -410,7 +411,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)  	if (!adapter->rx_buff_pool[pool].active) {  		ibmveth_rxq_harvest_buffer(adapter);  		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); -		return; +		goto out;  	}  	desc.fields.flags_len = IBMVETH_BUF_VALID | @@ -423,12 +424,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)  		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "  			   "during recycle rc=%ld", lpar_rc);  		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); +		ret = 0;  	}  	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {  		adapter->rx_queue.index = 0;  		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;  	} + +out: +	return ret;  }  static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) @@ -631,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev)  		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",  			   netdev->irq, rc);  		do { -			rc = h_free_logical_lan(adapter->vdev->unit_address); -		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); +			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); +		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));  		goto err_out;  	} @@ -752,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)  	struct ibmveth_adapter *adapter = netdev_priv(dev);  	unsigned long set_attr, clr_attr, ret_attr;  	unsigned long set_attr6, clr_attr6; -	long ret, ret6; +	long ret, ret4, ret6;  	int rc1 = 0, rc2 = 0;  	int restart = 0; @@ -765,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)  	set_attr = 0;  	clr_attr = 0; +	set_attr6 = 0; +	clr_attr6 = 0;  	if (data) {  		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; @@ -779,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)  	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&  	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&  	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { -		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, +		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,  					 set_attr, &ret_attr); -		if (ret != H_SUCCESS) { +		if (ret4 != H_SUCCESS) {  			netdev_err(dev, "unable to change IPv4 checksum "  					"offload settings. %d rc=%ld\n", -					data, ret); +					data, ret4); + +			h_illan_attributes(adapter->vdev->unit_address, +					   set_attr, clr_attr, &ret_attr); + +			if (data == 1) +				dev->features &= ~NETIF_F_IP_CSUM; -			ret = h_illan_attributes(adapter->vdev->unit_address, -						 set_attr, clr_attr, &ret_attr);  		} else {  			adapter->fw_ipv4_csum_support = data;  		} @@ -799,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)  		if (ret6 != H_SUCCESS) {  			netdev_err(dev, "unable to change IPv6 checksum "  					"offload settings. 
%d rc=%ld\n", -					data, ret); +					data, ret6); + +			h_illan_attributes(adapter->vdev->unit_address, +					   set_attr6, clr_attr6, &ret_attr); + +			if (data == 1) +				dev->features &= ~NETIF_F_IPV6_CSUM; -			ret = h_illan_attributes(adapter->vdev->unit_address, -						 set_attr6, clr_attr6, -						 &ret_attr);  		} else  			adapter->fw_ipv6_csum_support = data; -		if (ret != H_SUCCESS || ret6 != H_SUCCESS) +		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)  			adapter->rx_csum = data;  		else  			rc1 = -EIO; @@ -925,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,  	union ibmveth_buf_desc descs[6];  	int last, i;  	int force_bounce = 0; +	dma_addr_t dma_addr;  	/*  	 * veth handles a maximum of 6 segments including the header, so @@ -989,17 +1004,16 @@ retry_bounce:  	}  	/* Map the header */ -	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, -						 skb_headlen(skb), -						 DMA_TO_DEVICE); -	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address)) +	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, +				  skb_headlen(skb), DMA_TO_DEVICE); +	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))  		goto map_failed;  	descs[0].fields.flags_len = desc_flags | skb_headlen(skb); +	descs[0].fields.address = dma_addr;  	/* Map the frags */  	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { -		unsigned long dma_addr;  		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  		dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, @@ -1021,7 +1035,12 @@ retry_bounce:  		netdev->stats.tx_bytes += skb->len;  	} -	for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) +	dma_unmap_single(&adapter->vdev->dev, +			 descs[0].fields.address, +			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, +			 DMA_TO_DEVICE); + +	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)  		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,  			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,  			       DMA_TO_DEVICE); @@ -1084,8 +1103,9 @@ restart_poll:  				if (rx_flush)  					ibmveth_flush_buffer(skb->data,  						length + offset); +				if (!ibmveth_rxq_recycle_buffer(adapter)) +					kfree_skb(skb);  				skb = new_skb; -				ibmveth_rxq_recycle_buffer(adapter);  			} else {  				ibmveth_rxq_harvest_buffer(adapter);  				skb_reserve(skb, offset); diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c index 74f2f11ac29..469d95eaa15 100644 --- a/drivers/net/igb/e1000_mbx.c +++ b/drivers/net/igb/e1000_mbx.c @@ -34,7 +34,7 @@   *  @size: Length of buffer   *  @mbx_id: id of mailbox to read   * - *  returns SUCCESS if it successfuly read message from buffer + *  returns SUCCESS if it successfully read message from buffer   **/  s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)  { diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c index 3d6f4cc3998..048aae248d0 100644 --- a/drivers/net/igbvf/mbx.c +++ b/drivers/net/igbvf/mbx.c @@ -288,7 +288,7 @@ out_no_write:   *  @msg: The message buffer   *  @size: Length of buffer   * - *  returns SUCCESS if it successfuly read message from buffer + *  returns SUCCESS if it successfully read message from buffer   **/  static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)  { diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index 25bb2a015e1..a40fab44b9a 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -183,7 +183,7 @@ config OLD_BELKIN_DONGLE  	  Say Y here if you want to build support for the Adaptec Airport 
1000  	  and 2000 dongles.  If you want to compile it as a module, choose  	  M here. Some information is contained in the comments -	  at the top of <file:drivers/net/irda/old_belkin.c>. +	  at the top of <file:drivers/net/irda/old_belkin-sir.c>.  config ACT200L_DONGLE  	tristate "ACTiSYS IR-200L dongle" diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e86297b3273..e1fcc958927 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1321,8 +1321,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,  		if (ring_is_rsc_enabled(rx_ring))  			pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); -		/* if this is a skb from previous receive DMA will be 0 */ -		if (rx_buffer_info->dma) { +		/* linear means we are building an skb from multiple pages */ +		if (!skb_is_nonlinear(skb)) {  			u16 hlen;  			if (pkt_is_rsc &&  			    !(staterr & IXGBE_RXD_STAT_EOP) && @@ -1459,8 +1459,10 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,  		if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {  			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,  						   staterr); -			if (!ddp_bytes) +			if (!ddp_bytes) { +				dev_kfree_skb_any(skb);  				goto next_desc; +			}  		}  #endif /* IXGBE_FCOE */  		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c index 1ff0eefcfd0..3f725d48336 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ixgbe/ixgbe_mbx.c @@ -38,7 +38,7 @@   *  @size: Length of buffer   *  @mbx_id: id of mailbox to read   * - *  returns SUCCESS if it successfuly read message from buffer + *  returns SUCCESS if it successfully read message from buffer   **/  s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)  { diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c index 7a883312577..930fa83f256 100644 --- a/drivers/net/ixgbevf/mbx.c +++ b/drivers/net/ixgbevf/mbx.c @@ -276,7 +276,7 @@ out_no_write:   *  @msg: The message buffer   *  @size: Length of buffer   * - *  returns 0 if it successfuly read message from buffer + *  returns 0 if it successfully read message from buffer   **/  static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)  { diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 05172c39a0c..376e3e94bae 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)  		dest = macvlan_hash_lookup(port, eth->h_dest);  		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {  			/* send to lowerdev first for its network taps */ -			vlan->forward(vlan->lowerdev, skb); +			dev_forward_skb(vlan->lowerdev, skb);  			return NET_XMIT_SUCCESS;  		} diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 6e03de034ac..f76ab6bf309 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c @@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,  	memset(ring->buf, 0, ring->buf_size);  	ring->qp_state = MLX4_QP_STATE_RST; -	ring->doorbell_qpn = swab32(ring->qp.qpn << 8); +	ring->doorbell_qpn = ring->qp.qpn << 8;  	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,  				ring->cqn, &ring->context); @@ -791,7 +791,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)  		skb_orphan(skb);  	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { -		*(u32 *) (&tx_desc->ctrl.vlan_tag) |= 
ring->doorbell_qpn; +		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);  		op_own |= htonl((bf_index & 0xffff) << 8);  		/* Ensure new descirptor hits memory  		* before setting ownership of this descriptor to HW */ @@ -812,7 +812,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)  		wmb();  		tx_desc->ctrl.owner_opcode = op_own;  		wmb(); -		writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); +		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);  	}  	/* Poll CQ here */ diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index dfc82720065..e8882023576 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt,  		return err;  	if (enabled < 0 || enabled > 1)  		return -EINVAL; +	if (enabled == nt->enabled) { +		printk(KERN_INFO "netconsole: network logging has already %s\n", +				nt->enabled ? "started" : "stopped"); +		return -EINVAL; +	}  	if (enabled) {	/* 1 */ @@ -799,5 +804,11 @@ static void __exit cleanup_netconsole(void)  	}  } -module_init(init_netconsole); +/* + * Use late_initcall to ensure netconsole is + * initialized after network device driver if built-in. + * + * late_initcall() and module_init() are identical if built as module. + */ +late_initcall(init_netconsole);  module_exit(cleanup_netconsole); diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index 59fac77d0db..a09a07197eb 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h @@ -127,8 +127,8 @@ struct pch_gbe_regs {  /* Reset */  #define PCH_GBE_ALL_RST         0x80000000  /* All reset */ -#define PCH_GBE_TX_RST          0x40000000  /* TX MAC, TX FIFO, TX DMA reset */ -#define PCH_GBE_RX_RST          0x04000000  /* RX MAC, RX FIFO, RX DMA reset */ +#define PCH_GBE_TX_RST          0x00008000  /* TX MAC, TX FIFO, TX DMA reset */ +#define PCH_GBE_RX_RST          0x00004000  /* RX MAC, RX FIFO, RX DMA reset */  /* TCP/IP Accelerator Control */  #define PCH_GBE_EX_LIST_EN      0x00000008  /* External List Enable */ @@ -276,6 +276,9 @@ struct pch_gbe_regs {  #define PCH_GBE_RX_DMA_EN       0x00000002   /* Enables Receive DMA */  #define PCH_GBE_TX_DMA_EN       0x00000001   /* Enables Transmission DMA */ +/* RX DMA STATUS */ +#define PCH_GBE_IDLE_CHECK       0xFFFFFFFE +  /* Wake On LAN Status */  #define PCH_GBE_WLS_BR          0x00000008 /* Broadcas Address */  #define PCH_GBE_WLS_MLT         0x00000004 /* Multicast Address */ @@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {  struct pch_gbe_buffer {  	struct sk_buff *skb;  	dma_addr_t dma; +	unsigned char *rx_buffer;  	unsigned long time_stamp;  	u16 length;  	bool mapped; @@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {  struct pch_gbe_rx_ring {  	struct pch_gbe_rx_desc *desc;  	dma_addr_t dma; +	unsigned char *rx_buff_pool; +	dma_addr_t rx_buff_pool_logic; +	unsigned int rx_buff_pool_size;  	unsigned int size;  	unsigned int count;  	unsigned int next_to_use; @@ -622,6 +629,7 @@ struct pch_gbe_adapter {  	unsigned long rx_buffer_len;  	unsigned long tx_queue_len;  	bool have_msi; +	bool rx_stop_flag;  };  extern const char pch_driver_version[]; diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index eac3c5ca973..b8b4ba27b0e 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -20,7 +20,6 @@  #include "pch_gbe.h"  #include "pch_gbe_api.h" -#include <linux/prefetch.h>  
#define DRV_VERSION     "1.00"  const char pch_driver_version[] = DRV_VERSION; @@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;  #define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */  #define PCH_GBE_COPYBREAK_DEFAULT	256  #define PCH_GBE_PCI_BAR			1 +#define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2MB */  /* Macros for ML7223 */  #define PCI_VENDOR_ID_ROHM			0x10db  #define PCI_DEVICE_ID_ROHM_ML7223_GBE		0x8013 +/* Macros for ML7831 */ +#define PCI_DEVICE_ID_ROHM_ML7831_GBE		0x8802 +  #define PCH_GBE_TX_WEIGHT         64  #define PCH_GBE_RX_WEIGHT         64  #define PCH_GBE_RX_BUFFER_WRITE   16 @@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;  	)  /* Ethertype field values */ +#define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880  #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318  #define PCH_GBE_FRAME_SIZE_2048         2048  #define PCH_GBE_FRAME_SIZE_4096         4096 @@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;  #define PCH_GBE_INT_ENABLE_MASK ( \  	PCH_GBE_INT_RX_DMA_CMPLT |    \  	PCH_GBE_INT_RX_DSC_EMP   |    \ +	PCH_GBE_INT_RX_FIFO_ERR  |    \  	PCH_GBE_INT_WOL_DET      |    \  	PCH_GBE_INT_TX_CMPLT          \  	) +#define PCH_GBE_INT_DISABLE_ALL		0  static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; @@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)  	if (!tmp)  		pr_err("Error: busy bit is not cleared\n");  } + +/** + * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context + * @reg:	Pointer of register + * @busy:	Busy bit + */ +static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit) +{ +	u32 tmp; +	int ret = -1; +	/* wait busy */ +	tmp = 20; +	while ((ioread32(reg) & bit) && --tmp) +		udelay(5); +	if (!tmp) +		pr_err("Error: busy bit is not cleared\n"); +	else +		ret = 0; +	return ret; +} +  /**   * pch_gbe_mac_mar_set - Set MAC address register   * @hw:	    Pointer to the HW structure @@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)  	return;  } +static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) +{ +	/* Read the MAC address. 
and store to the private data */ +	pch_gbe_mac_read_mac_addr(hw); +	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); +	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); +	/* Setup the MAC address */ +	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); +	return; +} +  /**   * pch_gbe_mac_init_rx_addrs - Initialize receive address's   * @hw:	Pointer to the HW structure @@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)  	tcpip = ioread32(&hw->reg->TCPIP_ACC); -	if (netdev->features & NETIF_F_RXCSUM) { -		tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; -		tcpip |= PCH_GBE_RX_TCPIPACC_EN; -	} else { -		tcpip |= PCH_GBE_RX_TCPIPACC_OFF; -		tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; -	} +	tcpip |= PCH_GBE_RX_TCPIPACC_OFF; +	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;  	iowrite32(tcpip, &hw->reg->TCPIP_ACC);  	return;  } @@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)  	iowrite32(rdba, &hw->reg->RX_DSC_BASE);  	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);  	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); - -	/* Enables Receive DMA */ -	rxdma = ioread32(&hw->reg->DMA_CTRL); -	rxdma |= PCH_GBE_RX_DMA_EN; -	iowrite32(rxdma, &hw->reg->DMA_CTRL); -	/* Enables Receive */ -	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);  }  /** @@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)  	spin_unlock_irqrestore(&adapter->stats_lock, flags);  } +static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) +{ +	struct pch_gbe_hw *hw = &adapter->hw; +	u32 rxdma; +	u16 value; +	int ret; + +	/* Disable Receive DMA */ +	rxdma = ioread32(&hw->reg->DMA_CTRL); +	rxdma &= ~PCH_GBE_RX_DMA_EN; +	iowrite32(rxdma, &hw->reg->DMA_CTRL); +	/* Wait Rx DMA BUS is IDLE */ +	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK); +	if (ret) { +		/* Disable Bus master */ +		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); +		value &= ~PCI_COMMAND_MASTER; +		pci_write_config_word(adapter->pdev, PCI_COMMAND, value); +		/* Stop Receive */ +		pch_gbe_mac_reset_rx(hw); +		/* Enable Bus master */ +		value |= PCI_COMMAND_MASTER; +		pci_write_config_word(adapter->pdev, PCI_COMMAND, value); +	} else { +		/* Stop Receive */ +		pch_gbe_mac_reset_rx(hw); +	} +} + +static void pch_gbe_start_receive(struct pch_gbe_hw *hw) +{ +	u32 rxdma; + +	/* Enables Receive DMA */ +	rxdma = ioread32(&hw->reg->DMA_CTRL); +	rxdma |= PCH_GBE_RX_DMA_EN; +	iowrite32(rxdma, &hw->reg->DMA_CTRL); +	/* Enables Receive */ +	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); +	return; +} +  /**   * pch_gbe_intr - Interrupt Handler   * @irq:   Interrupt number @@ -1123,7 +1191,17 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)  	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)  		adapter->stats.intr_rx_frame_err_count++;  	if (int_st & PCH_GBE_INT_RX_FIFO_ERR) -		adapter->stats.intr_rx_fifo_err_count++; +		if (!adapter->rx_stop_flag) { +			adapter->stats.intr_rx_fifo_err_count++; +			pr_debug("Rx fifo over run\n"); +			adapter->rx_stop_flag = true; +			int_en = ioread32(&hw->reg->INT_EN); +			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), +				  &hw->reg->INT_EN); +			pch_gbe_stop_receive(adapter); +			int_st |= ioread32(&hw->reg->INT_ST); +			int_st = int_st & ioread32(&hw->reg->INT_EN); +		}  	if (int_st & PCH_GBE_INT_RX_DMA_ERR)  		adapter->stats.intr_rx_dma_err_count++;  	if (int_st & PCH_GBE_INT_TX_FIFO_ERR) @@ -1135,21 +1213,18 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)  	/* When Rx descriptor is empty  */  	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {  	
	adapter->stats.intr_rx_dsc_empty_count++; -		pr_err("Rx descriptor is empty\n"); +		pr_debug("Rx descriptor is empty\n");  		int_en = ioread32(&hw->reg->INT_EN);  		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);  		if (hw->mac.tx_fc_enable) {  			/* Set Pause packet */  			pch_gbe_mac_set_pause_packet(hw);  		} -		if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) -		    == 0) { -			return IRQ_HANDLED; -		}  	}  	/* When request status is Receive interruption */ -	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { +	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || +	    (adapter->rx_stop_flag == true)) {  		if (likely(napi_schedule_prep(&adapter->napi))) {  			/* Enable only Rx Descriptor empty */  			atomic_inc(&adapter->irq_sem); @@ -1185,29 +1260,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,  	unsigned int i;  	unsigned int bufsz; -	bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; +	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;  	i = rx_ring->next_to_use;  	while ((cleaned_count--)) {  		buffer_info = &rx_ring->buffer_info[i]; -		skb = buffer_info->skb; -		if (skb) { -			skb_trim(skb, 0); -		} else { -			skb = netdev_alloc_skb(netdev, bufsz); -			if (unlikely(!skb)) { -				/* Better luck next round */ -				adapter->stats.rx_alloc_buff_failed++; -				break; -			} -			/* 64byte align */ -			skb_reserve(skb, PCH_GBE_DMA_ALIGN); - -			buffer_info->skb = skb; -			buffer_info->length = adapter->rx_buffer_len; +		skb = netdev_alloc_skb(netdev, bufsz); +		if (unlikely(!skb)) { +			/* Better luck next round */ +			adapter->stats.rx_alloc_buff_failed++; +			break;  		} +		/* align */ +		skb_reserve(skb, NET_IP_ALIGN); +		buffer_info->skb = skb; +  		buffer_info->dma = dma_map_single(&pdev->dev, -						  skb->data, +						  buffer_info->rx_buffer,  						  buffer_info->length,  						  DMA_FROM_DEVICE);  		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { @@ -1240,6 +1309,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,  	return;  } +static int +pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, +			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count) +{ +	struct pci_dev *pdev = adapter->pdev; +	struct pch_gbe_buffer *buffer_info; +	unsigned int i; +	unsigned int bufsz; +	unsigned int size; + +	bufsz = adapter->rx_buffer_len; + +	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; +	rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, +						&rx_ring->rx_buff_pool_logic, +						GFP_KERNEL); +	if (!rx_ring->rx_buff_pool) { +		pr_err("Unable to allocate memory for the receive poll buffer\n"); +		return -ENOMEM; +	} +	memset(rx_ring->rx_buff_pool, 0, size); +	rx_ring->rx_buff_pool_size = size; +	for (i = 0; i < rx_ring->count; i++) { +		buffer_info = &rx_ring->buffer_info[i]; +		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i; +		buffer_info->length = bufsz; +	} +	return 0; +} +  /**   * pch_gbe_alloc_tx_buffers - Allocate transmit buffers   * @adapter:   Board private structure @@ -1285,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,  	struct sk_buff *skb;  	unsigned int i;  	unsigned int cleaned_count = 0; -	bool cleaned = false; +	bool cleaned = true;  	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); @@ -1296,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,  	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {  		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); -		cleaned = true;  		buffer_info = 
&tx_ring->buffer_info[i];  		skb = buffer_info->skb; @@ -1339,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,  		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);  		/* weight of a sort for tx, to avoid endless transmit cleanup */ -		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) +		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { +			cleaned = false;  			break; +		}  	}  	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",  		 cleaned_count); @@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,  	unsigned int i;  	unsigned int cleaned_count = 0;  	bool cleaned = false; -	struct sk_buff *skb, *new_skb; +	struct sk_buff *skb;  	u8 dma_status;  	u16 gbec_status;  	u32 tcp_ip_status; @@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,  		rx_desc->gbec_status = DSC_INIT16;  		buffer_info = &rx_ring->buffer_info[i];  		skb = buffer_info->skb; +		buffer_info->skb = NULL;  		/* unmap dma */  		dma_unmap_single(&pdev->dev, buffer_info->dma,  				   buffer_info->length, DMA_FROM_DEVICE);  		buffer_info->mapped = false; -		/* Prefetch the packet */ -		prefetch(skb->data);  		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "  			 "TCP:0x%08x]  BufInf = 0x%p\n", @@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,  			pr_err("Receive CRC Error\n");  		} else {  			/* get receive length */ -			/* length convert[-3] */ -			length = (rx_desc->rx_words_eob) - 3; +			/* length convert[-3], length includes FCS length */ +			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN; +			if (rx_desc->rx_words_eob & 0x02) +				length = length - 4; +			/* +			 * buffer_info->rx_buffer: [Header:14][payload] +			 * skb->data: [Reserve:2][Header:14][payload] +			 */ +			memcpy(skb->data, buffer_info->rx_buffer, length); -			/* Decide the data conversion method */ -			if (!(netdev->features & NETIF_F_RXCSUM)) { -				/* [Header:14][payload] */ -				if (NET_IP_ALIGN) { -					/* Because alignment differs, -					 * the new_skb is newly allocated, -					 * and data is copied to new_skb.*/ -					new_skb = netdev_alloc_skb(netdev, -							 length + NET_IP_ALIGN); -					if (!new_skb) { -						/* dorrop error */ -						pr_err("New skb allocation " -							"Error\n"); -						goto dorrop; -					} -					skb_reserve(new_skb, NET_IP_ALIGN); -					memcpy(new_skb->data, skb->data, -					       length); -					skb = new_skb; -				} else { -					/* DMA buffer is used as SKB as it is.*/ -					buffer_info->skb = NULL; -				} -			} else { -				/* [Header:14][padding:2][payload] */ -				/* The length includes padding length */ -				length = length - PCH_GBE_DMA_PADDING; -				if ((length < copybreak) || -				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { -					/* Because alignment differs, -					 * the new_skb is newly allocated, -					 * and data is copied to new_skb. 
-					 * Padding data is deleted -					 * at the time of a copy.*/ -					new_skb = netdev_alloc_skb(netdev, -							 length + NET_IP_ALIGN); -					if (!new_skb) { -						/* dorrop error */ -						pr_err("New skb allocation " -							"Error\n"); -						goto dorrop; -					} -					skb_reserve(new_skb, NET_IP_ALIGN); -					memcpy(new_skb->data, skb->data, -					       ETH_HLEN); -					memcpy(&new_skb->data[ETH_HLEN], -					       &skb->data[ETH_HLEN + -					       PCH_GBE_DMA_PADDING], -					       length - ETH_HLEN); -					skb = new_skb; -				} else { -					/* Padding data is deleted -					 * by moving header data.*/ -					memmove(&skb->data[PCH_GBE_DMA_PADDING], -						&skb->data[0], ETH_HLEN); -					skb_reserve(skb, NET_IP_ALIGN); -					buffer_info->skb = NULL; -				} -			} -			/* The length includes FCS length */ -			length = length - ETH_FCS_LEN;  			/* update status of driver */  			adapter->stats.rx_bytes += length;  			adapter->stats.rx_packets++; @@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,  			pr_debug("Receive skb->ip_summed: %d length: %d\n",  				 skb->ip_summed, length);  		} -dorrop:  		/* return some buffers to hardware, one at a time is too slow */  		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {  			pch_gbe_alloc_rx_buffers(adapter, rx_ring, @@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)  		pr_err("Error: can't bring device up\n");  		return err;  	} +	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); +	if (err) { +		pr_err("Error: can't bring device up\n"); +		return err; +	}  	pch_gbe_alloc_tx_buffers(adapter, tx_ring);  	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);  	adapter->tx_queue_len = netdev->tx_queue_len; +	pch_gbe_start_receive(&adapter->hw);  	mod_timer(&adapter->watchdog_timer, jiffies); @@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)  void pch_gbe_down(struct pch_gbe_adapter *adapter)  {  	struct net_device *netdev = adapter->netdev; +	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;  	/* signal that we're down so the interrupt handler does not  	 * reschedule our watchdog timer */ @@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)  	pch_gbe_reset(adapter);  	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);  	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); + +	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, +			    rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); +	rx_ring->rx_buff_pool_logic = 0; +	rx_ring->rx_buff_pool_size = 0; +	rx_ring->rx_buff_pool = NULL;  }  /** @@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)  {  	struct pch_gbe_adapter *adapter = netdev_priv(netdev);  	int max_frame; +	unsigned long old_rx_buffer_len = adapter->rx_buffer_len; +	int err;  	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;  	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || @@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)  	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)  		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;  	else -		adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; -	netdev->mtu = new_mtu; -	adapter->hw.mac.max_frame_size = max_frame; +		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE; -	if (netif_running(netdev)) -		pch_gbe_reinit_locked(adapter); -	else +	if (netif_running(netdev)) { +		pch_gbe_down(adapter); +		err = pch_gbe_up(adapter); +		if (err) { +			adapter->rx_buffer_len = old_rx_buffer_len; +		
	pch_gbe_up(adapter); +			return -ENOMEM; +		} else { +			netdev->mtu = new_mtu; +			adapter->hw.mac.max_frame_size = max_frame; +		} +	} else {  		pch_gbe_reset(adapter); +		netdev->mtu = new_mtu; +		adapter->hw.mac.max_frame_size = max_frame; +	}  	pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",  		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, @@ -2099,33 +2168,39 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)  {  	struct pch_gbe_adapter *adapter =  	    container_of(napi, struct pch_gbe_adapter, napi); -	struct net_device *netdev = adapter->netdev;  	int work_done = 0;  	bool poll_end_flag = false;  	bool cleaned = false; +	u32 int_en;  	pr_debug("budget : %d\n", budget); -	/* Keep link state information with original netdev */ -	if (!netif_carrier_ok(netdev)) { -		poll_end_flag = true; -	} else { -		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); -		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); +	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); +	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); -		if (cleaned) -			work_done = budget; -		/* If no Tx and not enough Rx work done, -		 * exit the polling mode -		 */ -		if ((work_done < budget) || !netif_running(netdev)) -			poll_end_flag = true; -	} +	if (!cleaned) +		work_done = budget; +	/* If no Tx and not enough Rx work done, +	 * exit the polling mode +	 */ +	if (work_done < budget) +		poll_end_flag = true;  	if (poll_end_flag) {  		napi_complete(napi); +		if (adapter->rx_stop_flag) { +			adapter->rx_stop_flag = false; +			pch_gbe_start_receive(&adapter->hw); +		}  		pch_gbe_irq_enable(adapter); -	} +	} else +		if (adapter->rx_stop_flag) { +			adapter->rx_stop_flag = false; +			pch_gbe_start_receive(&adapter->hw); +			int_en = ioread32(&adapter->hw.reg->INT_EN); +			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), +				&adapter->hw.reg->INT_EN); +		}  	pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",  		 poll_end_flag, work_done, budget); @@ -2452,6 +2527,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {  	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),  	 .class_mask = (0xFFFF00)  	 }, +	{.vendor = PCI_VENDOR_ID_ROHM, +	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE, +	 .subvendor = PCI_ANY_ID, +	 .subdevice = PCI_ANY_ID, +	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), +	 .class_mask = (0xFFFF00) +	 },  	/* required last entry */  	{0}  }; diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index d84c4224dd1..e8be47d6d7d 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -553,7 +553,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)  		/*  		 * There is no BCM5481 specification available, so down  		 * here is everything we know about "register 0x18". This -		 * at least helps BCM5481 to successfuly receive packets +		 * at least helps BCM5481 to successfully receive packets  		 * on MPC8360E-RDK board. Peter Barada <peterb@logicpd.com>  		 * says: "This sets delay between the RXD and RXC signals  		 * instead of using trace lengths to achieve timing". 
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index cb6e0b486b1..edd7304773e 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640,  	prune_rx_ts(dp83640);  	if (list_empty(&dp83640->rxpool)) { -		pr_warning("dp83640: rx timestamp pool is empty\n"); +		pr_debug("dp83640: rx timestamp pool is empty\n");  		goto out;  	}  	rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); @@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640,  	skb = skb_dequeue(&dp83640->tx_queue);  	if (!skb) { -		pr_warning("dp83640: have timestamp but tx_queue empty\n"); +		pr_debug("dp83640: have timestamp but tx_queue empty\n");  		return;  	}  	ns = phy2txts(phy_txts); diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 0620ba96350..04bb8fcc0cb 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -25,8 +25,9 @@  /* DP83865 phy identifier values */  #define DP83865_PHY_ID	0x20005c7a -#define DP83865_INT_MASK_REG 0x15 -#define DP83865_INT_MASK_STATUS 0x14 +#define DP83865_INT_STATUS	0x14 +#define DP83865_INT_MASK	0x15 +#define DP83865_INT_CLEAR	0x17  #define DP83865_INT_REMOTE_FAULT 0x0008  #define DP83865_INT_ANE_COMPLETED 0x0010 @@ -68,21 +69,25 @@ static int ns_config_intr(struct phy_device *phydev)  	int err;  	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) -		err = phy_write(phydev, DP83865_INT_MASK_REG, +		err = phy_write(phydev, DP83865_INT_MASK,  				DP83865_INT_MASK_DEFAULT);  	else -		err = phy_write(phydev, DP83865_INT_MASK_REG, 0); +		err = phy_write(phydev, DP83865_INT_MASK, 0);  	return err;  }  static int ns_ack_interrupt(struct phy_device *phydev)  { -	int ret = phy_read(phydev, DP83865_INT_MASK_STATUS); +	int ret = phy_read(phydev, DP83865_INT_STATUS);  	if (ret < 0)  		return ret; -	return 0; +	/* Clear the interrupt status bit by writing a “1” +	 * to the corresponding bit in INT_CLEAR (2:0 are reserved) */ +	ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7); + +	return ret;  }  static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 10e5d985afa..edfa15d2e79 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)  			continue;  		} -		mtu = pch->chan->mtu - hdrlen; +		/* +		 * hdrlen includes the 2-byte PPP protocol field, but the +		 * MTU counts only the payload excluding the protocol field. +		 * (RFC1661 Section 2) +		 */ +		mtu = pch->chan->mtu - (hdrlen - 2);  		if (mtu < 4)  			mtu = 4;  		if (flen > mtu) diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c index eae542a7e98..89f829f5f72 100644 --- a/drivers/net/pptp.c +++ b/drivers/net/pptp.c @@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)  	ip_send_check(iph);  	ip_local_out(skb); +	return 1;  tx_error: +	kfree_skb(skb);  	return 1;  } @@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)  	}  	header = (struct pptp_gre_header *)(skb->data); +	headersize  = sizeof(*header);  	/* test if acknowledgement present */  	if (PPTP_GRE_IS_A(header->ver)) { -		__u32 ack = (PPTP_GRE_IS_S(header->flags)) ? 
-				header->ack : header->seq; /* ack in different place if S = 0 */ +		__u32 ack; + +		if (!pskb_may_pull(skb, headersize)) +			goto drop; +		header = (struct pptp_gre_header *)(skb->data); + +		/* ack in different place if S = 0 */ +		ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq;  		ack = ntohl(ack); @@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)  		/* also handle sequence number wrap-around  */  		if (WRAPPED(ack, opt->ack_recv))  			opt->ack_recv = ack; +	} else { +		headersize -= sizeof(header->ack);  	} -  	/* test if payload present */  	if (!PPTP_GRE_IS_S(header->flags))  		goto drop; -	headersize  = sizeof(*header);  	payload_len = ntohs(header->payload_len);  	seq         = ntohl(header->seq); -	/* no ack present? */ -	if (!PPTP_GRE_IS_A(header->ver)) -		headersize -= sizeof(header->ack);  	/* check for incomplete packet (length smaller than expected) */ -	if (skb->len - headersize < payload_len) +	if (!pskb_may_pull(skb, headersize + payload_len))  		goto drop;  	payload = skb->data + headersize; diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c index 1a3033d8e7e..d17d0624c5e 100644 --- a/drivers/net/pxa168_eth.c +++ b/drivers/net/pxa168_eth.c @@ -40,6 +40,7 @@  #include <linux/clk.h>  #include <linux/phy.h>  #include <linux/io.h> +#include <linux/interrupt.h>  #include <linux/types.h>  #include <asm/pgtable.h>  #include <asm/system.h> diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 02339b3352e..6d657cabb95 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -407,6 +407,7 @@ enum rtl_register_content {  	RxOK		= 0x0001,  	/* RxStatusDesc */ +	RxBOVF	= (1 << 24),  	RxFOVF	= (1 << 23),  	RxRWT	= (1 << 22),  	RxRES	= (1 << 21), @@ -682,6 +683,7 @@ struct rtl8169_private {  	struct mii_if_info mii;  	struct rtl8169_counters counters;  	u32 saved_wolopts; +	u32 opts1_mask;  	struct rtl_fw {  		const struct firmware *fw; @@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1);  MODULE_FIRMWARE(FIRMWARE_8168D_2);  MODULE_FIRMWARE(FIRMWARE_8168E_1);  MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3);  MODULE_FIRMWARE(FIRMWARE_8105E_1);  static int rtl8169_open(struct net_device *dev); @@ -2856,7 +2859,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)  	rtl_writephy(tp, 0x1f, 0x0004);  	rtl_writephy(tp, 0x1f, 0x0007);  	rtl_writephy(tp, 0x1e, 0x0020); -	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); +	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);  	rtl_writephy(tp, 0x1f, 0x0002);  	rtl_writephy(tp, 0x1f, 0x0000);  	rtl_writephy(tp, 0x0d, 0x0007); @@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev,  	netif_err(tp, link, dev, "PHY reset failed\n");  } +static bool rtl_tbi_enabled(struct rtl8169_private *tp) +{ +	void __iomem *ioaddr = tp->mmio_addr; + +	return (tp->mac_version == RTL_GIGA_MAC_VER_01) && +	    (RTL_R8(PHYstatus) & TBI_Enable); +} +  static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)  {  	void __iomem *ioaddr = tp->mmio_addr; @@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)  			   ADVERTISED_1000baseT_Half |  			   ADVERTISED_1000baseT_Full : 0)); -	if (RTL_R8(PHYstatus) & TBI_Enable) +	if (rtl_tbi_enabled(tp))  		netif_info(tp, link, dev, "TBI auto-negotiating\n");  } @@ -3305,6 +3316,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)  	}  } +static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) +{ +	void __iomem 
*ioaddr = tp->mmio_addr; + +	switch (tp->mac_version) { +	case RTL_GIGA_MAC_VER_29: +	case RTL_GIGA_MAC_VER_30: +	case RTL_GIGA_MAC_VER_32: +	case RTL_GIGA_MAC_VER_33: +	case RTL_GIGA_MAC_VER_34: +		RTL_W32(RxConfig, RTL_R32(RxConfig) | +			AcceptBroadcast | AcceptMulticast | AcceptMyPhys); +		break; +	default: +		break; +	} +} + +static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) +{ +	if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) +		return false; + +	rtl_writephy(tp, 0x1f, 0x0000); +	rtl_writephy(tp, MII_BMCR, 0x0000); + +	rtl_wol_suspend_quirk(tp); + +	return true; +} +  static void r810x_phy_power_down(struct rtl8169_private *tp)  {  	rtl_writephy(tp, 0x1f, 0x0000); @@ -3319,11 +3361,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)  static void r810x_pll_power_down(struct rtl8169_private *tp)  { -	if (__rtl8169_get_wol(tp) & WAKE_ANY) { -		rtl_writephy(tp, 0x1f, 0x0000); -		rtl_writephy(tp, MII_BMCR, 0x0000); +	if (rtl_wol_pll_power_down(tp))  		return; -	}  	r810x_phy_power_down(tp);  } @@ -3412,16 +3451,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)  	    tp->mac_version == RTL_GIGA_MAC_VER_33)  		rtl_ephy_write(ioaddr, 0x19, 0xff64); -	if (__rtl8169_get_wol(tp) & WAKE_ANY) { -		rtl_writephy(tp, 0x1f, 0x0000); -		rtl_writephy(tp, MII_BMCR, 0x0000); - -		if (tp->mac_version == RTL_GIGA_MAC_VER_32 || -		    tp->mac_version == RTL_GIGA_MAC_VER_33) -			RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | -				AcceptMulticast | AcceptMyPhys); +	if (rtl_wol_pll_power_down(tp))  		return; -	}  	r8168_phy_power_down(tp); @@ -3727,8 +3758,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  	tp->features |= rtl_try_msi(pdev, ioaddr, cfg);  	RTL_W8(Cfg9346, Cfg9346_Lock); -	if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) && -	    (RTL_R8(PHYstatus) & TBI_Enable)) { +	if (rtl_tbi_enabled(tp)) {  		tp->set_speed = rtl8169_set_speed_tbi;  		tp->get_settings = rtl8169_gset_tbi;  		tp->phy_reset_enable = rtl8169_tbi_reset_enable; @@ -3777,6 +3807,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  	tp->intr_event = cfg->intr_event;  	tp->napi_event = cfg->napi_event; +	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? +		~(RxBOVF | RxFOVF) : ~0; +  	init_timer(&tp->timer);  	tp->timer.data = (unsigned long) dev;  	tp->timer.function = rtl8169_phy_timer; @@ -3988,6 +4021,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)  		while (RTL_R8(TxPoll) & NPQ)  			udelay(20);  	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34) { +		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);  		while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))  			udelay(100);  	} else { @@ -5314,7 +5348,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,  		u32 status;  		rmb(); -		status = le32_to_cpu(desc->opts1); +		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;  		if (status & DescOwn)  			break; @@ -5766,11 +5800,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = {  #endif /* !CONFIG_PM */ +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ +	void __iomem *ioaddr = tp->mmio_addr; + +	/* WoL fails with 8168b when the receiver is disabled. 
*/ +	switch (tp->mac_version) { +	case RTL_GIGA_MAC_VER_11: +	case RTL_GIGA_MAC_VER_12: +	case RTL_GIGA_MAC_VER_17: +		pci_clear_master(tp->pci_dev); + +		RTL_W8(ChipCmd, CmdRxEnb); +		/* PCI commit */ +		RTL_R8(ChipCmd); +		break; +	default: +		break; +	} +} +  static void rtl_shutdown(struct pci_dev *pdev)  {  	struct net_device *dev = pci_get_drvdata(pdev);  	struct rtl8169_private *tp = netdev_priv(dev); -	void __iomem *ioaddr = tp->mmio_addr;  	rtl8169_net_suspend(dev); @@ -5784,16 +5837,9 @@ static void rtl_shutdown(struct pci_dev *pdev)  	spin_unlock_irq(&tp->lock);  	if (system_state == SYSTEM_POWER_OFF) { -		/* WoL fails with 8168b when the receiver is disabled. */ -		if ((tp->mac_version == RTL_GIGA_MAC_VER_11 || -		     tp->mac_version == RTL_GIGA_MAC_VER_12 || -		     tp->mac_version == RTL_GIGA_MAC_VER_17) && -		    (tp->features & RTL_FEATURE_WOL)) { -			pci_clear_master(pdev); - -			RTL_W8(ChipCmd, CmdRxEnb); -			/* PCI commit */ -			RTL_R8(ChipCmd); +		if (__rtl8169_get_wol(tp) & WAKE_ANY) { +			rtl_wol_suspend_quirk(tp); +			rtl_wol_shutdown_quirk(tp);  		}  		pci_wake_from_d3(pdev, true); diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 86ac38c96bc..3bb13113703 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -80,13 +80,13 @@ static int rionet_capable = 1;   */  static struct rio_dev **rionet_active; -#define is_rionet_capable(pef, src_ops, dst_ops)		\ -			((pef & RIO_PEF_INB_MBOX) &&		\ -			 (pef & RIO_PEF_INB_DOORBELL) &&	\ +#define is_rionet_capable(src_ops, dst_ops)			\ +			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\ +			 (dst_ops & RIO_DST_OPS_DATA_MSG) &&	\  			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\  			 (dst_ops & RIO_DST_OPS_DOORBELL))  #define dev_rionet_capable(dev) \ -	is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) +	is_rionet_capable(dev->src_ops, dev->dst_ops)  #define RIONET_MAC_MATCH(x)	(*(u32 *)x == 0x00010001)  #define RIONET_GET_DESTID(x)	(*(u16 *)(x + 4)) @@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev)  {  	int i, rc = 0;  	struct rionet_peer *peer, *tmp; -	u32 pwdcsr;  	struct rionet_private *rnet = netdev_priv(ndev);  	if (netif_msg_ifup(rnet)) @@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev)  			continue;  		} -		/* -		 * If device has initialized inbound doorbells, -		 * send a join message -		 */ -		rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr); -		if (pwdcsr & RIO_DOORBELL_AVAIL) -			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); +		/* Send a join message */ +		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);  	}        out: @@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)  static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)  {  	int rc = -ENODEV; -	u32 lpef, lsrc_ops, ldst_ops; +	u32 lsrc_ops, ldst_ops;  	struct rionet_peer *peer;  	struct net_device *ndev = NULL; @@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)  	 * on later probes  	 */  	if (!rionet_check) { -		rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);  		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,  					 &lsrc_ops);  		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,  					 &ldst_ops); -		if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { +		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {  			printk(KERN_ERR  			       "%s: local device is not network capable\n",  			       DRV_NAME); diff --git a/drivers/net/sfc/efx.c 
b/drivers/net/sfc/efx.c index faca764aa21..b59abc706d9 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)  {  	struct pci_dev *pci_dev = efx->pci_dev;  	dma_addr_t dma_mask = efx->type->max_dma_mask; -	bool use_wc;  	int rc;  	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); @@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)  		rc = -EIO;  		goto fail3;  	} - -	/* bug22643: If SR-IOV is enabled then tx push over a write combined -	 * mapping is unsafe. We need to disable write combining in this case. -	 * MSI is unsupported when SR-IOV is enabled, and the firmware will -	 * have removed the MSI capability. So write combining is safe if -	 * there is an MSI capability. -	 */ -	use_wc = (!EFX_WORKAROUND_22643(efx) || -		  pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); -	if (use_wc) -		efx->membase = ioremap_wc(efx->membase_phys, -					  efx->type->mem_map_size); -	else -		efx->membase = ioremap_nocache(efx->membase_phys, -					       efx->type->mem_map_size); +	efx->membase = ioremap_nocache(efx->membase_phys, +				       efx->type->mem_map_size);  	if (!efx->membase) {  		netif_err(efx, probe, efx->net_dev,  			  "could not map memory BAR at %llx+%x\n", diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h index cc978803d48..751d1ec112c 100644 --- a/drivers/net/sfc/io.h +++ b/drivers/net/sfc/io.h @@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,  	_efx_writed(efx, value->u32[2], reg + 8);  	_efx_writed(efx, value->u32[3], reg + 12);  #endif -	wmb();  	mmiowb();  	spin_unlock_irqrestore(&efx->biu_lock, flags);  } @@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,  	__raw_writel((__force u32)value->u32[0], membase + addr);  	__raw_writel((__force u32)value->u32[1], membase + addr + 4);  #endif -	wmb();  	mmiowb();  	spin_unlock_irqrestore(&efx->biu_lock, flags);  } @@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,  	/* No lock required */  	_efx_writed(efx, value->u32[0], reg); -	wmb();  }  /* Read a 128-bit CSR, locking as appropriate. 
*/ @@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,  	spin_lock_irqsave(&efx->biu_lock, flags);  	value->u32[0] = _efx_readd(efx, reg + 0); -	rmb();  	value->u32[1] = _efx_readd(efx, reg + 4);  	value->u32[2] = _efx_readd(efx, reg + 8);  	value->u32[3] = _efx_readd(efx, reg + 12); @@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,  	value->u64[0] = (__force __le64)__raw_readq(membase + addr);  #else  	value->u32[0] = (__force __le32)__raw_readl(membase + addr); -	rmb();  	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);  #endif  	spin_unlock_irqrestore(&efx->biu_lock, flags); @@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,  	_efx_writed(efx, value->u32[2], reg + 8);  	_efx_writed(efx, value->u32[3], reg + 12);  #endif -	wmb();  }  #define efx_writeo_page(efx, value, reg, page)				\  	_efx_writeo_page(efx, value,					\ diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index 3dd45ed61f0..81a42539746 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c @@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)  	return &nic_data->mcdi;  } -static inline void -efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) -{ -	struct siena_nic_data *nic_data = efx->nic_data; -	value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); -} - -static inline void -efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) -{ -	struct siena_nic_data *nic_data = efx->nic_data; -	__raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); -} -  void efx_mcdi_init(struct efx_nic *efx)  {  	struct efx_mcdi_iface *mcdi; @@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,  			    const u8 *inbuf, size_t inlen)  {  	struct efx_mcdi_iface *mcdi = efx_mcdi(efx); -	unsigned pdu = MCDI_PDU(efx); -	unsigned doorbell = MCDI_DOORBELL(efx); +	unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); +	unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);  	unsigned int i;  	efx_dword_t hdr;  	u32 xflags, seqno; @@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,  			     MCDI_HEADER_SEQ, seqno,  			     MCDI_HEADER_XFLAGS, xflags); -	efx_mcdi_writed(efx, &hdr, pdu); +	efx_writed(efx, &hdr, pdu);  	for (i = 0; i < inlen; i += 4) -		efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), -				pdu + 4 + i); +		_efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); + +	/* Ensure the payload is written out before the header */ +	wmb();  	/* ring the doorbell with a distinctive value */ -	EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); -	efx_mcdi_writed(efx, &hdr, doorbell); +	_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);  }  static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)  {  	struct efx_mcdi_iface *mcdi = efx_mcdi(efx); -	unsigned int pdu = MCDI_PDU(efx); +	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);  	int i;  	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);  	BUG_ON(outlen & 3 || outlen >= 0x100);  	for (i = 0; i < outlen; i += 4) -		efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); +		*((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);  }  static int efx_mcdi_poll(struct efx_nic *efx) @@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)  	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);  	unsigned int time, finish;  	
unsigned int respseq, respcmd, error; -	unsigned int pdu = MCDI_PDU(efx); +	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);  	unsigned int rc, spins;  	efx_dword_t reg; @@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)  		time = get_seconds(); -		efx_mcdi_readd(efx, &reg, pdu); +		rmb(); +		efx_readd(efx, &reg, pdu);  		/* All 1's indicates that shared memory is in reset (and is  		 * not a valid header). Wait for it to come out reset before @@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)  			  respseq, mcdi->seqno);  		rc = EIO;  	} else if (error) { -		efx_mcdi_readd(efx, &reg, pdu + 4); +		efx_readd(efx, &reg, pdu + 4);  		switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {  #define TRANSLATE_ERROR(name)					\  		case MC_CMD_ERR_ ## name:			\ @@ -222,21 +210,21 @@ out:  /* Test and clear MC-rebooted flag for this port/function */  int efx_mcdi_poll_reboot(struct efx_nic *efx)  { -	unsigned int addr = MCDI_REBOOT_FLAG(efx); +	unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);  	efx_dword_t reg;  	uint32_t value;  	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)  		return false; -	efx_mcdi_readd(efx, &reg, addr); +	efx_readd(efx, &reg, addr);  	value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);  	if (value == 0)  		return 0;  	EFX_ZERO_DWORD(reg); -	efx_mcdi_writed(efx, &reg, addr); +	efx_writed(efx, &reg, addr);  	if (value == MC_STATUS_DWORD_ASSERT)  		return -EINTR; diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index bafa23a6874..3edfbaf5f02 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c @@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)  		size = min_t(size_t, table->step, 16); -		if (table->offset >= efx->type->mem_map_size) { -			/* No longer mapped; return dummy data */ -			memcpy(buf, "\xde\xc0\xad\xde", 4); -			buf += table->rows * size; -			continue; -		} -  		for (i = 0; i < table->rows; i++) {  			switch (table->step) {  			case 4: /* 32-bit register or SRAM */ diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index 4bd1f2839df..7443f99c977 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h @@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)  /**   * struct siena_nic_data - Siena NIC state   * @mcdi: Management-Controller-to-Driver Interface - * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.   
* @wol_filter_id: Wake-on-LAN packet filter id   */  struct siena_nic_data {  	struct efx_mcdi_iface mcdi; -	void __iomem *mcdi_smem;  	int wol_filter_id;  }; diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index 5735e84c69d..2c3bd93fab5 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)  	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);  	efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; -	/* Initialise MCDI */ -	nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + -					      FR_CZ_MC_TREG_SMEM, -					      FR_CZ_MC_TREG_SMEM_STEP * -					      FR_CZ_MC_TREG_SMEM_ROWS); -	if (!nic_data->mcdi_smem) { -		netif_err(efx, probe, efx->net_dev, -			  "could not map MCDI at %llx+%x\n", -			  (unsigned long long)efx->membase_phys + -			  FR_CZ_MC_TREG_SMEM, -			  FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); -		rc = -ENOMEM; -		goto fail1; -	}  	efx_mcdi_init(efx);  	/* Recover from a failed assertion before probing */  	rc = efx_mcdi_handle_assertion(efx);  	if (rc) -		goto fail2; +		goto fail1;  	/* Let the BMC know that the driver is now in charge of link and  	 * filter settings. We must do this before we reset the NIC */ @@ -324,7 +310,6 @@ fail4:  fail3:  	efx_mcdi_drv_attach(efx, false, NULL);  fail2: -	iounmap(nic_data->mcdi_smem);  fail1:  	kfree(efx->nic_data);  	return rc; @@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)  static void siena_remove_nic(struct efx_nic *efx)  { -	struct siena_nic_data *nic_data = efx->nic_data; -  	efx_nic_free_buffer(efx, &efx->irq_status);  	siena_reset_hw(efx, RESET_TYPE_ALL); @@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)  		efx_mcdi_drv_attach(efx, false, NULL);  	/* Tear down the private nic state */ -	iounmap(nic_data->mcdi_smem); -	kfree(nic_data); +	kfree(efx->nic_data);  	efx->nic_data = NULL;  } @@ -656,7 +638,8 @@  const struct efx_nic_type siena_a0_nic_type = {  	.default_mac_ops = &efx_mcdi_mac_operations,  	.revision = EFX_REV_SIENA_A0, -	.mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ +	.mem_map_size = (FR_CZ_MC_TREG_SMEM +
			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),  	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,  	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,  	.buf_tbl_base = FR_BZ_BUF_FULL_TBL, diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index 99ff11400ce..e4dd3a7f304 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h @@ -38,8 +38,6 @@  #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS  /* Legacy interrupt storm when interrupt fifo fills */  #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA -/* Write combining and sriov=enabled are incompatible */ -#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA  /* Spurious parity errors in TSORT buffers */  #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index ad35c210b83..1c1666e9910 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -21,6 +21,7 @@   */  #include <linux/init.h> +#include <linux/interrupt.h>  #include <linux/dma-mapping.h>  #include <linux/etherdevice.h>  #include <linux/delay.h> @@ -30,6 +31,7 @@  #include <linux/phy.h>  #include <linux/cache.h>  #include <linux/io.h> +#include <linux/interrupt.h>  #include <linux/pm_runtime.h>  #include <linux/slab.h>  #include <linux/ethtool.h> diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 57339da7632..d7f879dfb1c 
100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -2057,7 +2057,7 @@ static void sky2_hw_down(struct sky2_port *sky2)  	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); -	/* Force any delayed status interrrupt and NAPI */ +	/* Force any delayed status interrupt and NAPI */  	sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);  	sky2_write32(hw, STAT_TX_TIMER_CNT, 0);  	sky2_write32(hw, STAT_ISR_TIMER_CNT, 0); diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index b9016a30cdc..c90ddb61cc5 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -26,6 +26,7 @@   *   LAN9215, LAN9216, LAN9217, LAN9218   *   LAN9210, LAN9211   *   LAN9220, LAN9221 + *   LAN89218   *   */ @@ -1983,6 +1984,7 @@ static int __devinit smsc911x_init(struct net_device *dev)  	case 0x01170000:  	case 0x01160000:  	case 0x01150000: +	case 0x218A0000:  		/* LAN911[5678] family */  		pdata->generation = pdata->idrev & 0x0000FFFF;  		break; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index dc3fbf61910..c11a2b8327f 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -6234,12 +6234,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)  		}  	} -#ifdef BCM_KERNEL_SUPPORTS_8021Q  	if (vlan_tx_tag_present(skb)) {  		base_flags |= TXD_FLAG_VLAN;  		vlan = vlan_tx_tag_get(skb);  	} -#endif  	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&  	    !mss && skb->len > VLAN_ETH_FRAME_LEN) @@ -15579,7 +15577,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)  		cancel_work_sync(&tp->reset_task); -		if (!tg3_flag(tp, USE_PHYLIB)) { +		if (tg3_flag(tp, USE_PHYLIB)) {  			tg3_phy_fini(tp);  			tg3_mdio_fini(tp);  		} diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c index 1e2af96fc29..7b46e75deb5 100644 --- a/drivers/net/tile/tilepro.c +++ b/drivers/net/tile/tilepro.c @@ -177,7 +177,7 @@ struct tile_net_cpu {  	struct tile_net_stats_t stats;  	/* True iff NAPI is enabled. */  	bool napi_enabled; -	/* True if this tile has succcessfully registered with the IPP. */ +	/* True if this tile has successfully registered with the IPP. */  	bool registered;  	/* True if the link was down last time we tried to register. */  	bool link_down; diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c index 092c3faa882..25b8deedbef 100644 --- a/drivers/net/tulip/21142.c +++ b/drivers/net/tulip/21142.c @@ -7,9 +7,6 @@  	This software may be used and distributed according to the terms  	of the GNU General Public License, incorporated herein by reference. -	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} -	for more information on this driver. -  	DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller  	Hardware Reference Manual" is currently available at :  	http://developer.intel.com/design/network/manuals/278074.htm diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c index fa5eee925f2..14d5b611783 100644 --- a/drivers/net/tulip/eeprom.c +++ b/drivers/net/tulip/eeprom.c @@ -7,8 +7,6 @@  	This software may be used and distributed according to the terms  	of the GNU General Public License, incorporated herein by reference. -	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} -	for more information on this driver.  	Please submit bug reports to http://bugzilla.kernel.org/.  
*/ diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index 5350d753e0f..4fb8c8c0a42 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c @@ -7,10 +7,7 @@  	This software may be used and distributed according to the terms  	of the GNU General Public License, incorporated herein by reference. -	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} -	for more information on this driver.          Please submit bugs to http://bugzilla.kernel.org/ . -  */  #include <linux/pci.h> diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c index 4bd13922875..beeb17b52ad 100644 --- a/drivers/net/tulip/media.c +++ b/drivers/net/tulip/media.c @@ -7,9 +7,6 @@  	This software may be used and distributed according to the terms  	of the GNU General Public License, incorporated herein by reference. -	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} -	for more information on this driver. -  	Please submit bugs to http://bugzilla.kernel.org/ .  */ diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c index 52d898bdbeb..9c16e4ad02a 100644 --- a/drivers/net/tulip/pnic.c +++ b/drivers/net/tulip/pnic.c @@ -7,9 +7,6 @@  	This software may be used and distributed according to the terms  	of the GNU General Public License, incorporated herein by reference. -	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} -	for more information on this driver. -  	Please submit bugs to http://bugzilla.kernel.org/ .  */ diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c index 93358ee4d83..04a7e477eaf 100644 --- a/drivers/net/tulip/pnic2.c +++ b/drivers/net/tulip/pnic2.c @@ -8,9 +8,6 @@  	This software may be used and distributed according to the terms  	of the GNU General Public License, incorporated herein by reference. -	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} -	for more information on this driver. -          Please submit bugs to http://bugzilla.kernel.org/ .  */ diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c index 2017faf2d0e..19078d28ffb 100644 --- a/drivers/net/tulip/timer.c +++ b/drivers/net/tulip/timer.c @@ -7,9 +7,6 @@  	This software may be used and distributed according to the terms  	of the GNU General Public License, incorporated herein by reference. -	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} -	for more information on this driver. -  	Please submit bugs to http://bugzilla.kernel.org/ .  */ diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 9db528967da..fb3887c18dc 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h @@ -7,9 +7,6 @@  	This software may be used and distributed according to the terms  	of the GNU General Public License, incorporated herein by reference. -	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} -	for more information on this driver. -  	Please submit bugs to http://bugzilla.kernel.org/ .  */ diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 1246998a677..b905c0dc564 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -6,9 +6,6 @@  	This software may be used and distributed according to the terms  	of the GNU General Public License, incorporated herein by reference. -	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} -	for more information on this driver. -  	Please submit bugs to http://bugzilla.kernel.org/ .  
*/ diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index a03336e086d..f06fb78383a 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -228,23 +228,40 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)  	if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {  		if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { -			struct usb_cdc_ncm_ndp_input_size ndp_in_sz; +			struct usb_cdc_ncm_ndp_input_size *ndp_in_sz; + +			ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL); +			if (!ndp_in_sz) { +				err = -ENOMEM; +				goto size_err; +			} +  			err = usb_control_msg(ctx->udev,  					usb_sndctrlpipe(ctx->udev, 0),  					USB_CDC_SET_NTB_INPUT_SIZE,  					USB_TYPE_CLASS | USB_DIR_OUT  					 | USB_RECIP_INTERFACE, -					0, iface_no, &ndp_in_sz, 8, 1000); +					0, iface_no, ndp_in_sz, 8, 1000); +			kfree(ndp_in_sz);  		} else { -			__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); +			__le32 *dwNtbInMaxSize; +			dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize), +					GFP_KERNEL); +			if (!dwNtbInMaxSize) { +				err = -ENOMEM; +				goto size_err; +			} +			*dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); +  			err = usb_control_msg(ctx->udev,  					usb_sndctrlpipe(ctx->udev, 0),  					USB_CDC_SET_NTB_INPUT_SIZE,  					USB_TYPE_CLASS | USB_DIR_OUT  					 | USB_RECIP_INTERFACE, -					0, iface_no, &dwNtbInMaxSize, 4, 1000); +					0, iface_no, dwNtbInMaxSize, 4, 1000); +			kfree(dwNtbInMaxSize);  		} - +size_err:  		if (err < 0)  			pr_debug("Setting NTB Input Size failed\n");  	} @@ -325,19 +342,29 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)  	/* set Max Datagram Size (MTU) */  	if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { -		__le16 max_datagram_size; +		__le16 *max_datagram_size;  		u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); + +		max_datagram_size = kzalloc(sizeof(*max_datagram_size), +				GFP_KERNEL); +		if (!max_datagram_size) { +			err = -ENOMEM; +			goto max_dgram_err; +		} +  		err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),  				USB_CDC_GET_MAX_DATAGRAM_SIZE,  				USB_TYPE_CLASS | USB_DIR_IN  				 | USB_RECIP_INTERFACE, -				0, iface_no, &max_datagram_size, +				0, iface_no, max_datagram_size,  				2, 1000);  		if (err < 0) {  			pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",  						CDC_NCM_MIN_DATAGRAM_SIZE); +			kfree(max_datagram_size);  		} else { -			ctx->max_datagram_size = le16_to_cpu(max_datagram_size); +			ctx->max_datagram_size = +				le16_to_cpu(*max_datagram_size);  			/* Check Eth descriptor value */  			if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {  				if (ctx->max_datagram_size > eth_max_sz) @@ -360,8 +387,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)  						USB_TYPE_CLASS | USB_DIR_OUT  						 | USB_RECIP_INTERFACE,  						0, -						iface_no, &max_datagram_size, +						iface_no, max_datagram_size,  						2, 1000); +			kfree(max_datagram_size); +max_dgram_err:  			if (err < 0)  				pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");  		} diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 15772b1b6a9..13c1f044b40 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -59,6 +59,7 @@  #define USB_PRODUCT_IPHONE_3G   0x1292  #define USB_PRODUCT_IPHONE_3GS  0x1294  #define USB_PRODUCT_IPHONE_4	0x1297 +#define USB_PRODUCT_IPHONE_4_VZW 0x129c  #define IPHETH_USBINTF_CLASS    255  #define IPHETH_USBINTF_SUBCLASS 253 @@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {  		USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,  		IPHETH_USBINTF_CLASS, 
IPHETH_USBINTF_SUBCLASS,  		IPHETH_USBINTF_PROTO) }, +	{ USB_DEVICE_AND_INTERFACE_INFO( +		USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, +		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, +		IPHETH_USBINTF_PROTO) },  	{ }  };  MODULE_DEVICE_TABLE(usb, ipheth_table); diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index deb1eca13c9..7c5336c5c37 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -515,10 +515,6 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)  	mac_set_cam_mask(regs, vptr->mCAMmask);  	/* Enable VCAMs */ - -	if (test_bit(0, vptr->active_vlans)) -		WORD_REG_BITS_ON(MCFG_RTGOPT, ®s->MCFG); -  	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {  		mac_set_vlan_cam(regs, i, (u8 *) &vid);  		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 1cbacb38965..0959583feb2 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1929,14 +1929,17 @@ static void  vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)  {  	struct vmxnet3_adapter *adapter = netdev_priv(netdev); -	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; -	unsigned long flags; -	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); -	spin_lock_irqsave(&adapter->cmd_lock, flags); -	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, -			       VMXNET3_CMD_UPDATE_VLAN_FILTERS); -	spin_unlock_irqrestore(&adapter->cmd_lock, flags); +	if (!(netdev->flags & IFF_PROMISC)) { +		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; +		unsigned long flags; + +		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); +		spin_lock_irqsave(&adapter->cmd_lock, flags); +		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, +				       VMXNET3_CMD_UPDATE_VLAN_FILTERS); +		spin_unlock_irqrestore(&adapter->cmd_lock, flags); +	}  	set_bit(vid, adapter->active_vlans);  } @@ -1946,14 +1949,17 @@ static void  vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)  {  	struct vmxnet3_adapter *adapter = netdev_priv(netdev); -	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; -	unsigned long flags; -	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); -	spin_lock_irqsave(&adapter->cmd_lock, flags); -	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, -			       VMXNET3_CMD_UPDATE_VLAN_FILTERS); -	spin_unlock_irqrestore(&adapter->cmd_lock, flags); +	if (!(netdev->flags & IFF_PROMISC)) { +		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; +		unsigned long flags; + +		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); +		spin_lock_irqsave(&adapter->cmd_lock, flags); +		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, +				       VMXNET3_CMD_UPDATE_VLAN_FILTERS); +		spin_unlock_irqrestore(&adapter->cmd_lock, flags); +	}  	clear_bit(vid, adapter->active_vlans);  } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 2d4c0910295..2d394af8217 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,  	case ADC_DC_CAL:  		/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */  		if (!IS_CHAN_B(chan) && -		    !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) +		    !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) && +		      IS_CHAN_HT20(chan)))  			supported = true;  		break;  	} diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h 
b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 2339728a730..3e69c631ebb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = {  	{0x00008258, 0x00000000},  	{0x0000825c, 0x40000000},  	{0x00008260, 0x00080922}, -	{0x00008264, 0x9bc00010}, +	{0x00008264, 0x9d400010},  	{0x00008268, 0xffffffff},  	{0x0000826c, 0x0000ffff},  	{0x00008270, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index c34bef1bf2b..1b9400371ea 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -69,7 +69,7 @@ static int ar9003_hw_power_interpolate(int32_t x,  static const struct ar9300_eeprom ar9300_default = {  	.eepromVersion = 2,  	.templateVersion = 2, -	.macAddr = {1, 2, 3, 4, 5, 6}, +	.macAddr = {0, 2, 3, 4, 5, 6},  	.custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  		     0, 0, 0, 0, 0, 0, 0, 0, 0, 0},  	.baseEepHeader = { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 1baca8e4715..fcafec0605f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,  		REG_WRITE_ARRAY(&ah->iniModesAdditional,  				modesIndex, regWrites); -	if (AR_SREV_9300(ah)) +	if (AR_SREV_9330(ah))  		REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);  	if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9098aaad97a..722967b86cf 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2283,7 +2283,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)  	mutex_lock(&sc->mutex);  	ah->coverage_class = coverage_class; + +	ath9k_ps_wakeup(sc);  	ath9k_hw_init_global_settings(ah); +	ath9k_ps_restore(sc); +  	mutex_unlock(&sc->mutex);  } @@ -2299,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)  	mutex_lock(&sc->mutex);  	cancel_delayed_work_sync(&sc->tx_complete_work); +	if (ah->ah_flags & AH_UNPLUGGED) { +		ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); +		mutex_unlock(&sc->mutex); +		return; +	} +  	if (sc->sc_flags & SC_OP_INVALID) {  		ath_dbg(common, ATH_DBG_ANY, "Device not present\n");  		mutex_unlock(&sc->mutex); diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 9a4850154fb..4c21f8cbdeb 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc,  static void ath_rx_edma_cleanup(struct ath_softc *sc)  { +	struct ath_hw *ah = sc->sc_ah; +	struct ath_common *common = ath9k_hw_common(ah);  	struct ath_buf *bf;  	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);  	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);  	list_for_each_entry(bf, &sc->rx.rxbuf, list) { -		if (bf->bf_mpdu) +		if (bf->bf_mpdu) { +			dma_unmap_single(sc->dev, bf->bf_buf_addr, +					common->rx_bufsize, +					DMA_BIDIRECTIONAL);  			dev_kfree_skb_any(bf->bf_mpdu); +			bf->bf_buf_addr = 0; +			bf->bf_mpdu = NULL; +		}  	}  	INIT_LIST_HEAD(&sc->rx.rxbuf); diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 
0122930b14c..0474e6638d2 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1066,8 +1066,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  	 * the high througput speed in 802.11n networks.  	 */ -	if (!is_main_vif(ar, vif)) +	if (!is_main_vif(ar, vif)) { +		mutex_lock(&ar->mutex);  		goto err_softw; +	}  	/*  	 * While the hardware supports *catch-all* key, for offloading diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 26f1ab840cc..e293a7921bf 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -1632,7 +1632,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)  	u32 cmd, beacon0_valid, beacon1_valid;  	if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && -	    !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) +	    !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) && +	    !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))  		return;  	/* This is the bottom half of the asynchronous beacon update. */ diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 3774dd03474..ef9ad79d1bf 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -1903,15 +1903,17 @@ static void ipw2100_down(struct ipw2100_priv *priv)  static int ipw2100_net_init(struct net_device *dev)  {  	struct ipw2100_priv *priv = libipw_priv(dev); + +	return ipw2100_up(priv, 1); +} + +static int ipw2100_wdev_init(struct net_device *dev) +{ +	struct ipw2100_priv *priv = libipw_priv(dev);  	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);  	struct wireless_dev *wdev = &priv->ieee->wdev; -	int ret;  	int i; -	ret = ipw2100_up(priv, 1); -	if (ret) -		return ret; -  	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);  	/* fill-out priv->ieee->bg_band */ @@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,  		       "Error calling register_netdev.\n");  		goto fail;  	} +	registered = 1; + +	err = ipw2100_wdev_init(dev); +	if (err) +		goto fail;  	mutex_lock(&priv->action_mutex); -	registered = 1;  	IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); @@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,        fail_unlock:  	mutex_unlock(&priv->action_mutex); - +	wiphy_unregister(priv->ieee->wdev.wiphy); +	kfree(priv->ieee->bg_band.channels);        fail:  	if (dev) {  		if (registered) diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 87813c33bdc..4ffebede5e0 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -11425,16 +11425,23 @@ static void ipw_bg_down(struct work_struct *work)  /* Called by register_netdev() */  static int ipw_net_init(struct net_device *dev)  { +	int rc = 0; +	struct ipw_priv *priv = libipw_priv(dev); + +	mutex_lock(&priv->mutex); +	if (ipw_up(priv)) +		rc = -EIO; +	mutex_unlock(&priv->mutex); + +	return rc; +} + +static int ipw_wdev_init(struct net_device *dev) +{  	int i, rc = 0;  	struct ipw_priv *priv = libipw_priv(dev);  	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);  	struct wireless_dev *wdev = &priv->ieee->wdev; -	mutex_lock(&priv->mutex); - -	if (ipw_up(priv)) { -		rc = -EIO; -		goto out; -	}  	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); @@ -11519,13 +11526,9 @@ static int ipw_net_init(struct net_device *dev)  	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);  	/* With that 
information in place, we can now register the wiphy... */ -	if (wiphy_register(wdev->wiphy)) { +	if (wiphy_register(wdev->wiphy))  		rc = -EIO; -		goto out; -	} -  out: -	mutex_unlock(&priv->mutex);  	return rc;  } @@ -11832,14 +11835,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,  		goto out_remove_sysfs;  	} +	err = ipw_wdev_init(net_dev); +	if (err) { +		IPW_ERROR("failed to register wireless device\n"); +		goto out_unregister_netdev; +	} +  #ifdef CONFIG_IPW2200_PROMISCUOUS  	if (rtap_iface) {  	        err = ipw_prom_alloc(priv);  		if (err) {  			IPW_ERROR("Failed to register promiscuous network "  				  "device (error %d).\n", err); -			unregister_netdev(priv->net_dev); -			goto out_remove_sysfs; +			wiphy_unregister(priv->ieee->wdev.wiphy); +			kfree(priv->ieee->a_band.channels); +			kfree(priv->ieee->bg_band.channels); +			goto out_unregister_netdev;  		}  	}  #endif @@ -11851,6 +11862,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,  	return 0; +      out_unregister_netdev: +	unregister_netdev(priv->net_dev);        out_remove_sysfs:  	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);        out_release_irq: diff --git a/drivers/net/wireless/ipw2x00/libipw_tx.c b/drivers/net/wireless/ipw2x00/libipw_tx.c index 01c88a71abe..e8c039879b0 100644 --- a/drivers/net/wireless/ipw2x00/libipw_tx.c +++ b/drivers/net/wireless/ipw2x00/libipw_tx.c @@ -395,7 +395,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)  		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))  			bytes_per_frag -= LIBIPW_FCS_LEN; -		/* Each fragment may need to have room for encryptiong +		/* Each fragment may need to have room for encryption  		 * pre/postfix */  		if (host_encrypt)  			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c index 977bd2477c6..164bcae821f 100644 --- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c @@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,   out: -	rs_sta->last_txrate_idx = index; -	if (sband->band == IEEE80211_BAND_5GHZ) -		info->control.rates[0].idx = rs_sta->last_txrate_idx - -				IWL_FIRST_OFDM_RATE; -	else +	if (sband->band == IEEE80211_BAND_5GHZ) { +		if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) +			index = IWL_FIRST_OFDM_RATE; +		rs_sta->last_txrate_idx = index; +		info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; +	} else { +		rs_sta->last_txrate_idx = index;  		info->control.rates[0].idx = rs_sta->last_txrate_idx; +	}  	IWL_DEBUG_RATE(priv, "leave: %d\n", index);  } diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index 35cd2537e7f..e5971fe9d16 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c @@ -937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv)  					&priv->contexts[IWL_RXON_CTX_BSS]);  #endif -	wake_up_interruptible(&priv->wait_command_queue); +	wake_up(&priv->wait_command_queue);  	/* Keep the restart process from trying to send host  	 * commands by clearing the INIT status bit */ @@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)  	/* Set the FW error flag -- cleared on iwl_down */  	set_bit(STATUS_FW_ERROR, &priv->status); -	wake_up_interruptible(&priv->wait_command_queue); +	wake_up(&priv->wait_command_queue);  	/*  	 * Keep the restart process 
from trying to send host  	 * commands by clearing the INIT status bit diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c index 62b4b09122c..ce1fc9feb61 100644 --- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c +++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c @@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)  		goto out;  	} -	ret = wait_event_interruptible_timeout(priv->wait_command_queue, +	ret = wait_event_timeout(priv->wait_command_queue,  			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),  			HOST_COMPLETE_TIMEOUT);  	if (!ret) { diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c index 4fff995c6f3..ef9e268bf8a 100644 --- a/drivers/net/wireless/iwlegacy/iwl-tx.c +++ b/drivers/net/wireless/iwlegacy/iwl-tx.c @@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)  	cmd = txq->cmd[cmd_index];  	meta = &txq->meta[cmd_index]; +	txq->time_stamp = jiffies; +  	pci_unmap_single(priv->pci_dev,  			 dma_unmap_addr(meta, mapping),  			 dma_unmap_len(meta, len), @@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)  		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);  		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",  			       iwl_legacy_get_cmd_string(cmd->hdr.cmd)); -		wake_up_interruptible(&priv->wait_command_queue); +		wake_up(&priv->wait_command_queue);  	}  	/* Mark as unmapped */ diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c index 795826a014e..66ee15629a7 100644 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c @@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,  		wiphy_rfkill_set_hw_state(priv->hw->wiphy,  				test_bit(STATUS_RF_KILL_HW, &priv->status));  	else -		wake_up_interruptible(&priv->wait_command_queue); +		wake_up(&priv->wait_command_queue);  }  /** @@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)  	iwl3945_reg_txpower_periodic(priv);  	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); -	wake_up_interruptible(&priv->wait_command_queue); +	wake_up(&priv->wait_command_queue);  	return; @@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv)  	iwl_legacy_clear_driver_stations(priv);  	/* Unblock any waiting calls */ -	wake_up_interruptible_all(&priv->wait_command_queue); +	wake_up_all(&priv->wait_command_queue);  	/* Wipe out the EXIT_PENDING status bit if we are not actually  	 * exiting the module */ @@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)  	/* Wait for START_ALIVE from ucode. Otherwise callbacks from  	 * mac80211 will not be run successfully. 
*/ -	ret = wait_event_interruptible_timeout(priv->wait_command_queue, +	ret = wait_event_timeout(priv->wait_command_queue,  			test_bit(STATUS_READY, &priv->status),  			UCODE_READY_TIMEOUT);  	if (!ret) { diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c index 14334668034..aa0c2539761 100644 --- a/drivers/net/wireless/iwlegacy/iwl4965-base.c +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c @@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,  		wiphy_rfkill_set_hw_state(priv->hw->wiphy,  			test_bit(STATUS_RF_KILL_HW, &priv->status));  	else -		wake_up_interruptible(&priv->wait_command_queue); +		wake_up(&priv->wait_command_queue);  }  /** @@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)  		handled |= CSR_INT_BIT_FH_TX;  		/* Wake up uCode load routine, now that load is complete */  		priv->ucode_write_complete = 1; -		wake_up_interruptible(&priv->wait_command_queue); +		wake_up(&priv->wait_command_queue);  	}  	if (inta & ~handled) { @@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv)  	iwl4965_rf_kill_ct_config(priv);  	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); -	wake_up_interruptible(&priv->wait_command_queue); +	wake_up(&priv->wait_command_queue);  	iwl_legacy_power_update_mode(priv, true);  	IWL_DEBUG_INFO(priv, "Updated power mode\n"); @@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv)  	iwl_legacy_clear_driver_stations(priv);  	/* Unblock any waiting calls */ -	wake_up_interruptible_all(&priv->wait_command_queue); +	wake_up_all(&priv->wait_command_queue);  	/* Wipe out the EXIT_PENDING status bit if we are not actually  	 * exiting the module */ @@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw)  	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from  	 * mac80211 will not be run successfully. */ -	ret = wait_event_interruptible_timeout(priv->wait_command_queue, +	ret = wait_event_timeout(priv->wait_command_queue,  			test_bit(STATUS_READY, &priv->status),  			UCODE_READY_TIMEOUT);  	if (!ret) { diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index a895a099d08..56211006a18 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c @@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)  	memset(&cmd, 0, sizeof(cmd));  	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); -	memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); +	memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));  	if (!(cmd.radio_sensor_offset))  		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index b0ae4de7f08..f9c3cd95d61 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2140,7 +2140,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,  		    IEEE80211_HW_SPECTRUM_MGMT |  		    IEEE80211_HW_REPORTS_TX_ACK_STATUS; +	/* +	 * Including the following line will crash some AP's.  This +	 * workaround removes the stimulus which causes the crash until +	 * the AP software can be fixed.  	
hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; +	 */  	hw->flags |= IEEE80211_HW_SUPPORTS_PS |  		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS; diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index 69d4ec467dc..2fdbffa079c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c @@ -478,27 +478,22 @@ out_no_pci:  	return err;  } -static void iwl_pci_down(struct iwl_bus *bus) -{ -	struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus->bus_specific; - -	pci_disable_msi(pci_bus->pci_dev); -	pci_iounmap(pci_bus->pci_dev, pci_bus->hw_base); -	pci_release_regions(pci_bus->pci_dev); -	pci_disable_device(pci_bus->pci_dev); -	pci_set_drvdata(pci_bus->pci_dev, NULL); - -	kfree(bus); -} -  static void __devexit iwl_pci_remove(struct pci_dev *pdev)  {  	struct iwl_priv *priv = pci_get_drvdata(pdev); -	void *bus_specific = priv->bus->bus_specific; +	struct iwl_bus *bus = priv->bus; +	struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus); +	struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);  	iwl_remove(priv); -	iwl_pci_down(bus_specific); +	pci_disable_msi(pci_dev); +	pci_iounmap(pci_dev, pci_bus->hw_base); +	pci_release_regions(pci_dev); +	pci_disable_device(pci_dev); +	pci_set_drvdata(pci_dev, NULL); + +	kfree(bus);  }  #ifdef CONFIG_PM diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index dd6937e9705..77e528f5db8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,  	mutex_lock(&priv->mutex); -	if (test_bit(STATUS_SCANNING, &priv->status) && -	    priv->scan_type != IWL_SCAN_NORMAL) { -		IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); -		ret = -EAGAIN; -		goto out_unlock; -	} - -	/* mac80211 will only ask for one band at a time */ -	priv->scan_request = req; -	priv->scan_vif = vif; -  	/*  	 * If an internal scan is in progress, just set  	 * up the scan_request as per above.  	 */  	if (priv->scan_type != IWL_SCAN_NORMAL) { -		IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); +		IWL_DEBUG_SCAN(priv, +			       "SCAN request during internal scan - defer\n"); +		priv->scan_request = req; +		priv->scan_vif = vif;  		ret = 0; -	} else +	} else { +		priv->scan_request = req; +		priv->scan_vif = vif; +		/* +		 * mac80211 will only ask for one band at a time +		 * so using channels[0] here is ok +		 */  		ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,  					req->channels[0]->band); +		if (ret) { +			priv->scan_request = NULL; +			priv->scan_vif = NULL; +		} +	}  	IWL_DEBUG_MAC80211(priv, "leave\n"); -out_unlock:  	mutex_unlock(&priv->mutex);  	return ret; diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c index a6b2b1db0b1..222d410c586 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c @@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)  	cmd = txq->cmd[cmd_index];  	meta = &txq->meta[cmd_index]; +	txq->time_stamp = jiffies; +  	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);  	/* Input error checking is done when commands are added to queue. 
*/ diff --git a/drivers/net/wireless/libertas_tf/deb_defs.h b/drivers/net/wireless/libertas_tf/deb_defs.h index ae753962d8b..4bd3dc5adf7 100644 --- a/drivers/net/wireless/libertas_tf/deb_defs.h +++ b/drivers/net/wireless/libertas_tf/deb_defs.h @@ -3,7 +3,7 @@    * global variable declaration.    */  #ifndef _LBS_DEB_DEFS_H_ -#define _LBS_DEB_EFS_H_ +#define _LBS_DEB_DEFS_H_  #ifndef DRV_NAME  #define DRV_NAME "libertas_tf" diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index ef67f6786a8..0019dfd8fb0 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3697,14 +3697,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)  	rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);  	/* Apparently the data is read from end to start */ -	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, -					(u32 *)&rt2x00dev->eeprom[i]); -	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, -					(u32 *)&rt2x00dev->eeprom[i + 2]); -	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, -					(u32 *)&rt2x00dev->eeprom[i + 4]); -	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, -					(u32 *)&rt2x00dev->eeprom[i + 6]); +	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg); +	/* The returned value is in CPU order, but eeprom is le */ +	rt2x00dev->eeprom[i] = cpu_to_le32(reg); +	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg); +	*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); +	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg); +	*(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg); +	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg); +	*(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);  	mutex_unlock(&rt2x00dev->csr_mutex);  } @@ -3870,19 +3871,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)  		return -ENODEV;  	} -	if (!rt2x00_rf(rt2x00dev, RF2820) && -	    !rt2x00_rf(rt2x00dev, RF2850) && -	    !rt2x00_rf(rt2x00dev, RF2720) && -	    !rt2x00_rf(rt2x00dev, RF2750) && -	    !rt2x00_rf(rt2x00dev, RF3020) && -	    !rt2x00_rf(rt2x00dev, RF2020) && -	    !rt2x00_rf(rt2x00dev, RF3021) && -	    !rt2x00_rf(rt2x00dev, RF3022) && -	    !rt2x00_rf(rt2x00dev, RF3052) && -	    !rt2x00_rf(rt2x00dev, RF3320) && -	    !rt2x00_rf(rt2x00dev, RF5370) && -	    !rt2x00_rf(rt2x00dev, RF5390)) { -		ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); +	switch (rt2x00dev->chip.rf) { +	case RF2820: +	case RF2850: +	case RF2720: +	case RF2750: +	case RF3020: +	case RF2020: +	case RF3021: +	case RF3022: +	case RF3052: +	case RF3320: +	case RF5370: +	case RF5390: +		break; +	default: +		ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n", +		      rt2x00dev->chip.rf);  		return -ENODEV;  	} diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 939563162fb..dbf501ca317 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)  	int wcid, ack, pid;  	int tx_wcid, tx_ack, tx_pid; +	if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || +	    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) { +		WARNING(entry->queue->rt2x00dev, +			"Data pending for entry %u in queue %u\n", +			entry->entry_idx, entry->queue->qid); +		cond_resched(); +		return false; +	} +  	wcid	= rt2x00_get_field32(reg, TX_STA_FIFO_WCID);  	ack	= rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);  	pid	= rt2x00_get_field32(reg, 
TX_STA_FIFO_PID_TYPE); @@ -529,12 +538,11 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)  			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);  			if (rt2800usb_txdone_entry_check(entry, reg))  				break; +			entry = NULL;  		} -		if (!entry || rt2x00queue_empty(queue)) -			break; - -		rt2800_txdone_entry(entry, reg); +		if (entry) +			rt2800_txdone_entry(entry, reg);  	}  } @@ -558,8 +566,10 @@ static void rt2800usb_work_txdone(struct work_struct *work)  		while (!rt2x00queue_empty(queue)) {  			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); -			if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) +			if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || +			    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))  				break; +  			if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))  				rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);  			else if (rt2x00queue_status_timeout(entry)) diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index b6b4542c246..1e31050dafc 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)  	struct queue_entry *entry = (struct queue_entry *)urb->context;  	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; -	if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) +	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))  		return; - -	if (rt2x00dev->ops->lib->tx_dma_done) -		rt2x00dev->ops->lib->tx_dma_done(entry); - -	/* -	 * Report the frame as DMA done -	 */ -	rt2x00lib_dmadone(entry); -  	/*  	 * Check if the frame was correctly uploaded  	 */  	if (urb->status)  		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); +	/* +	 * Report the frame as DMA done +	 */ +	rt2x00lib_dmadone(entry); +	if (rt2x00dev->ops->lib->tx_dma_done) +		rt2x00dev->ops->lib->tx_dma_done(entry);  	/*  	 * Schedule the delayed work for reading the TX status  	 * from the device. @@ -874,18 +871,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)  {  	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);  	struct rt2x00_dev *rt2x00dev = hw->priv; -	int retval; - -	retval = rt2x00lib_suspend(rt2x00dev, state); -	if (retval) -		return retval; -	/* -	 * Decrease usbdev refcount. -	 */ -	usb_put_dev(interface_to_usbdev(usb_intf)); - -	return 0; +	return rt2x00lib_suspend(rt2x00dev, state);  }  EXPORT_SYMBOL_GPL(rt2x00usb_suspend); @@ -894,8 +881,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf)  	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);  	struct rt2x00_dev *rt2x00dev = hw->priv; -	usb_get_dev(interface_to_usbdev(usb_intf)); -  	return rt2x00lib_resume(rt2x00dev);  }  EXPORT_SYMBOL_GPL(rt2x00usb_resume); diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 1bdc1aa305c..04c4e9eb6ee 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,  			mac->link_state = MAC80211_NOLINK;  			memset(mac->bssid, 0, 6); + +			/* reset sec info */ +			rtl_cam_reset_sec_info(hw); + +			rtl_cam_reset_all_entry(hw);  			mac->vendor = PEER_UNKNOWN;  			RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, @@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  		 *or clear all entry here.  		 
*/  		rtl_cam_delete_one_entry(hw, mac_addr, key_idx); + +		rtl_cam_reset_sec_info(hw); +  		break;  	default:  		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 56f12358389..f8648b7288d 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -488,7 +488,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)  	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));  	struct sk_buff *skb = NULL;  	struct ieee80211_tx_info *info = NULL; -	int tid; /* should be int */ +	int tid;  	if (!rtlpriv->rtlhal.earlymode_enable)  		return; @@ -1538,7 +1538,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)  	rtl_init_rx_config(hw); -	/*should after adapter start and interrupt enable. */ +	/*should be after adapter start and interrupt enable. */  	set_hal_start(rtlhal);  	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); @@ -1559,7 +1559,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)  	u8 RFInProgressTimeOut = 0;  	/* -	 *should before disable interrrupt&adapter +	 *should be before disable interrupt&adapter  	 *and will do it immediately.  	 */  	set_hal_stop(rtlhal); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 906e7aa55bc..3e52a549622 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,  			       (tcb_desc->rts_use_shortpreamble ? 1 : 0)  			       : (tcb_desc->rts_use_shortgi ? 1 : 0)));  	if (mac->bw_40) { -		if (tcb_desc->packet_bw) { +		if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {  			SET_TX_DESC_DATA_BW(txdesc, 1);  			SET_TX_DESC_DATA_SC(txdesc, 3); +		} else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){ +			SET_TX_DESC_DATA_BW(txdesc, 1); +			SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);  		} else {  			SET_TX_DESC_DATA_BW(txdesc, 0); -				if (rate_flag & IEEE80211_TX_RC_DUP_DATA) -					SET_TX_DESC_DATA_SC(txdesc, -							  mac->cur_40_prime_sc); -			} +			SET_TX_DESC_DATA_SC(txdesc, 0); +		}  	} else {  		SET_TX_DESC_DATA_BW(txdesc, 0);  		SET_TX_DESC_DATA_SC(txdesc, 0); diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 8b1cef0ffde..4bf3cf457ef 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,  	u8 tid = 0;  	u16 seq_number = 0; +	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));  	if (ieee80211_is_auth(fc)) {  		RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));  		rtl_ips_nic_on(hw); diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c index 7e33f1f4f3d..34f6ab53e51 100644 --- a/drivers/net/wireless/wl12xx/acx.c +++ b/drivers/net/wireless/wl12xx/acx.c @@ -77,8 +77,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)  	auth->sleep_auth = sleep_auth;  	ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); -	if (ret < 0) -		return ret;  out:  	kfree(auth); @@ -624,10 +622,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)  	ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,  				   detection, sizeof(*detection)); -	if (ret < 0) { +	if (ret < 0)  		wl1271_warning("failed to set cca threshold: %d", ret); -		return ret; -	}  out:  	kfree(detection); diff --git a/drivers/net/wireless/wl12xx/main.c 
b/drivers/net/wireless/wl12xx/main.c index e58c22d21e3..b70ae40ad66 100644 --- a/drivers/net/wireless/wl12xx/main.c +++ b/drivers/net/wireless/wl12xx/main.c @@ -4283,6 +4283,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl)  	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |  		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);  	wl->hw->wiphy->max_scan_ssids = 1; +	wl->hw->wiphy->max_sched_scan_ssids = 1;  	/*  	 * Maximum length of elements in scanning probe request templates  	 * should be the maximum length possible for a template, without diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c index 5cf18c2c23f..fb1fd5af75e 100644 --- a/drivers/net/wireless/wl12xx/sdio.c +++ b/drivers/net/wireless/wl12xx/sdio.c @@ -164,7 +164,7 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)  	/* If enabled, tell runtime PM not to power off the card */  	if (pm_runtime_enabled(&func->dev)) {  		ret = pm_runtime_get_sync(&func->dev); -		if (ret) +		if (ret < 0)  			goto out;  	} else {  		/* Runtime PM is disabled: power up the card manually */ diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c index 5d5e1ef8720..4ae8effaee2 100644 --- a/drivers/net/wireless/wl12xx/testmode.c +++ b/drivers/net/wireless/wl12xx/testmode.c @@ -36,7 +36,6 @@ enum wl1271_tm_commands {  	WL1271_TM_CMD_TEST,  	WL1271_TM_CMD_INTERROGATE,  	WL1271_TM_CMD_CONFIGURE, -	WL1271_TM_CMD_NVS_PUSH,  	WL1271_TM_CMD_SET_PLT_MODE,  	WL1271_TM_CMD_RECOVER, @@ -139,12 +138,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])  	if (ret < 0) {  		wl1271_warning("testmode cmd interrogate failed: %d", ret); +		kfree(cmd);  		return ret;  	}  	skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); -	if (!skb) +	if (!skb) { +		kfree(cmd);  		return -ENOMEM; +	}  	NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); @@ -187,48 +189,6 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])  	return 0;  } -static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) -{ -	int ret = 0; -	size_t len; -	void *buf; - -	wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push"); - -	if (!tb[WL1271_TM_ATTR_DATA]) -		return -EINVAL; - -	buf = nla_data(tb[WL1271_TM_ATTR_DATA]); -	len = nla_len(tb[WL1271_TM_ATTR_DATA]); - -	mutex_lock(&wl->mutex); - -	kfree(wl->nvs); - -	if ((wl->chip.id == CHIP_ID_1283_PG20) && -	    (len != sizeof(struct wl128x_nvs_file))) -		return -EINVAL; -	else if (len != sizeof(struct wl1271_nvs_file)) -		return -EINVAL; - -	wl->nvs = kzalloc(len, GFP_KERNEL); -	if (!wl->nvs) { -		wl1271_error("could not allocate memory for the nvs file"); -		ret = -ENOMEM; -		goto out; -	} - -	memcpy(wl->nvs, buf, len); -	wl->nvs_len = len; - -	wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs"); - -out: -	mutex_unlock(&wl->mutex); - -	return ret; -} -  static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])  {  	u32 val; @@ -285,8 +245,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)  		return wl1271_tm_cmd_interrogate(wl, tb);  	case WL1271_TM_CMD_CONFIGURE:  		return wl1271_tm_cmd_configure(wl, tb); -	case WL1271_TM_CMD_NVS_PUSH: -		return wl1271_tm_cmd_nvs_push(wl, tb);  	case WL1271_TM_CMD_SET_PLT_MODE:  		return wl1271_tm_cmd_set_plt_mode(wl, tb);  	case WL1271_TM_CMD_RECOVER: diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 0ca86f9ec4e..182562952c7 100644 --- a/drivers/net/xen-netback/interface.c 
+++ b/drivers/net/xen-netback/interface.c @@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,  	xenvif_get(vif);  	rtnl_lock(); -	if (netif_running(vif->dev)) -		xenvif_up(vif);  	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)  		dev_set_mtu(vif->dev, ETH_DATA_LEN);  	netdev_update_features(vif->dev);  	netif_carrier_on(vif->dev); +	if (netif_running(vif->dev)) +		xenvif_up(vif);  	rtnl_unlock();  	return 0; diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 57a6d19eba4..a6f762188bc 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -668,7 +668,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)   * @dev: instance of PCI owned by the driver that's asking   * @mask:  number of address bits this PCI device can handle   * - * See Documentation/PCI/PCI-DMA-mapping.txt + * See Documentation/DMA-API-HOWTO.txt   */  static int sba_dma_supported( struct device *dev, u64 mask)  { @@ -680,7 +680,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)  		return(0);  	} -	/* Documentation/PCI/PCI-DMA-mapping.txt tells drivers to try 64-bit +	/* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit  	 * first, then fall back to 32-bit if that fails.  	 * We are just "encouraging" 32-bit DMA masks here since we can  	 * never allow IOMMU bypass unless we add special support for ZX1. @@ -706,7 +706,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)   * @size:  number of bytes to map in driver buffer.   * @direction:  R/W or both.   * - * See Documentation/PCI/PCI-DMA-mapping.txt + * See Documentation/DMA-API-HOWTO.txt   */  static dma_addr_t  sba_map_single(struct device *dev, void *addr, size_t size, @@ -785,7 +785,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,   * @size:  number of bytes mapped in driver buffer.   * @direction:  R/W or both.   * - * See Documentation/PCI/PCI-DMA-mapping.txt + * See Documentation/DMA-API-HOWTO.txt   */  static void  sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, @@ -861,7 +861,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,   * @size:  number of bytes mapped in driver buffer.   * @dma_handle:  IOVA of new buffer.   * - * See Documentation/PCI/PCI-DMA-mapping.txt + * See Documentation/DMA-API-HOWTO.txt   */  static void *sba_alloc_consistent(struct device *hwdev, size_t size,  					dma_addr_t *dma_handle, gfp_t gfp) @@ -892,7 +892,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,   * @vaddr:  virtual address IOVA of "consistent" buffer.   * @dma_handler:  IO virtual address of "consistent" buffer.   * - * See Documentation/PCI/PCI-DMA-mapping.txt + * See Documentation/DMA-API-HOWTO.txt   */  static void  sba_free_consistent(struct device *hwdev, size_t size, void *vaddr, @@ -927,7 +927,7 @@ int dump_run_sg = 0;   * @nents:  number of entries in list   * @direction:  R/W or both.   * - * See Documentation/PCI/PCI-DMA-mapping.txt + * See Documentation/DMA-API-HOWTO.txt   */  static int  sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, @@ -1011,7 +1011,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,   * @nents:  number of entries in list   * @direction:  R/W or both.   
* - * See Documentation/PCI/PCI-DMA-mapping.txt + * See Documentation/DMA-API-HOWTO.txt   */  static void   sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index 753b21aaea6..3ffd9c1acc0 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c @@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)  			(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))  		return; -	pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss); +	if (dev->bus && dev->bus->self) +		pcie_bus_configure_settings(dev->bus, +					    dev->bus->self->pcie_mpss);  	memset(&hpp, 0, sizeof(hpp));  	ret = pci_get_hp_params(dev, &hpp); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 0ce67423a0a..e9651f0a881 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;  unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;  unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; -enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;  /*   * The default CLS is used if arch didn't set CLS explicitly and not @@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str)  				pci_hotplug_io_size = memparse(str + 9, &str);  			} else if (!strncmp(str, "hpmemsize=", 10)) {  				pci_hotplug_mem_size = memparse(str + 10, &str); +			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) { +				pcie_bus_config = PCIE_BUS_TUNE_OFF;  			} else if (!strncmp(str, "pcie_bus_safe", 13)) {  				pcie_bus_config = PCIE_BUS_SAFE;  			} else if (!strncmp(str, "pcie_bus_perf", 13)) {  				pcie_bus_config = PCIE_BUS_PERFORMANCE; +			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { +				pcie_bus_config = PCIE_BUS_PEER2PEER;  			} else {  				printk(KERN_ERR "PCI: Unknown option `%s'\n",  						str); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8473727b29f..6ab6bd3df4b 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1351,7 +1351,8 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)  	 * will occur as normal.  	 */  	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || -	    dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) +	     (dev->bus->self && +	      dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))  		*smpss = 0;  	if (*smpss > dev->pcie_mpss) @@ -1396,34 +1397,37 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)  static void pcie_write_mrrs(struct pci_dev *dev, int mps)  { -	int rc, mrrs; +	int rc, mrrs, dev_mpss; -	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { -		int dev_mpss = 128 << dev->pcie_mpss; +	/* In the "safe" case, do not configure the MRRS.  There appear to be +	 * issues with setting MRRS to 0 on a number of devices. +	 */ -		/* For Max performance, the MRRS must be set to the largest -		 * supported value.  However, it cannot be configured larger -		 * than the MPS the device or the bus can support.  This assumes -		 * that the largest MRRS available on the device cannot be -		 * smaller than the device MPSS. -		 */ -		mrrs = mps < dev_mpss ? 
mps : dev_mpss; -	} else -		/* In the "safe" case, configure the MRRS for fairness on the -		 * bus by making all devices have the same size -		 */ -		mrrs = mps; +	if (pcie_bus_config != PCIE_BUS_PERFORMANCE) +		return; +	dev_mpss = 128 << dev->pcie_mpss; + +	/* For Max performance, the MRRS must be set to the largest supported +	 * value.  However, it cannot be configured larger than the MPS the +	 * device or the bus can support.  This assumes that the largest MRRS +	 * available on the device cannot be smaller than the device MPSS. +	 */ +	mrrs = min(mps, dev_mpss);  	/* MRRS is a R/W register.  Invalid values can be written, but a -	 * subsiquent read will verify if the value is acceptable or not. +	 * subsequent read will verify if the value is acceptable or not.  	 * If the MRRS value provided is not acceptable (e.g., too large),  	 * shrink the value until it is acceptable to the HW.   	 */  	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { +		dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value" +			 " to %d.  If any issues are encountered, please try " +			 "running with pci=pcie_bus_safe\n", mrrs);  		rc = pcie_set_readrq(dev, mrrs);  		if (rc) -			dev_err(&dev->dev, "Failed attempting to set the MRRS\n"); +			dev_err(&dev->dev, +				"Failed attempting to set the MRRS\n");  		mrrs /= 2;  	} @@ -1436,13 +1440,13 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)  	if (!pci_is_pcie(dev))  		return 0; -	dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", +	dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",  		 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));  	pcie_write_mps(dev, mps);  	pcie_write_mrrs(dev, mps); -	dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", +	dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",  		 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));  	return 0; @@ -1454,15 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)   */  void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)  { -	u8 smpss = mpss; +	u8 smpss; -	if (!bus->self) +	if (!pci_is_pcie(bus->self))  		return; -	if (!pci_is_pcie(bus->self)) +	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)  		return; +	/* FIXME - Peer to peer DMA is possible, though the endpoint would need +	 * to be aware to the MPS of the destination.  To work around this, +	 * simply force the MPS of the entire system to the smallest possible. +	 */ +	if (pcie_bus_config == PCIE_BUS_PEER2PEER) +		smpss = 0; +  	if (pcie_bus_config == PCIE_BUS_SAFE) { +		smpss = mpss; +  		pcie_find_smpss(bus->self, &smpss);  		pci_walk_bus(bus, pcie_find_smpss, &smpss);  	} diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 6fa215a3861..90832a95599 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -400,9 +400,8 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data)  			dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",  				pci_name(dev), i);  			if (pci_claim_resource(dev, i)) { -				dev_err(&pdev->xdev->dev, "Could not claim " -					"resource %s/%d! Device offline. Try " -					"giving less than 4GB to domain.\n", +				dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! " +					"Device offline. 
Try using e820_host=1 in the guest config.\n",  					pci_name(dev), i);  			}  		} diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c index c998f7aaadb..0fac9658b02 100644 --- a/drivers/pcmcia/sa1100_simpad.c +++ b/drivers/pcmcia/sa1100_simpad.c @@ -15,10 +15,6 @@  #include <mach/simpad.h>  #include "sa1100_generic.h" -extern long get_cs3_shadow(void); -extern void set_cs3_bit(int value);  -extern void clear_cs3_bit(int value); -  static struct pcmcia_irqs irqs[] = {  	{ 1, IRQ_GPIO_CF_CD, "CF_CD" },  }; @@ -26,7 +22,7 @@ static struct pcmcia_irqs irqs[] = {  static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt)  { -	clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); +	simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);  	skt->socket.pci_irq = IRQ_GPIO_CF_IRQ; @@ -38,8 +34,8 @@ static void simpad_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)  	soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));  	/* Disable CF bus: */ -	//set_cs3_bit(PCMCIA_BUFF_DIS); -	clear_cs3_bit(PCMCIA_RESET);        +	/*simpad_set_cs3_bit(PCMCIA_BUFF_DIS);*/ +	simpad_clear_cs3_bit(PCMCIA_RESET);  }  static void @@ -47,15 +43,16 @@ simpad_pcmcia_socket_state(struct soc_pcmcia_socket *skt,  			   struct pcmcia_state *state)  {  	unsigned long levels = GPLR; -	long cs3reg = get_cs3_shadow(); +	long cs3reg = simpad_get_cs3_ro();  	state->detect=((levels & GPIO_CF_CD)==0)?1:0;  	state->ready=(levels & GPIO_CF_IRQ)?1:0; -	state->bvd1=1; /* Not available on Simpad. */ -	state->bvd2=1; /* Not available on Simpad. */ +	state->bvd1 = 1; /* Might be cs3reg & PCMCIA_BVD1 */ +	state->bvd2 = 1; /* Might be cs3reg & PCMCIA_BVD2 */  	state->wrprot=0; /* Not available on Simpad. */ -   -	if((cs3reg & 0x0c) == 0x0c) { + +	if ((cs3reg & (PCMCIA_VS1|PCMCIA_VS2)) == +			(PCMCIA_VS1|PCMCIA_VS2)) {  		state->vs_3v=0;  		state->vs_Xv=0;  	} else { @@ -75,23 +72,23 @@ simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,  	/* Murphy: see table of MIC2562a-1 */  	switch (state->Vcc) {  	case 0: -		clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); +		simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);  		break;  	case 33:   -		clear_cs3_bit(VCC_3V_EN|EN1); -		set_cs3_bit(VCC_5V_EN|EN0); +		simpad_clear_cs3_bit(VCC_3V_EN|EN1); +		simpad_set_cs3_bit(VCC_5V_EN|EN0);  		break;  	case 50: -		clear_cs3_bit(VCC_5V_EN|EN1); -		set_cs3_bit(VCC_3V_EN|EN0); +		simpad_clear_cs3_bit(VCC_5V_EN|EN1); +		simpad_set_cs3_bit(VCC_3V_EN|EN0);  		break;  	default:  		printk(KERN_ERR "%s(): unrecognized Vcc %u\n",  			__func__, state->Vcc); -		clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); +		simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);  		local_irq_restore(flags);  		return -1;  	} @@ -110,7 +107,7 @@ static void simpad_pcmcia_socket_init(struct soc_pcmcia_socket *skt)  static void simpad_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)  {  	soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs)); -	set_cs3_bit(PCMCIA_RESET); +	simpad_set_cs3_bit(PCMCIA_RESET);  }  static struct pcmcia_low_level simpad_pcmcia_ops = {  diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 1e88d478532..10cf2500522 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -31,9 +31,6 @@ config ACER_WMI  	  wireless radio and bluetooth control, and on some laptops,  	  exposes the mail LED and LCD backlight. -	  For more information about this driver see -	  <file:Documentation/laptops/acer-wmi.txt> -  	  If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M  	  here. 
@@ -164,7 +161,7 @@ config HP_ACCEL  	  Support for a led indicating disk protection will be provided as  	  hp::hddprotect. For more information on the feature, refer to -	  Documentation/hwmon/lis3lv02d. +	  Documentation/misc-devices/lis3lv02d.  	  To compile this driver as a module, choose M here: the module will  	  be called hp_accel. diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 7bd829f247e..7b828680b21 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -4007,7 +4007,7 @@ static void bluetooth_shutdown(void)  		pr_notice("failed to save bluetooth state to NVRAM\n");  	else  		vdbg_printk(TPACPI_DBG_RFKILL, -			"bluestooth state saved to NVRAM\n"); +			"bluetooth state saved to NVRAM\n");  }  static void bluetooth_exit(void) diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c index 7106b49b26e..ffc5033ea9c 100644 --- a/drivers/power/max8997_charger.c +++ b/drivers/power/max8997_charger.c @@ -20,6 +20,7 @@   */  #include <linux/err.h> +#include <linux/module.h>  #include <linux/slab.h>  #include <linux/platform_device.h>  #include <linux/power_supply.h> diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c index cc21fa2120b..ef8efadb58c 100644 --- a/drivers/power/max8998_charger.c +++ b/drivers/power/max8998_charger.c @@ -20,6 +20,7 @@   */  #include <linux/err.h> +#include <linux/module.h>  #include <linux/slab.h>  #include <linux/platform_device.h>  #include <linux/power_supply.h> diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c index a675e31b4f1..d32d0d70f9b 100644 --- a/drivers/power/s3c_adc_battery.c +++ b/drivers/power/s3c_adc_battery.c @@ -20,6 +20,7 @@  #include <linux/s3c_adc_battery.h>  #include <linux/errno.h>  #include <linux/init.h> +#include <linux/module.h>  #include <plat/adc.h> diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index ee893581d4b..ebe77dd87da 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -505,8 +505,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,  	rdev->dev.dma_mask = &rdev->dma_mask;  	rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); -	if ((rdev->pef & RIO_PEF_INB_DOORBELL) && -	    (rdev->dst_ops & RIO_DST_OPS_DOORBELL)) +	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)  		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],  				   0, 0xffff); diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index 335551d333b..14a42a1edc6 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c @@ -36,6 +36,7 @@   */  struct ep93xx_rtc {  	void __iomem	*mmio_base; +	struct rtc_device *rtc;  };  static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, @@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)  {  	struct ep93xx_rtc *ep93xx_rtc;  	struct resource *res; -	struct rtc_device *rtc;  	int err;  	ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); @@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)  		return -ENXIO;  	pdev->dev.platform_data = ep93xx_rtc; -	platform_set_drvdata(pdev, rtc); +	platform_set_drvdata(pdev, ep93xx_rtc); -	rtc = rtc_device_register(pdev->name, +	ep93xx_rtc->rtc = rtc_device_register(pdev->name,  				&pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); -	if (IS_ERR(rtc)) { -		err = PTR_ERR(rtc); +	if (IS_ERR(ep93xx_rtc->rtc)) { +		err = PTR_ERR(ep93xx_rtc->rtc);  		
goto exit;  	} @@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)  	return 0;  fail: -	rtc_device_unregister(rtc); +	rtc_device_unregister(ep93xx_rtc->rtc);  exit:  	platform_set_drvdata(pdev, NULL);  	pdev->dev.platform_data = NULL; @@ -176,11 +176,11 @@ exit:  static int __exit ep93xx_rtc_remove(struct platform_device *pdev)  { -	struct rtc_device *rtc = platform_get_drvdata(pdev); +	struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);  	sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);  	platform_set_drvdata(pdev, NULL); -	rtc_device_unregister(rtc); +	rtc_device_unregister(ep93xx_rtc->rtc);  	pdev->dev.platform_data = NULL;  	return 0; diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index 2dd3c016327..d93a9608b1f 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c @@ -35,6 +35,7 @@  #include <linux/module.h>  #include <linux/platform_device.h>  #include <linux/rtc.h> +#include <linux/sched.h>  #include <linux/workqueue.h>  /* DryIce Register Definitions */ diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c index 075f1708dea..c4cf0573111 100644 --- a/drivers/rtc/rtc-lib.c +++ b/drivers/rtc/rtc-lib.c @@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)  	time -= tm->tm_hour * 3600;  	tm->tm_min = time / 60;  	tm->tm_sec = time - tm->tm_min * 60; + +	tm->tm_isdst = 0;  }  EXPORT_SYMBOL(rtc_time_to_tm); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 9329dbb9eba..7639ab906f0 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type;  static DEFINE_SPINLOCK(s3c_rtc_pie_lock); +static void s3c_rtc_alarm_clk_enable(bool enable) +{ +	static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock); +	static bool alarm_clk_enabled; +	unsigned long irq_flags; + +	spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags); +	if (enable) { +		if (!alarm_clk_enabled) { +			clk_enable(rtc_clk); +			alarm_clk_enabled = true; +		} +	} else { +		if (alarm_clk_enabled) { +			clk_disable(rtc_clk); +			alarm_clk_enabled = false; +		} +	} +	spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags); +} +  /* IRQ Handlers */  static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) @@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)  		writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);  	clk_disable(rtc_clk); + +	s3c_rtc_alarm_clk_enable(false); +  	return IRQ_HANDLED;  } @@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)  	writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);  	clk_disable(rtc_clk); +	s3c_rtc_alarm_clk_enable(enabled); +  	return 0;  } @@ -152,10 +178,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)  		goto retry_get_time;  	} -	pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", -		 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, -		 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); -  	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);  	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);  	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); @@ -164,6 +186,11 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)  	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);  	rtc_tm->tm_year += 100; + +	pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", +		 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, +		 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); +  	rtc_tm->tm_mon -= 1;  	clk_disable(rtc_clk); @@ -269,10 +296,9 @@ static int 
s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)  	clk_enable(rtc_clk);  	pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",  		 alrm->enabled, -		 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, +		 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,  		 tm->tm_hour, tm->tm_min, tm->tm_sec); -  	alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;  	writeb(0x00, base + S3C2410_RTCALM); @@ -319,49 +345,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)  	return 0;  } -static int s3c_rtc_open(struct device *dev) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct rtc_device *rtc_dev = platform_get_drvdata(pdev); -	int ret; - -	ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, -			  IRQF_DISABLED,  "s3c2410-rtc alarm", rtc_dev); - -	if (ret) { -		dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); -		return ret; -	} - -	ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, -			  IRQF_DISABLED,  "s3c2410-rtc tick", rtc_dev); - -	if (ret) { -		dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); -		goto tick_err; -	} - -	return ret; - - tick_err: -	free_irq(s3c_rtc_alarmno, rtc_dev); -	return ret; -} - -static void s3c_rtc_release(struct device *dev) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct rtc_device *rtc_dev = platform_get_drvdata(pdev); - -	/* do not clear AIE here, it may be needed for wake */ - -	free_irq(s3c_rtc_alarmno, rtc_dev); -	free_irq(s3c_rtc_tickno, rtc_dev); -} -  static const struct rtc_class_ops s3c_rtcops = { -	.open		= s3c_rtc_open, -	.release	= s3c_rtc_release,  	.read_time	= s3c_rtc_gettime,  	.set_time	= s3c_rtc_settime,  	.read_alarm	= s3c_rtc_getalarm, @@ -425,6 +409,9 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)  {  	struct rtc_device *rtc = platform_get_drvdata(dev); +	free_irq(s3c_rtc_alarmno, rtc); +	free_irq(s3c_rtc_tickno, rtc); +  	platform_set_drvdata(dev, NULL);  	rtc_device_unregister(rtc); @@ -548,10 +535,32 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)  	s3c_rtc_setfreq(&pdev->dev, 1); +	ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, +			  IRQF_DISABLED,  "s3c2410-rtc alarm", rtc); +	if (ret) { +		dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); +		goto err_alarm_irq; +	} + +	ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, +			  IRQF_DISABLED,  "s3c2410-rtc tick", rtc); +	if (ret) { +		dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); +		free_irq(s3c_rtc_alarmno, rtc); +		goto err_tick_irq; +	} +  	clk_disable(rtc_clk);  	return 0; + err_tick_irq: +	free_irq(s3c_rtc_alarmno, rtc); + + err_alarm_irq: +	platform_set_drvdata(pdev, NULL); +	rtc_device_unregister(rtc); +   err_nortc:  	s3c_rtc_enable(pdev, 0);  	clk_disable(rtc_clk); diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 9a81f778d6b..20687d55e7a 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)  	int res;  	u8 rd_reg; -#ifdef CONFIG_LOCKDEP -	/* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which -	 * we don't want and can't tolerate.  Although it might be -	 * friendlier not to borrow this thread context... 
-	 */ -	local_irq_enable(); -#endif -  	res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);  	if (res)  		goto out; @@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {  static int __devinit twl_rtc_probe(struct platform_device *pdev)  {  	struct rtc_device *rtc; -	int ret = 0; +	int ret = -EINVAL;  	int irq = platform_get_irq(pdev, 0);  	u8 rd_reg;  	if (irq <= 0) -		return -EINVAL; - -	rtc = rtc_device_register(pdev->name, -				  &pdev->dev, &twl_rtc_ops, THIS_MODULE); -	if (IS_ERR(rtc)) { -		ret = PTR_ERR(rtc); -		dev_err(&pdev->dev, "can't register RTC device, err %ld\n", -			PTR_ERR(rtc)); -		goto out0; - -	} - -	platform_set_drvdata(pdev, rtc); +		goto out1;  	ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);  	if (ret < 0) @@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)  	if (ret < 0)  		goto out1; -	ret = request_irq(irq, twl_rtc_interrupt, -				IRQF_TRIGGER_RISING, -				dev_name(&rtc->dev), rtc); -	if (ret < 0) { -		dev_err(&pdev->dev, "IRQ is not free.\n"); -		goto out1; -	} -  	if (twl_class_is_6030()) {  		twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,  			REG_INT_MSK_LINE_A); @@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)  	/* Check RTC module status, Enable if it is off */  	ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);  	if (ret < 0) -		goto out2; +		goto out1;  	if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {  		dev_info(&pdev->dev, "Enabling TWL-RTC.\n");  		rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;  		ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);  		if (ret < 0) -			goto out2; +			goto out1;  	}  	/* init cached IRQ enable bits */  	ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);  	if (ret < 0) +		goto out1; + +	rtc = rtc_device_register(pdev->name, +				  &pdev->dev, &twl_rtc_ops, THIS_MODULE); +	if (IS_ERR(rtc)) { +		ret = PTR_ERR(rtc); +		dev_err(&pdev->dev, "can't register RTC device, err %ld\n", +			PTR_ERR(rtc)); +		goto out1; +	} + +	ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt, +				   IRQF_TRIGGER_RISING, +				   dev_name(&rtc->dev), rtc); +	if (ret < 0) { +		dev_err(&pdev->dev, "IRQ is not free.\n");  		goto out2; +	} -	return ret; +	platform_set_drvdata(pdev, rtc); +	return 0;  out2: -	free_irq(irq, rtc); -out1:  	rtc_device_unregister(rtc); -out0: +out1:  	return ret;  } diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index eb4e034378c..f1a2016829f 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -249,6 +249,7 @@ static int dasd_ioctl_reset_profile(struct dasd_block *block)  static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)  {  	struct dasd_profile_info_t *data; +	int rc = 0;  	data = kmalloc(sizeof(*data), GFP_KERNEL);  	if (!data) @@ -279,11 +280,14 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)  		spin_unlock_bh(&block->profile.lock);  	} else {  		spin_unlock_bh(&block->profile.lock); -		return -EIO; +		rc = -EIO; +		goto out;  	}  	if (copy_to_user(argp, data, sizeof(*data))) -		return -EFAULT; -	return 0; +		rc = -EFAULT; +out: +	kfree(data); +	return rc;  }  #else  static int dasd_ioctl_reset_profile(struct dasd_block *block) diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index be55fb2b1b1..837e010299a 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -383,8 +383,10 @@ static int sclp_attach_storage(u8 id)  	switch (sccb->header.response_code) {  	
case 0x0020:  		set_bit(id, sclp_storage_ids); -		for (i = 0; i < sccb->assigned; i++) -			sclp_unassign_storage(sccb->entries[i] >> 16); +		for (i = 0; i < sccb->assigned; i++) { +			if (sccb->entries[i]) +				sclp_unassign_storage(sccb->entries[i] >> 16); +		}  		break;  	default:  		rc = -EIO; diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 76058a5166e..08c66035dd1 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -335,10 +335,9 @@ cio_ignore_write(struct file *file, const char __user *user_buf,  		return -EINVAL;  	if (user_len > 65536)  		user_len = 65536; -	buf = vmalloc (user_len + 1); /* maybe better use the stack? */ +	buf = vzalloc(user_len + 1); /* maybe better use the stack? */  	if (buf == NULL)  		return -ENOMEM; -	memset(buf, 0, user_len + 1);  	if (strncpy_from_user (buf, user_buf, user_len) < 0) {  		rc = -EFAULT; diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index cbde448f994..eb3140ee821 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv;  static int console_subchannel_in_use;  /* - * Use tpi to get a pending interrupt, call the interrupt handler and - * return a pointer to the subchannel structure. + * Use cio_tpi to get a pending interrupt and call the interrupt handler. + * Return non-zero if an interrupt was processed, zero otherwise.   */  static int cio_tpi(void)  { @@ -667,6 +667,10 @@ static int cio_tpi(void)  	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;  	if (tpi(NULL) != 1)  		return 0; +	if (tpi_info->adapter_IO) { +		do_adapter_IO(tpi_info->isc); +		return 1; +	}  	irb = (struct irb *)&S390_lowcore.irb;  	/* Store interrupt response block to lowcore. */  	if (tsch(tpi_info->schid, irb) != 0) diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index b7bd5b0cc7a..3868ab2397c 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_  	switch (retval) {  	case SCSI_MLQUEUE_HOST_BUSY:  		twa_free_request_id(tw_dev, request_id); +		twa_unmap_scsi_data(tw_dev, request_id);  		break;  	case 1:  		tw_dev->state[request_id] = TW_S_COMPLETED;  		twa_free_request_id(tw_dev, request_id); +		twa_unmap_scsi_data(tw_dev, request_id);  		SCpnt->result = (DID_ERROR << 16);  		done(SCpnt);  		retval = 0; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8d9dae89f06..3878b739508 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -837,6 +837,7 @@ config SCSI_ISCI  	# (temporary): known alpha quality driver  	depends on EXPERIMENTAL  	select SCSI_SAS_LIBSAS +	select SCSI_SAS_HOST_SMP  	---help---  	  This driver supports the 6Gb/s SAS capabilities of the storage  	  control unit found in the Intel(R) C600 series chipset. 
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3c08f5352b2..6153a66a8a3 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS)	+= qlogicfas408.o	qlogicfas.o  obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogicfas408.o  obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o   obj-$(CONFIG_SCSI_QLA_FC)	+= qla2xxx/ -obj-$(CONFIG_SCSI_QLA_ISCSI)	+= qla4xxx/ +obj-$(CONFIG_SCSI_QLA_ISCSI)	+= libiscsi.o qla4xxx/  obj-$(CONFIG_SCSI_LPFC)		+= lpfc/  obj-$(CONFIG_SCSI_BFA_FC)	+= bfa/  obj-$(CONFIG_SCSI_PAS16)	+= pas16.o diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e7d0d47b918..e5f2d7d9002 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)  	kfree(aac->queues);  	aac->queues = NULL;  	free_irq(aac->pdev->irq, aac); +	if (aac->msi) +		pci_disable_msi(aac->pdev);  	kfree(aac->fsa_dev);  	aac->fsa_dev = NULL;  	quirks = aac_get_driver_ident(index)->quirks; diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index c2049466060..957595a7a45 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -37,7 +37,6 @@  #include <linux/slab.h>  #include <linux/blkdev.h>  #include <linux/delay.h> -#include <linux/version.h>  #include <linux/completion.h>  #include <linux/time.h>  #include <linux/interrupt.h> diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx index 5e6620f8dab..6739069477d 100644 --- a/drivers/scsi/aic7xxx/Kconfig.aic79xx +++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx @@ -31,8 +31,7 @@ config AIC79XX_CMDS_PER_DEVICE  	on some devices.  The upper bound is 253.  0 disables tagged queueing.  	Per device tag depth can be controlled via the kernel command line -	"tag_info" option.  See drivers/scsi/aic7xxx/README.aic79xx -	for details. +	"tag_info" option.  See Documentation/scsi/aic79xx.txt for details.  config AIC79XX_RESET_DELAY_MS  	int "Initial bus reset delay in milli-seconds" diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx index 88da670a791..55ac55ee606 100644 --- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx +++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx @@ -36,8 +36,7 @@ config AIC7XXX_CMDS_PER_DEVICE  	on some devices.  The upper bound is 253.  0 disables tagged queueing.  	Per device tag depth can be controlled via the kernel command line -	"tag_info" option.  See drivers/scsi/aic7xxx/README.aic7xxx -	for details. +	"tag_info" option.  See Documentation/scsi/aic7xxx.txt for details.  
config AIC7XXX_RESET_DELAY_MS  	int "Initial bus reset delay in milli-seconds" diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c index 67eeba3bdb0..a16a77c8b9c 100644 --- a/drivers/scsi/aic94xx/aic94xx_dump.c +++ b/drivers/scsi/aic94xx/aic94xx_dump.c @@ -29,7 +29,7 @@   *   */ -#include "linux/pci.h" +#include <linux/pci.h>  #include "aic94xx.h"  #include "aic94xx_reg.h"  #include "aic94xx_reg_def.h" diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index bda999ad9f5..5e19a5f820e 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -27,7 +27,6 @@  #define __BFAD_DRV_H__  #include <linux/types.h> -#include <linux/version.h>  #include <linux/pci.h>  #include <linux/dma-mapping.h>  #include <linux/idr.h> diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 9ae80cd5953..dba72a4e6a1 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,  	nopout_wqe->itt = ((u16)task->itt |  			   (ISCSI_TASK_TYPE_MPATH <<  			    ISCSI_TMF_REQUEST_TYPE_SHIFT)); -	nopout_wqe->ttt = nopout_hdr->ttt; +	nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);  	nopout_wqe->flags = 0;  	if (!unsol)  		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index bd22041e278..1242c7c04a0 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -14,7 +14,6 @@  #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ -#include <linux/version.h>  #include <linux/module.h>  #include <linux/moduleparam.h>  #include <scsi/scsi_host.h> @@ -913,7 +912,7 @@ static void l2t_put(struct cxgbi_sock *csk)  	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;  	if (csk->l2t) { -		l2t_release(L2DATA(t3dev), csk->l2t); +		l2t_release(t3dev, csk->l2t);  		csk->l2t = NULL;  		cxgbi_sock_put(csk);  	} diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index ae13c4993aa..31c79bde697 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -13,7 +13,6 @@  #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ -#include <linux/version.h>  #include <linux/module.h>  #include <linux/moduleparam.h>  #include <scsi/scsi_host.h> diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index ba710e350ac..34c8d82b742 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -18,7 +18,6 @@   */  #include <linux/module.h> -#include <linux/version.h>  #include <linux/spinlock.h>  #include <linux/netdevice.h>  #include <linux/etherdevice.h> @@ -432,6 +431,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)  	u8 flogi_maddr[ETH_ALEN];  	const struct net_device_ops *ops; +	rtnl_lock(); +  	/*  	 * Don't listen for Ethernet packets anymore.  	 
* synchronize_net() ensures that the packet handlers are not running @@ -461,6 +462,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)  					" specific feature for LLD.\n");  	} +	rtnl_unlock(); +  	/* Release the self-reference taken during fcoe_interface_create() */  	fcoe_interface_put(fcoe);  } @@ -1951,11 +1954,8 @@ static void fcoe_destroy_work(struct work_struct *work)  	fcoe_if_destroy(port->lport);  	/* Do not tear down the fcoe interface for NPIV port */ -	if (!npiv) { -		rtnl_lock(); +	if (!npiv)  		fcoe_interface_cleanup(fcoe); -		rtnl_unlock(); -	}  	mutex_unlock(&fcoe_config_mutex);  } @@ -2009,8 +2009,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)  		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",  		       netdev->name);  		rc = -EIO; +		rtnl_unlock();  		fcoe_interface_cleanup(fcoe); -		goto out_nodev; +		goto out_nortnl;  	}  	/* Make this the "master" N_Port */ @@ -2027,6 +2028,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)  out_nodev:  	rtnl_unlock(); +out_nortnl:  	mutex_unlock(&fcoe_config_mutex);  	return rc;  } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ec61bdb833a..b200b736b00 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,  	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);  	removed[*nremoved] = h->dev[entry];  	(*nremoved)++; + +	/* +	 * New physical devices won't have target/lun assigned yet +	 * so we need to preserve the values in the slot we are replacing. +	 */ +	if (new_entry->target == -1) { +		new_entry->target = h->dev[entry]->target; +		new_entry->lun = h->dev[entry]->lun; +	} +  	h->dev[entry] = new_entry;  	added[*nadded] = new_entry;  	(*nadded)++; @@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,  }  static int hpsa_update_device_info(struct ctlr_info *h, -	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) +	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, +	unsigned char *is_OBDR_device)  { -#define OBDR_TAPE_INQ_SIZE 49 + +#define OBDR_SIG_OFFSET 43 +#define OBDR_TAPE_SIG "$DR-10" +#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) +#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) +  	unsigned char *inq_buff; +	unsigned char *obdr_sig;  	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);  	if (!inq_buff) @@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,  	else  		this_device->raid_level = RAID_UNKNOWN; +	if (is_OBDR_device) { +		/* See if this is a One-Button-Disaster-Recovery device +		 * by looking for "$DR-10" at offset 43 in inquiry data. 
+		 */ +		obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; +		*is_OBDR_device = (this_device->devtype == TYPE_ROM && +					strncmp(obdr_sig, OBDR_TAPE_SIG, +						OBDR_SIG_LEN) == 0); +	} +  	kfree(inq_buff);  	return 0; @@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,  		return 0;  	} -	if (hpsa_update_device_info(h, scsi3addr, this_device)) +	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))  		return 0;  	(*nmsa2xxx_enclosures)++;  	hpsa_set_bus_target_lun(this_device, bus, target, 0); @@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  	 */  	struct ReportLUNdata *physdev_list = NULL;  	struct ReportLUNdata *logdev_list = NULL; -	unsigned char *inq_buff = NULL;  	u32 nphysicals = 0;  	u32 nlogicals = 0;  	u32 ndev_allocated = 0; @@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  		GFP_KERNEL);  	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);  	logdev_list = kzalloc(reportlunsize, GFP_KERNEL); -	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);  	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); -	if (!currentsd || !physdev_list || !logdev_list || -		!inq_buff || !tmpdevice) { +	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {  		dev_err(&h->pdev->dev, "out of memory\n");  		goto out;  	} @@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  	/* adjust our table of devices */  	nmsa2xxx_enclosures = 0;  	for (i = 0; i < nphysicals + nlogicals + 1; i++) { -		u8 *lunaddrbytes; +		u8 *lunaddrbytes, is_OBDR = 0;  		/* Figure out where the LUN ID info is coming from */  		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, @@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  			continue;  		/* Get device type, vendor, model, device id */ -		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) +		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, +							&is_OBDR))  			continue; /* skip it if we can't talk to it. */  		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,  			tmpdevice); @@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  		hpsa_set_bus_target_lun(this_device, bus, target, lun);  		switch (this_device->devtype) { -		case TYPE_ROM: { +		case TYPE_ROM:  			/* We don't *really* support actual CD-ROM devices,  			 * just "One Button Disaster Recovery" tape drive  			 * which temporarily pretends to be a CD-ROM drive. @@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  			 * device by checking for "$DR-10" in bytes 43-48 of  			 * the inquiry data.  			 */ -				char obdr_sig[7]; -#define OBDR_TAPE_SIG "$DR-10" -				strncpy(obdr_sig, &inq_buff[43], 6); -				obdr_sig[6] = '\0'; -				if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) -					/* Not OBDR device, ignore it. 
*/ -					break; -			} -			ncurrent++; +			if (is_OBDR) +				ncurrent++;  			break;  		case TYPE_DISK:  			if (i < nphysicals) @@ -1947,7 +1965,6 @@ out:  	for (i = 0; i < ndev_allocated; i++)  		kfree(currentsd[i]);  	kfree(currentsd); -	kfree(inq_buff);  	kfree(physdev_list);  	kfree(logdev_list);  } diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 26072f1e985..6981b773a88 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost)  			break;  		case SCU_COMPLETION_TYPE_EVENT: +			sci_controller_event_completion(ihost, ent); +			break; +  		case SCU_COMPLETION_TYPE_NOTIFY: {  			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<  				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); @@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data)  	struct isci_request *request;  	struct isci_request *next_request;  	struct sas_task     *task; +	u16 active;  	INIT_LIST_HEAD(&completed_request_list);  	INIT_LIST_HEAD(&errored_request_list); @@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data)  		}  	} +	/* the coalesence timeout doubles at each encoding step, so +	 * update it based on the ilog2 value of the outstanding requests +	 */ +	active = isci_tci_active(ihost); +	writel(SMU_ICC_GEN_VAL(NUMBER, active) | +	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), +	       &ihost->smu_registers->interrupt_coalesce_control);  }  /** @@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)  	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);  	/* set the default interrupt coalescence number and timeout value. */ -	sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); +	sci_controller_set_interrupt_coalescence(ihost, 0, 0);  }  static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 062101a39f7..9f33831a2f0 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)  #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))  #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) +/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */ +#define ISCI_COALESCE_BASE 9 +  /* expander attached sata devices require 3 rnc slots */  static inline int sci_remote_device_node_count(struct isci_remote_device *idev)  { diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 61e0d09e2b5..29aa34efb0f 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -59,10 +59,19 @@  #include <linux/firmware.h>  #include <linux/efi.h>  #include <asm/string.h> +#include <scsi/scsi_host.h>  #include "isci.h"  #include "task.h"  #include "probe_roms.h" +#define MAJ 1 +#define MIN 0 +#define BUILD 0 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ +	__stringify(BUILD) + +MODULE_VERSION(DRV_VERSION); +  static struct scsi_transport_template *isci_transport_template;  static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { @@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1;  module_param(max_concurr_spinup, byte, 0);  MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); +static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); +	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); +	struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); + +	return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); +} + +static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); + +struct device_attribute *isci_host_attrs[] = { +	&dev_attr_isci_id, +	NULL +}; +  static struct scsi_host_template isci_sht = {  	.module				= THIS_MODULE, @@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = {  	.slave_alloc			= sas_slave_alloc,  	.target_destroy			= sas_target_destroy,  	.ioctl				= sas_ioctl, +	.shost_attrs			= isci_host_attrs,  };  static struct sas_domain_function_template isci_transport_ops  = { @@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)  	return 0;  } -static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) -{ -	struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); -	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); -	struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); - -	return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); -} - -static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); -  static void isci_unregister(struct isci_host *isci_host)  {  	struct Scsi_Host *shost; @@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host)  		return;  	shost = isci_host->shost; -	device_remove_file(&shost->shost_dev, &dev_attr_isci_id);  	sas_unregister_ha(&isci_host->sas_ha); @@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)  	if (err)  		goto err_shost_remove; -	err = device_create_file(&shost->shost_dev, &dev_attr_isci_id); -	if (err) -		goto err_unregister_ha; -  	return isci_host; - err_unregister_ha: -	sas_unregister_ha(&(isci_host->sas_ha));   err_shost_remove:  	scsi_remove_host(shost);   err_shost: @@ -540,7 +548,8 @@ static __init int isci_init(void)  {  	int err; -	pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); +	pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n", +		DRV_NAME, DRV_VERSION);  	isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);  	if (!isci_transport_template) diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index 79313a7a235..09e61134037 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,  	u32 parity_count = 0;  	u32 llctl, link_rate;  	u32 clksm_value = 0; +	u32 sp_timeouts = 0;  	iphy->link_layer_registers = reg; @@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,  	llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);  	writel(llctl, &iphy->link_layer_registers->link_layer_control); +	sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); + +	/* Clear the default 0x36 (54us) RATE_CHANGE timeout value. 
*/ +	sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); + +	/* Set RATE_CHANGE timeout value to 0x3B (59us).  This ensures SCU can +	 * lock with 3Gb drive when SCU max rate is set to 1.5Gb. +	 */ +	sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); + +	writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); +  	if (is_a2(ihost->pdev)) {  		/* Program the max ARB time for the PHY to 700us so we inter-operate with  		 * the PMC expander which shuts down PHYs if the expander PHY generates too @@ -695,7 +708,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)  				 __func__,  				 event_code); -			return SCI_FAILURE;; +			return SCI_FAILURE;  		}  		return SCI_SUCCESS;  	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h index 9b266c7428e..00afc738bbe 100644 --- a/drivers/scsi/isci/registers.h +++ b/drivers/scsi/isci/registers.h @@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers {  #define SCU_AFE_XCVRCR_OFFSET       0x00DC  #define SCU_AFE_LUTCR_OFFSET        0x00E0 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT          (0UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK           (0x000000FFUL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT                 (8UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK                  (0x0000FF00UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT         (16UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK          (0x00FF0000UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT              (24UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK               (0xFF000000UL) + +#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \ +	SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value) +  #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT                  (0)  #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK                   (0x00000003)  #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1                   (0) diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index a46e07ac789..b5d3a8c4d32 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq)  		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);  		return SCI_SUCCESS;  	case SCI_REQ_TASK_WAIT_TC_RESP: +		/* The task frame was already confirmed to have been +		 * sent by the SCU HW.  Since the state machine is +		 * now only waiting for the task response itself, +		 * abort the request and complete it immediately +		 * and don't wait for the task response. +		 */  		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);  		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);  		return SCI_SUCCESS;  	case SCI_REQ_ABORTING: -		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); -		return SCI_SUCCESS; +		/* If a request has a termination requested twice, return +		 * a failure indication, since HW confirmation of the first +		 * abort is still outstanding. 
+		 */  	case SCI_REQ_COMPLETED:  	default:  		dev_warn(&ireq->owning_controller->pdev->dev, @@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion(  	}  } -static void isci_request_process_stp_response(struct sas_task *task, -					      void *response_buffer) +static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)  { -	struct dev_to_host_fis *d2h_reg_fis = response_buffer;  	struct task_status_struct *ts = &task->task_status;  	struct ata_task_resp *resp = (void *)&ts->buf[0]; -	resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); -	memcpy(&resp->ending_fis[0], response_buffer + 16, 24); +	resp->frame_len = sizeof(*fis); +	memcpy(resp->ending_fis, fis, sizeof(*fis));  	ts->buf_valid_size = sizeof(*resp); -	/** -	 * If the device fault bit is set in the status register, then +	/* If the device fault bit is set in the status register, then  	 * set the sense data and return.  	 */ -	if (d2h_reg_fis->status & ATA_DF) +	if (fis->status & ATA_DF)  		ts->stat = SAS_PROTO_RESPONSE;  	else  		ts->stat = SAM_STAT_GOOD; @@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,  {  	struct sas_task *task = isci_request_access_task(request);  	struct ssp_response_iu *resp_iu; -	void *resp_buf;  	unsigned long task_flags;  	struct isci_remote_device *idev = isci_lookup_device(task->dev);  	enum service_response response       = SAS_TASK_UNDELIVERED; @@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,  				task);  			if (sas_protocol_ata(task->task_proto)) { -				resp_buf = &request->stp.rsp; -				isci_request_process_stp_response(task, -								  resp_buf); +				isci_process_stp_response(task, &request->stp.rsp);  			} else if (SAS_PROTOCOL_SSP == task->task_proto) {  				/* crack the iu response buffer. */ diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index e9e1e2abacb..16f88ab939c 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)  	 */  	buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;  	header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); -	size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); +	size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);  	/*  	 * The Unsolicited Frame buffers are set at the start of the UF diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 31cb9506f52..75d896686f5 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -214,7 +214,7 @@ struct sci_uf_address_table_array {  	 * starting address of the UF address table.  	 * 64-bit pointers are required by the hardware.  	 
*/ -	dma_addr_t *array; +	u64 *array;  	/**  	 * This field specifies the physical address location for the UF diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 01ff082dc34..d261e982a2f 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,  	 */  	error = lport->tt.frame_send(lport, fp); +	if (fh->fh_type == FC_TYPE_BLS) +		return error; +  	/*  	 * Update the exchange and sequence flags,  	 * assuming all frames for the sequence have been sent. @@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp,  }  /** - * fc_seq_exch_abort() - Abort an exchange and sequence - * @req_sp:	The sequence to be aborted + * fc_exch_abort_locked() - Abort an exchange + * @ep:	The exchange to be aborted   * @timer_msec: The period of time to wait before aborting   * - * Generally called because of a timeout or an abort from the upper layer. + * Locking notes:  Called with exch lock held + * + * Return value: 0 on success else error code   */ -static int fc_seq_exch_abort(const struct fc_seq *req_sp, -			     unsigned int timer_msec) +static int fc_exch_abort_locked(struct fc_exch *ep, +				unsigned int timer_msec)  {  	struct fc_seq *sp; -	struct fc_exch *ep;  	struct fc_frame *fp;  	int error; -	ep = fc_seq_exch(req_sp); - -	spin_lock_bh(&ep->ex_lock);  	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || -	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { -		spin_unlock_bh(&ep->ex_lock); +	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))  		return -ENXIO; -	}  	/*  	 * Send the abort on a new sequence if possible.  	 */  	sp = fc_seq_start_next_locked(&ep->seq); -	if (!sp) { -		spin_unlock_bh(&ep->ex_lock); +	if (!sp)  		return -ENOMEM; -	}  	ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;  	if (timer_msec)  		fc_exch_timer_set_locked(ep, timer_msec); -	spin_unlock_bh(&ep->ex_lock);  	/*  	 * If not logged into the fabric, don't send ABTS but leave @@ -633,6 +629,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,  }  /** + * fc_seq_exch_abort() - Abort an exchange and sequence + * @req_sp:	The sequence to be aborted + * @timer_msec: The period of time to wait before aborting + * + * Generally called because of a timeout or an abort from the upper layer. 
+ * + * Return value: 0 on success else error code + */ +static int fc_seq_exch_abort(const struct fc_seq *req_sp, +			     unsigned int timer_msec) +{ +	struct fc_exch *ep; +	int error; + +	ep = fc_seq_exch(req_sp); +	spin_lock_bh(&ep->ex_lock); +	error = fc_exch_abort_locked(ep, timer_msec); +	spin_unlock_bh(&ep->ex_lock); +	return error; +} + +/**   * fc_exch_timeout() - Handle exchange timer expiration   * @work: The work_struct identifying the exchange that timed out   */ @@ -1715,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep)  	int rc = 1;  	spin_lock_bh(&ep->ex_lock); +	fc_exch_abort_locked(ep, 0);  	ep->state |= FC_EX_RST_CLEANUP;  	if (cancel_delayed_work(&ep->timeout_work))  		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */ @@ -1962,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,  	struct fc_exch *ep;  	struct fc_seq *sp = NULL;  	struct fc_frame_header *fh; +	struct fc_fcp_pkt *fsp = NULL;  	int rc = 1;  	ep = fc_exch_alloc(lport, fp); @@ -1984,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,  	fc_exch_setup_hdr(ep, fp, ep->f_ctl);  	sp->cnt++; -	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) +	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) { +		fsp = fr_fsp(fp);  		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); +	}  	if (unlikely(lport->tt.frame_send(lport, fp)))  		goto err; @@ -1999,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,  	spin_unlock_bh(&ep->ex_lock);  	return sp;  err: -	fc_fcp_ddp_done(fr_fsp(fp)); +	if (fsp) +		fc_fcp_ddp_done(fsp);  	rc = fc_exch_done_locked(ep);  	spin_unlock_bh(&ep->ex_lock);  	if (!rc) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index afb63c84314..4c41ee816f0 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2019,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)  	struct fc_fcp_internal *si;  	int rc = FAILED;  	unsigned long flags; +	int rval; + +	rval = fc_block_scsi_eh(sc_cmd); +	if (rval) +		return rval;  	lport = shost_priv(sc_cmd->device->host);  	if (lport->state != LPORT_ST_READY) @@ -2068,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)  	int rc = FAILED;  	int rval; -	rval = fc_remote_port_chkready(rport); +	rval = fc_block_scsi_eh(sc_cmd);  	if (rval) -		goto out; +		return rval;  	lport = shost_priv(sc_cmd->device->host); @@ -2116,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)  	FC_SCSI_DBG(lport, "Resetting host\n"); +	fc_block_scsi_eh(sc_cmd); +  	lport->tt.lport_reset(lport);  	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;  	while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e55ed9cf23f..628f347404f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -88,6 +88,7 @@   */  #include <linux/timer.h> +#include <linux/delay.h>  #include <linux/slab.h>  #include <asm/unaligned.h> @@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport)  			   FCH_EVT_LIPRESET, 0);  	fc_vports_linkchange(lport);  	fc_lport_reset_locked(lport); -	if (lport->link_up) +	if (lport->link_up) { +		/* +		 * Wait upto resource allocation time out before +		 * doing re-login since incomplete FIP exchanged +		 * from last session may collide with exchanges +		 * in new session. 
+		 */ +		msleep(lport->r_a_tov);  		fc_lport_enter_flogi(lport); +	}  }  /** diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f84084bba2f..16ad97df5ba 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,  	list_for_each_entry(ch, &ex->children, siblings) {  		if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {  			res = sas_find_bcast_dev(ch, src_dev); -			if (src_dev) +			if (*src_dev)  				return res;  		}  	} @@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,  		sas_disable_routing(parent, phy->attached_sas_addr);  	}  	memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); -	sas_port_delete_phy(phy->port, phy->phy); -	if (phy->port->num_phys == 0) -		sas_port_delete(phy->port); -	phy->port = NULL; +	if (phy->port) { +		sas_port_delete_phy(phy->port, phy->phy); +		if (phy->port->num_phys == 0) +			sas_port_delete(phy->port); +		phy->port = NULL; +	}  }  static int sas_discover_bfs_by_root_level(struct domain_device *root, diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 2e6619eff3e..8883ca36f93 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -67,7 +67,7 @@   *   * NEC	MegaRAID PCI Express ROMB	1000	0408	1033	8287   * - * For history of changes, see Documentation/ChangeLog.megaraid + * For history of changes, see Documentation/scsi/ChangeLog.megaraid   */  #include <linux/slab.h> diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 83035bd1c48..6825772cfd6 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -42,7 +42,6 @@   * USA.   */ -#include <linux/version.h>  #include <linux/kernel.h>  #include <linux/module.h>  #include <linux/errno.h> diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index 6861244249a..2b1101076cf 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c @@ -41,7 +41,6 @@   * USA.   */ -#include <linux/version.h>  #include <linux/module.h>  #include <linux/kernel.h>  #include <linux/init.h> diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 38ed0260959..246d5fbc6e5 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -42,7 +42,6 @@   * USA.   */ -#include <linux/version.h>  #include <linux/kernel.h>  #include <linux/module.h>  #include <linux/errno.h> diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 6abd2fcc43e..5202de3f3d3 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -41,7 +41,6 @@   * USA.   
*/ -#include <linux/version.h>  #include <linux/module.h>  #include <linux/kernel.h>  #include <linux/init.h> diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 44d7885a4a1..44b47451322 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -43,7 +43,6 @@  #include <scsi/scsi.h>  #include <scsi/scsi_tcq.h>  #include <scsi/sas_ata.h> -#include <linux/version.h>  #include "mv_defs.h"  #define DRV_NAME		"mvsas" diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index d079f9a3c6b..b86db84d6f3 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -39,7 +39,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h>  #include <linux/hdreg.h> -#include <linux/version.h>  #include <linux/io.h>  #include <linux/slab.h>  #include <asm/irq.h> diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index f920baf3ff2..ca496c7474e 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h @@ -24,7 +24,6 @@  #ifndef _PMCRAID_H  #define _PMCRAID_H -#include <linux/version.h>  #include <linux/types.h>  #include <linux/completion.h>  #include <linux/list.h> diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 7836eb01c7f..a31e05f3bfd 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1786,13 +1786,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)  			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);  	} -	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { +	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {  		if (ha->fw_attributes & BIT_4) { +			int prot = 0;  			vha->flags.difdix_supported = 1;  			ql_dbg(ql_dbg_user, vha, 0x7082,  			    "Registered for DIF/DIX type 1 and 3 protection.\n"); +			if (ql2xenabledif == 1) +				prot = SHOST_DIX_TYPE0_PROTECTION;  			scsi_host_set_prot(vha->host, -			    SHOST_DIF_TYPE1_PROTECTION +			    prot | SHOST_DIF_TYPE1_PROTECTION  			    | SHOST_DIF_TYPE2_PROTECTION  			    | SHOST_DIF_TYPE3_PROTECTION  			    | SHOST_DIX_TYPE1_PROTECTION diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 2155071f310..d79cd8a5f83 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -8,24 +8,24 @@  /*   * Table for showing the current message id in use for particular level   * Change this table for addition of log/debug messages. 
- * ----------------------------------------------------- - * |             Level            |   Last Value Used  | - * ----------------------------------------------------- - * | Module Init and Probe        |       0x0116       | - * | Mailbox commands             |       0x111e       | - * | Device Discovery             |       0x2083       | - * | Queue Command and IO tracing |       0x302e       | - * | DPC Thread                   |       0x401c       | - * | Async Events                 |       0x5059       | - * | Timer Routines               |       0x600d       | - * | User Space Interactions      |       0x709c       | - * | Task Management              |       0x8043       | - * | AER/EEH                      |       0x900f       | - * | Virtual Port                 |       0xa007       | - * | ISP82XX Specific             |       0xb027       | - * | MultiQ                       |       0xc00b       | - * | Misc                         |       0xd00b       | - * ----------------------------------------------------- + * ---------------------------------------------------------------------- + * |             Level            |   Last Value Used  |     Holes	| + * ---------------------------------------------------------------------- + * | Module Init and Probe        |       0x0116       |  		| + * | Mailbox commands             |       0x1126       |		| + * | Device Discovery             |       0x2083       |		| + * | Queue Command and IO tracing |       0x302e       |     0x3008     | + * | DPC Thread                   |       0x401c       |		| + * | Async Events                 |       0x5059       |		| + * | Timer Routines               |       0x600d       |		| + * | User Space Interactions      |       0x709d       |		| + * | Task Management              |       0x8041       |    		| + * | AER/EEH                      |       0x900f       |		| + * | Virtual Port                 |       0xa007       |		| + * | ISP82XX Specific             |       0xb04f       |    		| + * | MultiQ                       |       0xc00b       |		| + * | Misc                         |       0xd00b       |		| + * ----------------------------------------------------------------------   */  #include "qla_def.h" diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index cc5a79259d3..a03eaf40f37 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2529,6 +2529,7 @@ struct qla_hw_data {  #define DT_ISP8021			BIT_14  #define DT_ISP_LAST			(DT_ISP8021 << 1) +#define DT_T10_PI                       BIT_25  #define DT_IIDMA                        BIT_26  #define DT_FWI2                         BIT_27  #define DT_ZIO_SUPPORTED                BIT_28 @@ -2572,6 +2573,7 @@ struct qla_hw_data {  #define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha))  #define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) +#define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)  #define IS_IIDMA_CAPABLE(ha)    ((ha)->device_type & DT_IIDMA)  #define IS_FWI2_CAPABLE(ha)     ((ha)->device_type & DT_FWI2)  #define IS_ZIO_SUPPORTED(ha)    ((ha)->device_type & DT_ZIO_SUPPORTED) diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 691783abfb6..aa69486dc06 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -537,6 +537,11 @@ struct sts_entry_24xx {  	/*  	 * If DIF Error is set in comp_status, these additional fields are  	 * defined: +	 * +	 * !!! 
NOTE: Firmware sends expected/actual DIF data in big endian +	 * format; but all of the "data" field gets swab32-d in the beginning +	 * of qla2x00_status_entry(). +	 *  	 * &data[10] : uint8_t report_runt_bg[2];	- computed guard  	 * &data[12] : uint8_t actual_dif[8];		- DIF Data received  	 * &data[20] : uint8_t expected_dif[8];		- DIF Data computed diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index def694271bf..37da04d3db2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3838,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)  		req = vha->req;  	rsp = req->rsp; -	atomic_set(&vha->loop_state, LOOP_UPDATE);  	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);  	if (vha->flags.online) {  		if (!(rval = qla2x00_fw_ready(vha))) {  			/* Wait at most MAX_TARGET RSCNs for a stable link. */  			wait_time = 256;  			do { -				atomic_set(&vha->loop_state, LOOP_UPDATE); -  				/* Issue a marker after FW becomes ready. */  				qla2x00_marker(vha, req, rsp, 0, 0,  					MK_SYNC_ALL); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index d2e904bc21c..9902834e0b7 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -102,3 +102,32 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)  		    fcport->d_id.b.al_pa);  	}  } + +static inline int +qla2x00_hba_err_chk_enabled(srb_t *sp) +{ +	/* +	 * Uncomment when corresponding SCSI changes are done. +	 * +	if (!sp->cmd->prot_chk) +		return 0; +	 * +	 */ + +	switch (scsi_get_prot_op(sp->cmd)) { +	case SCSI_PROT_READ_STRIP: +	case SCSI_PROT_WRITE_INSERT: +		if (ql2xenablehba_err_chk >= 1) +			return 1; +		break; +	case SCSI_PROT_READ_PASS: +	case SCSI_PROT_WRITE_PASS: +		if (ql2xenablehba_err_chk >= 2) +			return 1; +		break; +	case SCSI_PROT_READ_INSERT: +	case SCSI_PROT_WRITE_STRIP: +		return 1; +	} +	return 0; +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 49d6906af88..dbec89622a0 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -709,20 +709,28 @@ struct fw_dif_context {   *   */  static inline void -qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, +qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,      unsigned int protcnt)  { -	struct sd_dif_tuple *spt; +	struct scsi_cmnd *cmd = sp->cmd;  	scsi_qla_host_t *vha = shost_priv(cmd->device->host); -	unsigned char op = scsi_get_prot_op(cmd);  	switch (scsi_get_prot_type(cmd)) { -	/* For TYPE 0 protection: no checking */  	case SCSI_PROT_DIF_TYPE0: -		pkt->ref_tag_mask[0] = 0x00; -		pkt->ref_tag_mask[1] = 0x00; -		pkt->ref_tag_mask[2] = 0x00; -		pkt->ref_tag_mask[3] = 0x00; +		/* +		 * No check for ql2xenablehba_err_chk, as it would be an +		 * I/O error if hba tag generation is not done. 
+		 */ +		pkt->ref_tag = cpu_to_le32((uint32_t) +		    (0xffffffff & scsi_get_lba(cmd))); + +		if (!qla2x00_hba_err_chk_enabled(sp)) +			break; + +		pkt->ref_tag_mask[0] = 0xff; +		pkt->ref_tag_mask[1] = 0xff; +		pkt->ref_tag_mask[2] = 0xff; +		pkt->ref_tag_mask[3] = 0xff;  		break;  	/* @@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,  	 * match LBA in CDB + N  	 */  	case SCSI_PROT_DIF_TYPE2: -		if (!ql2xenablehba_err_chk) -			break; - -		if (scsi_prot_sg_count(cmd)) { -			spt = page_address(sg_page(scsi_prot_sglist(cmd))) + -			    scsi_prot_sglist(cmd)[0].offset; -			pkt->app_tag = swab32(spt->app_tag); -			pkt->app_tag_mask[0] =  0xff; -			pkt->app_tag_mask[1] =  0xff; -		} +		pkt->app_tag = __constant_cpu_to_le16(0); +		pkt->app_tag_mask[0] = 0x0; +		pkt->app_tag_mask[1] = 0x0;  		pkt->ref_tag = cpu_to_le32((uint32_t)  		    (0xffffffff & scsi_get_lba(cmd))); +		if (!qla2x00_hba_err_chk_enabled(sp)) +			break; +  		/* enable ALL bytes of the ref tag */  		pkt->ref_tag_mask[0] = 0xff;  		pkt->ref_tag_mask[1] = 0xff; @@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,  	 * 16 bit app tag.  	 */  	case SCSI_PROT_DIF_TYPE1: -		if (!ql2xenablehba_err_chk) +		pkt->ref_tag = cpu_to_le32((uint32_t) +		    (0xffffffff & scsi_get_lba(cmd))); +		pkt->app_tag = __constant_cpu_to_le16(0); +		pkt->app_tag_mask[0] = 0x0; +		pkt->app_tag_mask[1] = 0x0; + +		if (!qla2x00_hba_err_chk_enabled(sp))  			break; -		if (protcnt && (op == SCSI_PROT_WRITE_STRIP || -		    op == SCSI_PROT_WRITE_PASS)) { -			spt = page_address(sg_page(scsi_prot_sglist(cmd))) + -			    scsi_prot_sglist(cmd)[0].offset; -			ql_dbg(ql_dbg_io, vha, 0x3008, -			    "LBA from user %p, lba = 0x%x for cmd=%p.\n", -			    spt, (int)spt->ref_tag, cmd); -			pkt->ref_tag = swab32(spt->ref_tag); -			pkt->app_tag_mask[0] = 0x0; -			pkt->app_tag_mask[1] = 0x0; -		} else { -			pkt->ref_tag = cpu_to_le32((uint32_t) -			    (0xffffffff & scsi_get_lba(cmd))); -			pkt->app_tag = __constant_cpu_to_le16(0); -			pkt->app_tag_mask[0] = 0x0; -			pkt->app_tag_mask[1] = 0x0; -		}  		/* enable ALL bytes of the ref tag */  		pkt->ref_tag_mask[0] = 0xff;  		pkt->ref_tag_mask[1] = 0xff; @@ -798,8 +791,162 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,  	    scsi_get_prot_type(cmd), cmd);  } +struct qla2_sgx { +	dma_addr_t		dma_addr;	/* OUT */ +	uint32_t		dma_len;	/* OUT */ + +	uint32_t		tot_bytes;	/* IN */ +	struct scatterlist	*cur_sg;	/* IN */ + +	/* for book keeping, bzero on initial invocation */ +	uint32_t		bytes_consumed; +	uint32_t		num_bytes; +	uint32_t		tot_partial; + +	/* for debugging */ +	uint32_t		num_sg; +	srb_t			*sp; +};  static int +qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, +	uint32_t *partial) +{ +	struct scatterlist *sg; +	uint32_t cumulative_partial, sg_len; +	dma_addr_t sg_dma_addr; + +	if (sgx->num_bytes == sgx->tot_bytes) +		return 0; + +	sg = sgx->cur_sg; +	cumulative_partial = sgx->tot_partial; + +	sg_dma_addr = sg_dma_address(sg); +	sg_len = sg_dma_len(sg); + +	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; + +	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { +		sgx->dma_len = (blk_sz - cumulative_partial); +		sgx->tot_partial = 0; +		sgx->num_bytes += blk_sz; +		*partial = 0; +	} else { +		sgx->dma_len = sg_len - sgx->bytes_consumed; +		sgx->tot_partial += sgx->dma_len; +		*partial = 1; +	} + +	sgx->bytes_consumed += sgx->dma_len; + +	if (sg_len == 
sgx->bytes_consumed) { +		sg = sg_next(sg); +		sgx->num_sg++; +		sgx->cur_sg = sg; +		sgx->bytes_consumed = 0; +	} + +	return 1; +} + +static int +qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, +	uint32_t *dsd, uint16_t tot_dsds) +{ +	void *next_dsd; +	uint8_t avail_dsds = 0; +	uint32_t dsd_list_len; +	struct dsd_dma *dsd_ptr; +	struct scatterlist *sg_prot; +	uint32_t *cur_dsd = dsd; +	uint16_t	used_dsds = tot_dsds; + +	uint32_t	prot_int; +	uint32_t	partial; +	struct qla2_sgx sgx; +	dma_addr_t	sle_dma; +	uint32_t	sle_dma_len, tot_prot_dma_len = 0; +	struct scsi_cmnd *cmd = sp->cmd; + +	prot_int = cmd->device->sector_size; + +	memset(&sgx, 0, sizeof(struct qla2_sgx)); +	sgx.tot_bytes = scsi_bufflen(sp->cmd); +	sgx.cur_sg = scsi_sglist(sp->cmd); +	sgx.sp = sp; + +	sg_prot = scsi_prot_sglist(sp->cmd); + +	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { + +		sle_dma = sgx.dma_addr; +		sle_dma_len = sgx.dma_len; +alloc_and_fill: +		/* Allocate additional continuation packets? */ +		if (avail_dsds == 0) { +			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? +					QLA_DSDS_PER_IOCB : used_dsds; +			dsd_list_len = (avail_dsds + 1) * 12; +			used_dsds -= avail_dsds; + +			/* allocate tracking DS */ +			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); +			if (!dsd_ptr) +				return 1; + +			/* allocate new list */ +			dsd_ptr->dsd_addr = next_dsd = +			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, +				&dsd_ptr->dsd_list_dma); + +			if (!next_dsd) { +				/* +				 * Need to cleanup only this dsd_ptr, rest +				 * will be done by sp_free_dma() +				 */ +				kfree(dsd_ptr); +				return 1; +			} + +			list_add_tail(&dsd_ptr->list, +			    &((struct crc_context *)sp->ctx)->dsd_list); + +			sp->flags |= SRB_CRC_CTX_DSD_VALID; + +			/* add new list to cmd iocb or last list */ +			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); +			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); +			*cur_dsd++ = dsd_list_len; +			cur_dsd = (uint32_t *)next_dsd; +		} +		*cur_dsd++ = cpu_to_le32(LSD(sle_dma)); +		*cur_dsd++ = cpu_to_le32(MSD(sle_dma)); +		*cur_dsd++ = cpu_to_le32(sle_dma_len); +		avail_dsds--; + +		if (partial == 0) { +			/* Got a full protection interval */ +			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; +			sle_dma_len = 8; + +			tot_prot_dma_len += sle_dma_len; +			if (tot_prot_dma_len == sg_dma_len(sg_prot)) { +				tot_prot_dma_len = 0; +				sg_prot = sg_next(sg_prot); +			} + +			partial = 1; /* So as to not re-enter this block */ +			goto alloc_and_fill; +		} +	} +	/* Null termination */ +	*cur_dsd++ = 0; +	*cur_dsd++ = 0; +	*cur_dsd++ = 0; +	return 0; +} +static int  qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,  	uint16_t tot_dsds)  { @@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  	struct scsi_cmnd	*cmd;  	struct scatterlist	*cur_seg;  	int			sgc; -	uint32_t		total_bytes; +	uint32_t		total_bytes = 0;  	uint32_t		data_bytes;  	uint32_t		dif_bytes;  	uint8_t			bundling = 1; @@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  		    __constant_cpu_to_le16(CF_READ_DATA);  	} -	tot_prot_dsds = scsi_prot_sg_count(cmd); -	if (!tot_prot_dsds) +	if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || +	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || +	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || +	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))  		bundling = 0;  	/* Allocate CRC 
context from global pool */ @@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); -	qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) +	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)  	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);  	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); @@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  		fcp_cmnd->additional_cdb_len |= 2;  	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); -	host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));  	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);  	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);  	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( @@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */  	/* Compute dif len and adjust data len to incude protection */ -	total_bytes = data_bytes;  	dif_bytes = 0;  	blk_size = cmd->device->sector_size; -	if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { -		dif_bytes = (data_bytes / blk_size) * 8; -		total_bytes += dif_bytes; +	dif_bytes = (data_bytes / blk_size) * 8; + +	switch (scsi_get_prot_op(sp->cmd)) { +	case SCSI_PROT_READ_INSERT: +	case SCSI_PROT_WRITE_STRIP: +	    total_bytes = data_bytes; +	    data_bytes += dif_bytes; +	    break; + +	case SCSI_PROT_READ_STRIP: +	case SCSI_PROT_WRITE_INSERT: +	case SCSI_PROT_READ_PASS: +	case SCSI_PROT_WRITE_PASS: +	    total_bytes = data_bytes + dif_bytes; +	    break; +	default: +	    BUG();  	} -	if (!ql2xenablehba_err_chk) +	if (!qla2x00_hba_err_chk_enabled(sp))  		fw_prot_opts |= 0x10; /* Disable Guard tag checking */  	if (!bundling) { @@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  	cmd_pkt->control_flags |=  	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); -	if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, + +	if (!bundling && tot_prot_dsds) { +		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, +		    cur_dsd, tot_dsds)) +			goto crc_queuing_error; +	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,  	    (tot_dsds - tot_prot_dsds)))  		goto crc_queuing_error; @@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp)  			goto queuing_error;  		else  			sp->flags |= SRB_DMA_VALID; + +		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || +		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { +			struct qla2_sgx sgx; +			uint32_t	partial; + +			memset(&sgx, 0, sizeof(struct qla2_sgx)); +			sgx.tot_bytes = scsi_bufflen(cmd); +			sgx.cur_sg = scsi_sglist(cmd); +			sgx.sp = sp; + +			nseg = 0; +			while (qla24xx_get_one_block_sg( +			    cmd->device->sector_size, &sgx, &partial)) +				nseg++; +		}  	} else  		nseg = 0; @@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp)  			goto queuing_error;  		else  			sp->flags |= SRB_CRC_PROT_DMA_VALID; + +		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || +		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { +			nseg = scsi_bufflen(cmd) / cmd->device->sector_size; +		}  	} else {  		nseg = 0;  	} @@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp)  	/* Build header part of command packet (excluding the OPCODE). 
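As a sanity check on the protection-byte arithmetic in qla24xx_build_scsi_crc_2_iocbs() above, the short userspace sketch below mirrors the same calculation for assumed values (a 64 KB transfer with 512-byte sectors; illustrative only): eight bytes of protection data per logical block, counted on top of the data for the strip/insert/pass cases and tracked separately for the DIX-only READ_INSERT/WRITE_STRIP cases.

/* Illustrative userspace sketch of the DIF byte accounting above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t data_bytes = 64 * 1024;	/* assumed transfer size */
	uint32_t blk_size   = 512;		/* assumed sector size */
	uint32_t dif_bytes  = (data_bytes / blk_size) * 8;

	/* READ_INSERT/WRITE_STRIP: protection stays on the host side,
	 * so the data count and the protection count are kept apart. */
	printf("DIX only    : data=%u dif=%u host side=%u\n",
	       data_bytes, dif_bytes, data_bytes + dif_bytes);

	/* STRIP/INSERT/PASS: the 8-byte interval tuples travel with the
	 * data, so the total grows by dif_bytes. */
	printf("DIF on wire : total=%u\n", data_bytes + dif_bytes);
	return 0;
}

With the assumed numbers, dif_bytes works out to 1024 (128 blocks times 8 bytes).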
*/  	req->current_outstanding_cmd = handle;  	req->outstanding_cmds[handle] = sp; +	sp->handle = handle;  	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;  	req->cnt -= req_cnt; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index b16b7725dee..8a7591f035e 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -719,7 +719,6 @@ skip_rio:  			vha->flags.rscn_queue_overflow = 1;  		} -		atomic_set(&vha->loop_state, LOOP_UPDATE);  		atomic_set(&vha->loop_down_timer, 0);  		vha->flags.management_server_logged_in = 0; @@ -1435,25 +1434,27 @@ struct scsi_dif_tuple {   * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST   * to indicate to the kernel that the HBA detected error.   */ -static inline void +static inline int  qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)  {  	struct scsi_qla_host *vha = sp->fcport->vha;  	struct scsi_cmnd *cmd = sp->cmd; -	struct scsi_dif_tuple	*ep = -			(struct scsi_dif_tuple *)&sts24->data[20]; -	struct scsi_dif_tuple	*ap = -			(struct scsi_dif_tuple *)&sts24->data[12]; +	uint8_t		*ap = &sts24->data[12]; +	uint8_t		*ep = &sts24->data[20];  	uint32_t	e_ref_tag, a_ref_tag;  	uint16_t	e_app_tag, a_app_tag;  	uint16_t	e_guard, a_guard; -	e_ref_tag = be32_to_cpu(ep->ref_tag); -	a_ref_tag = be32_to_cpu(ap->ref_tag); -	e_app_tag = be16_to_cpu(ep->app_tag); -	a_app_tag = be16_to_cpu(ap->app_tag); -	e_guard = be16_to_cpu(ep->guard); -	a_guard = be16_to_cpu(ap->guard); +	/* +	 * swab32 of the "data" field in the beginning of qla2x00_status_entry() +	 * would make guard field appear at offset 2 +	 */ +	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2)); +	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); +	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); +	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2)); +	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); +	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));  	ql_dbg(ql_dbg_io, vha, 0x3023,  	    "iocb(s) %p Returned STATUS.\n", sts24); @@ -1465,6 +1466,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)  	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,  	    a_app_tag, e_app_tag, a_guard, e_guard); +	/* +	 * Ignore sector if: +	 * For type     3: ref & app tag is all 'f's +	 * For type 0,1,2: app tag is all 'f's +	 */ +	if ((a_app_tag == 0xffff) && +	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || +	     (a_ref_tag == 0xffffffff))) { +		uint32_t blocks_done, resid; +		sector_t lba_s = scsi_get_lba(cmd); + +		/* 2TB boundary case covered automatically with this */ +		blocks_done = e_ref_tag - (uint32_t)lba_s + 1; + +		resid = scsi_bufflen(cmd) - (blocks_done * +		    cmd->device->sector_size); + +		scsi_set_resid(cmd, resid); +		cmd->result = DID_OK << 16; + +		/* Update protection tag */ +		if (scsi_prot_sg_count(cmd)) { +			uint32_t i, j = 0, k = 0, num_ent; +			struct scatterlist *sg; +			struct sd_dif_tuple *spt; + +			/* Patch the corresponding protection tags */ +			scsi_for_each_prot_sg(cmd, sg, +			    scsi_prot_sg_count(cmd), i) { +				num_ent = sg_dma_len(sg) / 8; +				if (k + num_ent < blocks_done) { +					k += num_ent; +					continue; +				} +				j = blocks_done - k - 1; +				k = blocks_done; +				break; +			} + +			if (k != blocks_done) { +				qla_printk(KERN_WARNING, sp->fcport->vha->hw, +				    "unexpected tag values tag:lba=%x:%llx)\n", +				    e_ref_tag, (unsigned long long)lba_s); +				return 1; +			} + +			spt = page_address(sg_page(sg)) + sg->offset; +			spt += j; + +			
spt->app_tag = 0xffff; +			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) +				spt->ref_tag = 0xffffffff; +		} + +		return 0; +	} +  	/* check guard */  	if (e_guard != a_guard) {  		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, @@ -1472,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)  		set_driver_byte(cmd, DRIVER_SENSE);  		set_host_byte(cmd, DID_ABORT);  		cmd->result |= SAM_STAT_CHECK_CONDITION << 1; -		return; +		return 1;  	} -	/* check appl tag */ -	if (e_app_tag != a_app_tag) { +	/* check ref tag */ +	if (e_ref_tag != a_ref_tag) {  		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, -		    0x10, 0x2); +		    0x10, 0x3);  		set_driver_byte(cmd, DRIVER_SENSE);  		set_host_byte(cmd, DID_ABORT);  		cmd->result |= SAM_STAT_CHECK_CONDITION << 1; -		return; +		return 1;  	} -	/* check ref tag */ -	if (e_ref_tag != a_ref_tag) { +	/* check appl tag */ +	if (e_app_tag != a_app_tag) {  		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, -		    0x10, 0x3); +		    0x10, 0x2);  		set_driver_byte(cmd, DRIVER_SENSE);  		set_host_byte(cmd, DID_ABORT);  		cmd->result |= SAM_STAT_CHECK_CONDITION << 1; -		return; +		return 1;  	} + +	return 1;  }  /** @@ -1767,7 +1827,7 @@ check_scsi_status:  		break;  	case CS_DIF_ERROR: -		qla2x00_handle_dif_error(sp, sts24); +		logit = qla2x00_handle_dif_error(sp, sts24);  		break;  	default:  		cp->result = DID_ERROR << 16; @@ -2468,11 +2528,10 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)  		goto skip_msi;  	} -	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || -		!QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { +	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {  		ql_log(ql_log_warn, vha, 0x0035,  		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", -		    ha->pdev->revision, ha->fw_attributes); +		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);  		goto skip_msix;  	} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c706ed37000..f488cc69fc7 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)  	host->can_queue = base_vha->req->length + 128;  	host->this_id = 255;  	host->cmd_per_lun = 3; -	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) +	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)  		host->max_cmd_len = 32;  	else  		host->max_cmd_len = MAX_CMDSZ; diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 5cbf33a50b1..049807cda41 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2208,6 +2208,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)  	struct qla_hw_data *ha;  	struct rsp_que *rsp;  	struct device_reg_82xx __iomem *reg; +	unsigned long flags;  	rsp = (struct rsp_que *) dev_id;  	if (!rsp) { @@ -2218,11 +2219,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)  	ha = rsp->hw;  	reg = &ha->iobase->isp82; -	spin_lock_irq(&ha->hardware_lock); +	spin_lock_irqsave(&ha->hardware_lock, flags);  	vha = pci_get_drvdata(ha->pdev);  	qla24xx_process_response_queue(vha, rsp);  	WRT_REG_DWORD(®->host_int, 0); -	spin_unlock_irq(&ha->hardware_lock); +	spin_unlock_irqrestore(&ha->hardware_lock, flags);  	return IRQ_HANDLED;  } @@ -2838,6 +2839,16 @@ sufficient_dsds:  		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);  		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); +		/* build FCP_CMND IU */ +		memset(ctx->fcp_cmnd, 0, 
sizeof(struct fcp_cmnd)); +		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); +		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; + +		if (cmd->sc_data_direction == DMA_TO_DEVICE) +			ctx->fcp_cmnd->additional_cdb_len |= 1; +		else if (cmd->sc_data_direction == DMA_FROM_DEVICE) +			ctx->fcp_cmnd->additional_cdb_len |= 2; +  		/*  		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).  		 */ @@ -2854,16 +2865,6 @@ sufficient_dsds:  			}  		} -		/* build FCP_CMND IU */ -		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); -		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); -		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; - -		if (cmd->sc_data_direction == DMA_TO_DEVICE) -			ctx->fcp_cmnd->additional_cdb_len |= 1; -		else if (cmd->sc_data_direction == DMA_FROM_DEVICE) -			ctx->fcp_cmnd->additional_cdb_len |= 2; -  		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);  		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e02df276804..1e69527f1e4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,  		"Maximum queue depth to report for target devices.");  /* Do not change the value of this after module load */ -int ql2xenabledif = 1; +int ql2xenabledif = 0;  module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);  MODULE_PARM_DESC(ql2xenabledif,  		" Enable T10-CRC-DIF " -		" Default is 0 - No DIF Support. 1 - Enable it"); +		" Default is 0 - No DIF Support. 1 - Enable it" +		", 2 - Enable DIF for all types, except Type 0."); -int ql2xenablehba_err_chk; +int ql2xenablehba_err_chk = 2;  module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);  MODULE_PARM_DESC(ql2xenablehba_err_chk, -		" Enable T10-CRC-DIF Error isolation by HBA" -		" Default is 0 - Error isolation disabled, 1 - Enable it"); +		" Enable T10-CRC-DIF Error isolation by HBA:\n" +		" Default is 1.\n" +		"  0 -- Error isolation disabled\n" +		"  1 -- Error isolation enabled only for DIX Type 0\n" +		"  2 -- Error isolation enabled for all Types\n");  int ql2xiidmaenable=1;  module_param(ql2xiidmaenable, int, S_IRUGO); @@ -909,7 +913,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)  		    "Abort command mbx success.\n");  		wait = 1;  	} + +	spin_lock_irqsave(&ha->hardware_lock, flags);  	qla2x00_sp_compl(ha, sp); +	spin_unlock_irqrestore(&ha->hardware_lock, flags); + +	/* Did the command return during mailbox execution? */ +	if (ret == FAILED && !CMD_SP(cmd)) +		ret = SUCCESS;  	/* Wait for the command to be returned. 
*/  	if (wait) { @@ -1317,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)  					qla2x00_sp_compl(ha, sp);  				} else {  					ctx = sp->ctx; -					if (ctx->type == SRB_LOGIN_CMD || -					    ctx->type == SRB_LOGOUT_CMD) { -						ctx->u.iocb_cmd->free(sp); -					} else { +					if (ctx->type == SRB_ELS_CMD_RPT || +					    ctx->type == SRB_ELS_CMD_HST || +					    ctx->type == SRB_CT_CMD) {  						struct fc_bsg_job *bsg_job =  						    ctx->u.bsg_job;  						if (bsg_job->request->msgcode @@ -1332,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)  						kfree(sp->ctx);  						mempool_free(sp,  							ha->srb_mempool); +					} else { +						ctx->u.iocb_cmd->free(sp);  					}  				}  			} @@ -2251,7 +2263,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	host->this_id = 255;  	host->cmd_per_lun = 3;  	host->unique_id = host->host_no; -	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) +	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)  		host->max_cmd_len = 32;  	else  		host->max_cmd_len = MAX_CMDSZ; @@ -2378,13 +2390,16 @@ skip_dpc:  	    "Detected hba at address=%p.\n",  	    ha); -	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { +	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {  		if (ha->fw_attributes & BIT_4) { +			int prot = 0;  			base_vha->flags.difdix_supported = 1;  			ql_dbg(ql_dbg_init, base_vha, 0x00f1,  			    "Registering for DIF/DIX type 1 and 3 protection.\n"); +			if (ql2xenabledif == 1) +				prot = SHOST_DIX_TYPE0_PROTECTION;  			scsi_host_set_prot(host, -			    SHOST_DIF_TYPE1_PROTECTION +			    prot | SHOST_DIF_TYPE1_PROTECTION  			    | SHOST_DIF_TYPE2_PROTECTION  			    | SHOST_DIF_TYPE3_PROTECTION  			    | SHOST_DIX_TYPE1_PROTECTION diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 062c97bf62f..13b6357c1fa 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@  /*   * Driver version   */ -#define QLA2XXX_VERSION      "8.03.07.03-k" +#define QLA2XXX_VERSION      "8.03.07.07-k"  #define QLA_DRIVER_MAJOR_VER	8  #define QLA_DRIVER_MINOR_VER	3 diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig index 2c33ce6eac1..0f5599e0abf 100644 --- a/drivers/scsi/qla4xxx/Kconfig +++ b/drivers/scsi/qla4xxx/Kconfig @@ -1,6 +1,6 @@  config SCSI_QLA_ISCSI  	tristate "QLogic ISP4XXX and ISP82XX host adapter family support" -	depends on PCI && SCSI +	depends on PCI && SCSI && NET  	select SCSI_ISCSI_ATTRS  	---help---  	This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 909ed9ed24c..441a1c5b897 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -50,6 +50,7 @@ static int sg_version_num = 30534;	/* 2 digits for each component */  #include <linux/delay.h>  #include <linux/blktrace_api.h>  #include <linux/mutex.h> +#include <linux/ratelimit.h>  #include "scsi.h"  #include <scsi/scsi_dbg.h> @@ -626,14 +627,15 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)  	 */  	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {  		static char cmd[TASK_COMM_LEN]; -		if (strcmp(current->comm, cmd) && printk_ratelimit()) { -			printk(KERN_WARNING -			       "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--" -			       "guessing data in;\n   " -			       "program %s not setting count and/or reply_len properly\n", -			       old_hdr.reply_len - (int)SZ_SG_HEADER, -			       input_size, (unsigned int) 
cmnd[0], -			       current->comm); +		if (strcmp(current->comm, cmd)) { +			printk_ratelimited(KERN_WARNING +					   "sg_write: data in/out %d/%d bytes " +					   "for SCSI command 0x%x-- guessing " +					   "data in;\n   program %s not setting " +					   "count and/or reply_len properly\n", +					   old_hdr.reply_len - (int)SZ_SG_HEADER, +					   input_size, (unsigned int) cmnd[0], +					   current->comm);  			strcpy(cmd, current->comm);  		}  	} diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c index f33e2dd9793..33b2ed451e0 100644 --- a/drivers/sh/intc/chip.c +++ b/drivers/sh/intc/chip.c @@ -186,6 +186,9 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {      !defined(CONFIG_CPU_SUBTYPE_SH7709)  	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),  #endif +#if defined(CONFIG_ARCH_SH7372) +	[IRQ_TYPE_EDGE_BOTH] = VALID(4), +#endif  };  static int intc_set_type(struct irq_data *data, unsigned int type) diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index d2407558773..24cacff5778 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)  {  	struct device *dev = mspi->dev; +	if (!(mspi->flags & SPI_CPM_MODE)) +		return; +  	dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);  	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);  	cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 8ac6542aedc..fa594d604ac 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)  		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);  		if (cs_gpio < 0)  			cs_gpio = mxc_platform_info->chipselect[i]; + +		spi_imx->chipselect[i] = cs_gpio;  		if (cs_gpio < 0)  			continue; -		spi_imx->chipselect[i] = cs_gpio; +  		ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);  		if (ret) {  			while (i > 0) { diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 1d23f383186..6a80749391d 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -50,6 +50,8 @@  #define PCH_RX_THOLD		7  #define PCH_RX_THOLD_MAX	15 +#define PCH_TX_THOLD		2 +  #define PCH_MAX_BAUDRATE	5000000  #define PCH_MAX_FIFO_DEPTH	16 @@ -58,6 +60,7 @@  #define PCH_SLEEP_TIME		10  #define SSN_LOW			0x02U +#define SSN_HIGH		0x03U  #define SSN_NO_CONTROL		0x00U  #define PCH_MAX_CS		0xFF  #define PCI_DEVICE_ID_GE_SPI	0x8816 @@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,  	/* if transfer complete interrupt */  	if (reg_spsr_val & SPSR_FI_BIT) { -		if (tx_index < bpw_len) +		if ((tx_index == bpw_len) && (rx_index == tx_index)) { +			/* disable interrupts */ +			pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); + +			/* transfer is completed; +			   inform pch_spi_process_messages */ +			data->transfer_complete = true; +			data->transfer_active = false; +			wake_up(&data->wait); +		} else {  			dev_err(&data->master->dev,  				"%s : Transfer is not completed", __func__); -		/* disable interrupts */ -		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); - -		/* transfer is completed;inform pch_spi_process_messages */ -		data->transfer_complete = true; -		data->transfer_active = false; -		wake_up(&data->wait); +		}  	}  } @@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)  			"%s returning due to 
suspend\n", __func__);  		return IRQ_NONE;  	} -	if (data->use_dma) -		return IRQ_NONE;  	io_remap_addr = data->io_remap_addr;  	spsr = io_remap_addr + PCH_SPSR;  	reg_spsr_val = ioread32(spsr); -	if (reg_spsr_val & SPSR_ORF_BIT) -		dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); +	if (reg_spsr_val & SPSR_ORF_BIT) { +		dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__); +		if (data->current_msg->complete != 0) { +			data->transfer_complete = true; +			data->current_msg->status = -EIO; +			data->current_msg->complete(data->current_msg->context); +			data->bcurrent_msg_processing = false; +			data->current_msg = NULL; +			data->cur_trans = NULL; +		} +	} + +	if (data->use_dma) +		return IRQ_NONE;  	/* Check if the interrupt is for SPI device */  	if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { @@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data)  	wait_event_interruptible(data->wait, data->transfer_complete); -	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); -	dev_dbg(&data->master->dev, -		"%s:no more control over SSN-writing 0 to SSNXCR.", __func__); -  	/* clear all interrupts */  	pch_spi_writereg(data->master, PCH_SPSR,  			 pch_spi_readreg(data->master, PCH_SPSR)); @@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)  	}  } -static void pch_spi_start_transfer(struct pch_spi_data *data) +static int pch_spi_start_transfer(struct pch_spi_data *data)  {  	struct pch_spi_dma_ctrl *dma;  	unsigned long flags; +	int rtn;  	dma = &data->dma; @@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)  				 initiating the transfer. */  	dev_dbg(&data->master->dev,  		"%s:waiting for transfer to get over\n", __func__); -	wait_event_interruptible(data->wait, data->transfer_complete); +	rtn = wait_event_interruptible_timeout(data->wait, +					       data->transfer_complete, +					       msecs_to_jiffies(2 * HZ));  	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,  			    DMA_FROM_DEVICE); + +	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, +			    DMA_FROM_DEVICE); +	memset(data->dma.tx_buf_virt, 0, PAGE_SIZE); +  	async_tx_ack(dma->desc_rx);  	async_tx_ack(dma->desc_tx);  	kfree(dma->sg_tx_p);  	kfree(dma->sg_rx_p);  	spin_lock_irqsave(&data->lock, flags); -	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); -	dev_dbg(&data->master->dev, -		"%s:no more control over SSN-writing 0 to SSNXCR.", __func__);  	/* clear fifo threshold, disable interrupts, disable SPI transfer */  	pch_spi_setclr_reg(data->master, PCH_SPCR, 0, @@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)  	pch_spi_clear_fifo(data->master);  	spin_unlock_irqrestore(&data->lock, flags); + +	return rtn;  }  static void pch_dma_rx_complete(void *arg) @@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)  	/* set receive fifo threshold and transmit fifo threshold */  	pch_spi_setclr_reg(data->master, PCH_SPCR,  			   ((size - 1) << SPCR_RFIC_FIELD) | -			   ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << -			    SPCR_TFIC_FIELD), +			   (PCH_TX_THOLD << SPCR_TFIC_FIELD),  			   MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);  	spin_unlock_irqrestore(&data->lock, flags); @@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)  	/* offset, length setting */  	sg = dma->sg_rx_p;  	for (i = 0; i < num; i++, sg++) { -		if (i == 0) { -			sg->offset = 0; +		if (i == (num - 
2)) { +			sg->offset = size * i; +			sg->offset = sg->offset * (*bpw / 8);  			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,  				    sg->offset);  			sg_dma_len(sg) = rem; +		} else if (i == (num - 1)) { +			sg->offset = size * (i - 1) + rem; +			sg->offset = sg->offset * (*bpw / 8); +			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, +				    sg->offset); +			sg_dma_len(sg) = size;  		} else { -			sg->offset = rem + size * (i - 1); +			sg->offset = size * i;  			sg->offset = sg->offset * (*bpw / 8);  			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,  				    sg->offset); @@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)  	dma->desc_rx = desc_rx;  	/* TX */ +	if (data->bpw_len > PCH_DMA_TRANS_SIZE) { +		num = data->bpw_len / PCH_DMA_TRANS_SIZE; +		size = PCH_DMA_TRANS_SIZE; +		rem = 16; +	} else { +		num = 1; +		size = data->bpw_len; +		rem = data->bpw_len; +	} +  	dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);  	sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */  	/* offset, length setting */ @@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)  	if (data->use_dma)  		pch_spi_request_dma(data,  				    data->current_msg->spi->bits_per_word); +	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);  	do {  		/* If we are already processing a message get the next  		transfer structure from the message otherwise retrieve @@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)  		if (data->use_dma) {  			pch_spi_handle_dma(data, &bpw); -			pch_spi_start_transfer(data); +			if (!pch_spi_start_transfer(data)) +				goto out;  			pch_spi_copy_rx_data_for_dma(data, bpw);  		} else {  			pch_spi_set_tx(data, &bpw); @@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)  	} while (data->cur_trans != NULL); +out: +	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);  	if (data->use_dma)  		pch_spi_release_dma(data);  } diff --git a/drivers/staging/brcm80211/brcmsmac/otp.c b/drivers/staging/brcm80211/brcmsmac/otp.c index 34253cf3781..4a70180eba5 100644 --- a/drivers/staging/brcm80211/brcmsmac/otp.c +++ b/drivers/staging/brcm80211/brcmsmac/otp.c @@ -16,6 +16,7 @@  #include <linux/io.h>  #include <linux/errno.h> +#include <linux/string.h>  #include <brcm_hw_ids.h>  #include <chipcommon.h> diff --git a/drivers/staging/brcm80211/brcmsmac/types.h b/drivers/staging/brcm80211/brcmsmac/types.h index bbf21897ae0..823b5e4672e 100644 --- a/drivers/staging/brcm80211/brcmsmac/types.h +++ b/drivers/staging/brcm80211/brcmsmac/types.h @@ -18,6 +18,7 @@  #define _BRCM_TYPES_H_  #include <linux/types.h> +#include <linux/io.h>  /* Bus types */  #define	SI_BUS			0	/* SOC Interconnect */ diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index 6859af0778c..7611def97d0 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c @@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,  				   struct comedi_insn *insn,  				   unsigned int *data);  static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); -#ifdef CONFIG_COMEDI_PCI +#ifdef CONFIG_ISA_DMA_API  static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); +#endif +#ifdef CONFIG_COMEDI_PCI  static int labpc_find_device(struct comedi_device *dev, int bus, int slot);  #endif  static int labpc_dio_mem_callback(int dir, int port, 
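The scatterlist set-up in pch_spi_handle_dma() above splits a transfer into fixed-size DMA chunks plus a remainder and derives each chunk's offset. The sketch below is a deliberately simplified userspace model of that bookkeeping, with assumed values (a 10000-element transfer, a 4096-element chunk size); it does not reproduce the PCH driver's exact remainder placement or bits-per-word scaling, only that the chunk lengths add back up to the original transfer.

/* Illustrative only: generic chunk/offset bookkeeping for splitting a
 * transfer into fixed-size pieces plus a remainder. Simplified model,
 * not the exact PCH scatterlist layout. */
#include <stdio.h>

#define TRANS_SIZE 4096

int main(void)
{
	int bpw_len = 10000;	/* assumed element count */
	int num, size, rem;

	if (bpw_len > TRANS_SIZE) {
		num  = bpw_len / TRANS_SIZE + 1;	/* full chunks + remainder */
		size = TRANS_SIZE;
		rem  = bpw_len % TRANS_SIZE;
	} else {
		num  = 1;
		size = bpw_len;
		rem  = bpw_len;
	}

	for (int i = 0, off = 0; i < num; i++) {
		int len = (i == num - 1) ? rem : size;
		printf("chunk %d: offset=%d len=%d\n", i, off, len);
		off += len;
	}
	return 0;
}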
int data, diff --git a/drivers/staging/cxt1e1/Kconfig b/drivers/staging/cxt1e1/Kconfig index 73430ef6ae2..947f42a65c5 100644 --- a/drivers/staging/cxt1e1/Kconfig +++ b/drivers/staging/cxt1e1/Kconfig @@ -6,8 +6,7 @@ config CXT1E1        channelized stream WAN adapter card which contains a HDLC/Transparent        mode controller. -      If you want to compile this driver as a module -      say M here and read <file:Documentation/modules.txt>. +      If you want to compile this driver as a module say M here.        The module will be called 'cxt1e1'.        If unsure, say N. diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c index 52d1ea34963..1c86cf11ab9 100644 --- a/drivers/staging/iio/addac/adt7316-i2c.c +++ b/drivers/staging/iio/addac/adt7316-i2c.c @@ -109,7 +109,7 @@ static int __devinit adt7316_i2c_probe(struct i2c_client *client,  static int __devexit adt7316_i2c_remove(struct i2c_client *client)  { -	return adt7316_remove(&client->dev);; +	return adt7316_remove(&client->dev);  }  static const struct i2c_device_id adt7316_i2c_id[] = { diff --git a/drivers/staging/iio/dds/ad9832.c b/drivers/staging/iio/dds/ad9832.c index e3e61a469bb..6f0efe6580e 100644 --- a/drivers/staging/iio/dds/ad9832.c +++ b/drivers/staging/iio/dds/ad9832.c @@ -52,7 +52,7 @@ static int ad9832_write_frequency(struct ad9832_state *st,  					((addr - 3) << ADD_SHIFT) |  					((regval >> 0) & 0xFF)); -	return spi_sync(st->spi, &st->freq_msg);; +	return spi_sync(st->spi, &st->freq_msg);  }  static int ad9832_write_phase(struct ad9832_state *st, diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index 9c0d2936e48..c3d73f8431a 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -26,6 +26,7 @@  **********************************************************************/  #include <linux/kernel.h>  #include <linux/netdevice.h> +#include <linux/interrupt.h>  #include <linux/phy.h>  #include <linux/ratelimit.h>  #include <net/dst.h> diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 1a7c19ae766..8b307b42879 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)  				skb->protocol = eth_type_trans(skb, dev);  				skb->dev = dev; -				if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) +				if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || +					work->word2.s.L4_error || !work->word2.s.tcp_or_udp))  					skb->ip_summed = CHECKSUM_NONE;  				else  					skb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c index 97082542188..d0e2d514968 100644 --- a/drivers/staging/octeon/ethernet-spi.c +++ b/drivers/staging/octeon/ethernet-spi.c @@ -26,6 +26,7 @@  **********************************************************************/  #include <linux/kernel.h>  #include <linux/netdevice.h> +#include <linux/interrupt.h>  #include <net/dst.h>  #include <asm/octeon/octeon.h> diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c index 589a0554332..3d1279c424a 100644 --- a/drivers/staging/tidspbridge/core/dsp-clock.c +++ b/drivers/staging/tidspbridge/core/dsp-clock.c @@ -209,7 +209,6 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)  		break;  #ifdef CONFIG_OMAP_MCBSP  	case MCBSP_CLK: -		
omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);  		omap_mcbsp_request(MCBSP_ID(clk_id));  		omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);  		break; diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c index 975e34bcd72..1ca66ea9b28 100644 --- a/drivers/staging/zcache/tmem.c +++ b/drivers/staging/zcache/tmem.c @@ -604,7 +604,7 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,  	struct tmem_obj *obj;  	void *pampd;  	bool ephemeral = is_ephemeral(pool); -	uint32_t ret = -1; +	int ret = -1;  	struct tmem_hashbucket *hb;  	bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);  	bool lock_held = false; diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 855a5bb56a4..462fbc20561 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1158,7 +1158,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,  	size_t clen;  	int ret;  	unsigned long count; -	struct page *page = virt_to_page(data); +	struct page *page = (struct page *)(data);  	struct zcache_client *cli = pool->client;  	uint16_t client_id = get_client_id_from_client(cli);  	unsigned long zv_mean_zsize; @@ -1227,7 +1227,7 @@ static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,  	int ret = 0;  	BUG_ON(is_ephemeral(pool)); -	zv_decompress(virt_to_page(data), pampd); +	zv_decompress((struct page *)(data), pampd);  	return ret;  } @@ -1242,7 +1242,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,  	int ret = 0;  	BUG_ON(!is_ephemeral(pool)); -	zbud_decompress(virt_to_page(data), pampd); +	zbud_decompress((struct page *)(data), pampd);  	zbud_free_and_delist((struct zbud_hdr *)pampd);  	atomic_dec(&zcache_curr_eph_pampd_count);  	return ret; @@ -1539,7 +1539,7 @@ static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,  		goto out;  	if (!zcache_freeze && zcache_do_preload(pool) == 0) {  		/* preload does preempt_disable on success */ -		ret = tmem_put(pool, oidp, index, page_address(page), +		ret = tmem_put(pool, oidp, index, (char *)(page),  				PAGE_SIZE, 0, is_ephemeral(pool));  		if (ret < 0) {  			if (is_ephemeral(pool)) @@ -1572,7 +1572,7 @@ static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,  	pool = zcache_get_pool_by_id(cli_id, pool_id);  	if (likely(pool != NULL)) {  		if (atomic_read(&pool->obj_count) > 0) -			ret = tmem_get(pool, oidp, index, page_address(page), +			ret = tmem_get(pool, oidp, index, (char *)(page),  					&size, 0, is_ephemeral(pool));  		zcache_put_pool(pool);  	} diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 1060c7b7f80..62e54053bcd 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile @@ -6,7 +6,6 @@ target_core_mod-y		:= target_core_configfs.o \  				   target_core_hba.o \  				   target_core_pr.o \  				   target_core_alua.o \ -				   target_core_scdb.o \  				   target_core_tmr.o \  				   target_core_tpg.o \  				   target_core_transport.o \ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index c24fb10de60..4d01768fcd9 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -765,7 +765,7 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)  	u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 
1 :  				cmd->se_cmd.t_data_nents; -	iov_count += TRANSPORT_IOV_DATA_BUFFER; +	iov_count += ISCSI_IOV_DATA_BUFFER;  	cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);  	if (!cmd->iov_data) { @@ -2243,7 +2243,6 @@ static int iscsit_handle_snack(  	case 0:  		return iscsit_handle_recovery_datain_or_r2t(conn, buf,  			hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); -		return 0;  	case ISCSI_FLAG_SNACK_TYPE_STATUS:  		return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,  			hdr->begrun, hdr->runlength); @@ -3539,16 +3538,8 @@ get_immediate:  				spin_lock_bh(&conn->cmd_lock);  				list_del(&cmd->i_list);  				spin_unlock_bh(&conn->cmd_lock); -				/* -				 * Determine if a struct se_cmd is assoicated with -				 * this struct iscsi_cmd. -				 */ -				if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) && -				    !(cmd->tmr_req)) -					iscsit_release_cmd(cmd); -				else -					transport_generic_free_cmd(&cmd->se_cmd, -								1, 0); + +				iscsit_free_cmd(cmd);  				goto get_immediate;  			case ISTATE_SEND_NOPIN_WANT_RESPONSE:  				spin_unlock_bh(&cmd->istate_lock); @@ -3941,7 +3932,6 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)  {  	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;  	struct iscsi_session *sess = conn->sess; -	struct se_cmd *se_cmd;  	/*  	 * We expect this function to only ever be called from either RX or TX  	 * thread context via iscsit_close_connection() once the other context @@ -3949,35 +3939,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)  	 */  	spin_lock_bh(&conn->cmd_lock);  	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { -		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) { -			list_del(&cmd->i_list); -			spin_unlock_bh(&conn->cmd_lock); -			iscsit_increment_maxcmdsn(cmd, sess); -			se_cmd = &cmd->se_cmd; -			/* -			 * Special cases for active iSCSI TMR, and -			 * transport_lookup_cmd_lun() failing from -			 * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd(). -			 */ -			if (cmd->tmr_req && se_cmd->transport_wait_for_tasks) -				se_cmd->transport_wait_for_tasks(se_cmd, 1, 1); -			else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) -				transport_release_cmd(se_cmd); -			else -				iscsit_release_cmd(cmd); - -			spin_lock_bh(&conn->cmd_lock); -			continue; -		}  		list_del(&cmd->i_list);  		spin_unlock_bh(&conn->cmd_lock);  		iscsit_increment_maxcmdsn(cmd, sess); -		se_cmd = &cmd->se_cmd; -		if (se_cmd->transport_wait_for_tasks) -			se_cmd->transport_wait_for_tasks(se_cmd, 1, 1); +		iscsit_free_cmd(cmd);  		spin_lock_bh(&conn->cmd_lock);  	} diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 11fd7430781..beb39469e7f 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -18,6 +18,7 @@   * GNU General Public License for more details.   
******************************************************************************/ +#include <linux/kernel.h>  #include <linux/string.h>  #include <linux/crypto.h>  #include <linux/err.h> @@ -27,40 +28,11 @@  #include "iscsi_target_nego.h"  #include "iscsi_target_auth.h" -static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2]) -{ -	unsigned char result = 0; -	/* -	 * MSB -	 */ -	if ((val[0] >= 'a') && (val[0] <= 'f')) -		result = ((val[0] - 'a' + 10) & 0xf) << 4; -	else -		if ((val[0] >= 'A') && (val[0] <= 'F')) -			result = ((val[0] - 'A' + 10) & 0xf) << 4; -		else /* digit */ -			result = ((val[0] - '0') & 0xf) << 4; -	/* -	 * LSB -	 */ -	if ((val[1] >= 'a') && (val[1] <= 'f')) -		result |= ((val[1] - 'a' + 10) & 0xf); -	else -		if ((val[1] >= 'A') && (val[1] <= 'F')) -			result |= ((val[1] - 'A' + 10) & 0xf); -		else /* digit */ -			result |= ((val[1] - '0') & 0xf); - -	return result; -} -  static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)  { -	int i, j = 0; +	int j = DIV_ROUND_UP(len, 2); -	for (i = 0; i < len; i += 2) { -		dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]); -	} +	hex2bin(dst, src, j);  	dst[j] = '\0';  	return j; diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index f095e65b1cc..f1643dbf6a9 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(  				ISCSI_TCP);  	if (IS_ERR(tpg_np)) {  		iscsit_put_tpg(tpg); -		return ERR_PTR(PTR_ERR(tpg_np)); +		return ERR_CAST(tpg_np);  	}  	pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); @@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(  	tiqn = iscsit_add_tiqn((unsigned char *)name);  	if (IS_ERR(tiqn)) -		return ERR_PTR(PTR_ERR(tiqn)); +		return ERR_CAST(tiqn);  	/*  	 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.  	 
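The chap_string_to_hex() change above replaces the removed open-coded ASCII-hex converter with the kernel's hex2bin() helper plus DIV_ROUND_UP() for the output length. As a rough userspace equivalent (illustrative only; the CHAP string "4a5F00" is a made-up payload), the conversion amounts to packing two hex digits per output byte:

/* Illustrative userspace equivalent of the hex2bin() usage above. */
#include <stdio.h>
#include <ctype.h>

static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower((unsigned char)ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

int main(void)
{
	const char *src = "4a5F00";	/* hypothetical CHAP payload */
	unsigned char dst[16];
	int i, n = 0;

	for (i = 0; src[i] && src[i + 1] && n < (int)sizeof(dst); i += 2) {
		int hi = hex_to_bin(src[i]);
		int lo = hex_to_bin(src[i + 1]);

		if (hi < 0 || lo < 0)
			return 1;	/* reject non-hex input */
		dst[n++] = (hi << 4) | lo;
	}
	for (i = 0; i < n; i++)
		printf("%02x", dst[i]);
	printf("\n");
	return 0;
}

For the sample string the output is 4a5f00, three bytes from six hex digits.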
*/ diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 470ed551eeb..3723d90d5ae 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -57,6 +57,9 @@  #define TA_PROD_MODE_WRITE_PROTECT	0  #define TA_CACHE_CORE_NPS		0 + +#define ISCSI_IOV_DATA_BUFFER		5 +  enum tpg_np_network_transport_table {  	ISCSI_TCP				= 0,  	ISCSI_SCTP_TCP				= 1, @@ -425,7 +428,6 @@ struct iscsi_cmd {  	/* Number of times struct iscsi_cmd is present in immediate queue */  	atomic_t		immed_queue_count;  	atomic_t		response_queue_count; -	atomic_t		transport_sent;  	spinlock_t		datain_lock;  	spinlock_t		dataout_timeout_lock;  	/* spinlock for protecting struct iscsi_cmd->i_state */ diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 980650792cf..c4c68da3e50 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn(  			 */  			list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,  						ooo_list) { -				while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) +				if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)  					continue;  				list_add(&ooo_cmdsn->ooo_list, diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 91a4d170bda..0b8404c3012 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -143,12 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)  			list_del(&cmd->i_list);  			cmd->conn = NULL;  			spin_unlock(&cr->conn_recovery_cmd_lock); -			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -			    !(cmd->se_cmd.transport_wait_for_tasks)) -				iscsit_release_cmd(cmd); -			else -				cmd->se_cmd.transport_wait_for_tasks( -						&cmd->se_cmd, 1, 1); +			iscsit_free_cmd(cmd);  			spin_lock(&cr->conn_recovery_cmd_lock);  		}  		spin_unlock(&cr->conn_recovery_cmd_lock); @@ -170,12 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)  			list_del(&cmd->i_list);  			cmd->conn = NULL;  			spin_unlock(&cr->conn_recovery_cmd_lock); -			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -			    !(cmd->se_cmd.transport_wait_for_tasks)) -				iscsit_release_cmd(cmd); -			else -				cmd->se_cmd.transport_wait_for_tasks( -						&cmd->se_cmd, 1, 1); +			iscsit_free_cmd(cmd);  			spin_lock(&cr->conn_recovery_cmd_lock);  		}  		spin_unlock(&cr->conn_recovery_cmd_lock); @@ -260,12 +250,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(  		iscsit_remove_cmd_from_connection_recovery(cmd, sess);  		spin_unlock(&cr->conn_recovery_cmd_lock); -		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -		    !(cmd->se_cmd.transport_wait_for_tasks)) -			iscsit_release_cmd(cmd); -		else -			cmd->se_cmd.transport_wait_for_tasks( -					&cmd->se_cmd, 1, 0); +		iscsit_free_cmd(cmd);  		spin_lock(&cr->conn_recovery_cmd_lock);  	}  	spin_unlock(&cr->conn_recovery_cmd_lock); @@ -319,12 +304,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)  		list_del(&cmd->i_list);  		spin_unlock_bh(&conn->cmd_lock); -		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -		    !(cmd->se_cmd.transport_wait_for_tasks)) -			iscsit_release_cmd(cmd); -		else -			cmd->se_cmd.transport_wait_for_tasks( -					&cmd->se_cmd, 1, 1); +		iscsit_free_cmd(cmd);  		spin_lock_bh(&conn->cmd_lock);  	}  	spin_unlock_bh(&conn->cmd_lock); @@ -377,13 +357,7 @@ int 
iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)  			list_del(&cmd->i_list);  			spin_unlock_bh(&conn->cmd_lock); - -			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -			    !(cmd->se_cmd.transport_wait_for_tasks)) -				iscsit_release_cmd(cmd); -			else -				cmd->se_cmd.transport_wait_for_tasks( -						&cmd->se_cmd, 1, 0); +			iscsit_free_cmd(cmd);  			spin_lock_bh(&conn->cmd_lock);  			continue;  		} @@ -403,13 +377,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)  		     (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {  			list_del(&cmd->i_list);  			spin_unlock_bh(&conn->cmd_lock); - -			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -			    !(cmd->se_cmd.transport_wait_for_tasks)) -				iscsit_release_cmd(cmd); -			else -				cmd->se_cmd.transport_wait_for_tasks( -						&cmd->se_cmd, 1, 1); +			iscsit_free_cmd(cmd);  			spin_lock_bh(&conn->cmd_lock);  			continue;  		} @@ -434,10 +402,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)  		iscsit_free_all_datain_reqs(cmd); -		if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) && -		     cmd->se_cmd.transport_wait_for_tasks) -			cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, -					0, 0); +		transport_wait_for_tasks(&cmd->se_cmd);  		/*  		 * Add the struct iscsi_cmd to the connection recovery cmd list  		 */ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index bcaf82f4703..daad362a93c 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)  					ISCSI_LOGIN_STATUS_TARGET_ERROR);  			goto new_sess_out;  		} -#if 0 -		if (!iscsi_ntop6((const unsigned char *) -				&sock_in6.sin6_addr.in6_u, -				(char *)&conn->ipv6_login_ip[0], -				IPV6_ADDRESS_SPACE)) { -			pr_err("iscsi_ntop6() failed\n"); -			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, -					ISCSI_LOGIN_STATUS_TARGET_ERROR); -			goto new_sess_out; -		} -#else -		pr_debug("Skipping iscsi_ntop6()\n"); -#endif +		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", +				&sock_in6.sin6_addr.in6_u); +		conn->login_port = ntohs(sock_in6.sin6_port);  	} else {  		memset(&sock_in, 0, sizeof(struct sockaddr_in)); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 4d087ac1106..426cd4bf6a9 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -504,7 +504,7 @@ static int iscsi_target_do_authentication(  		break;  	case 1:  		pr_debug("iSCSI security negotiation" -			" completed sucessfully.\n"); +			" completed successfully.\n");  		login->auth_complete = 1;  		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&  		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) { diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 252e246cf51..5b773160200 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -545,13 +545,13 @@ int iscsi_copy_param_list(  	struct iscsi_param_list *src_param_list,  	int leading)  { -	struct iscsi_param *new_param = NULL, *param = NULL; +	struct iscsi_param *param = NULL; +	struct iscsi_param *new_param = NULL;  	struct iscsi_param_list *param_list = NULL;  	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);  	if (!param_list) { -		pr_err("Unable to allocate memory for" -				" struct 
iscsi_param_list.\n"); +		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");  		goto err_out;  	}  	INIT_LIST_HEAD(¶m_list->param_list); @@ -567,8 +567,17 @@ int iscsi_copy_param_list(  		new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);  		if (!new_param) { -			pr_err("Unable to allocate memory for" -				" struct iscsi_param.\n"); +			pr_err("Unable to allocate memory for struct iscsi_param.\n"); +			goto err_out; +		} + +		new_param->name = kstrdup(param->name, GFP_KERNEL); +		new_param->value = kstrdup(param->value, GFP_KERNEL); +		if (!new_param->value || !new_param->name) { +			kfree(new_param->value); +			kfree(new_param->name); +			kfree(new_param); +			pr_err("Unable to allocate memory for parameter name/value.\n");  			goto err_out;  		} @@ -580,32 +589,12 @@ int iscsi_copy_param_list(  		new_param->use = param->use;  		new_param->type_range = param->type_range; -		new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL); -		if (!new_param->name) { -			pr_err("Unable to allocate memory for" -				" parameter name.\n"); -			goto err_out; -		} - -		new_param->value = kzalloc(strlen(param->value) + 1, -				GFP_KERNEL); -		if (!new_param->value) { -			pr_err("Unable to allocate memory for" -				" parameter value.\n"); -			goto err_out; -		} - -		memcpy(new_param->name, param->name, strlen(param->name)); -		new_param->name[strlen(param->name)] = '\0'; -		memcpy(new_param->value, param->value, strlen(param->value)); -		new_param->value[strlen(param->value)] = '\0'; -  		list_add_tail(&new_param->p_list, ¶m_list->param_list);  	} -	if (!list_empty(¶m_list->param_list)) +	if (!list_empty(¶m_list->param_list)) {  		*dst_param_list = param_list; -	else { +	} else {  		pr_err("No parameters allocated.\n");  		goto err_out;  	} @@ -1441,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(  	u8 DataSequenceInOrder = 0;  	u8 ErrorRecoveryLevel = 0, SessionType = 0;  	u8 IFMarker = 0, OFMarker = 0; -	u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; +	u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;  	u32 FirstBurstLength = 0, MaxBurstLength = 0;  	struct iscsi_param *param = NULL; diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index db1fe1ec84d..490207eacde 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -250,7 +250,7 @@ static int iscsit_task_reassign_complete_write(  	 * so if we have received all DataOUT we can safety ignore Initiator.  	 
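The iscsi_copy_param_list() hunk above swaps the hand-rolled kzalloc/memcpy string copies for kstrdup() with a single combined error branch. The userspace sketch below models that pattern with strdup() and a made-up two-field param struct (illustrative only, not the driver structure): duplicate both strings, and on partial failure free whatever was allocated before returning NULL.

/* Illustrative only: the kstrdup()-style copy/cleanup pattern above,
 * modelled in userspace with strdup() and a made-up param struct. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct param {
	char *name;
	char *value;
};

static struct param *param_dup(const struct param *src)
{
	struct param *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;

	p->name = strdup(src->name);
	p->value = strdup(src->value);
	if (!p->name || !p->value) {
		/* free() tolerates NULL, so one branch covers both cases. */
		free(p->name);
		free(p->value);
		free(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	struct param src = { "MaxRecvDataSegmentLength", "8192" };
	struct param *copy = param_dup(&src);

	if (!copy)
		return 1;
	printf("%s=%s\n", copy->name, copy->value);
	free(copy->name);
	free(copy->value);
	free(copy);
	return 0;
}

Relying on free() (and kfree() in the kernel version) accepting NULL is what lets the error path stay a single branch.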
*/  	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { -		if (!atomic_read(&cmd->transport_sent)) { +		if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {  			pr_debug("WRITE ITT: 0x%08x: t_state: %d"  				" never sent to transport\n",  				cmd->init_task_tag, cmd->se_cmd.t_state); @@ -314,11 +314,11 @@ static int iscsit_task_reassign_complete_read(  		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);  	} -	if (!atomic_read(&cmd->transport_sent)) { +	if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {  		pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"  			" transport\n", cmd->init_task_tag,  			cmd->se_cmd.t_state); -		transport_generic_handle_cdb(se_cmd); +		transport_handle_cdb_direct(se_cmd);  		return 0;  	} diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index a1acb016790..02348f727bd 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(  	if (!cmd->tmr_req) {  		pr_err("Unable to allocate memory for"  			" Task Management command!\n"); -		return NULL; +		goto out;  	}  	/*  	 * TASK_REASSIGN for ERL=2 / connection stays inside of @@ -289,7 +289,8 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(  	}  	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, -				(void *)cmd->tmr_req, tcm_function); +				(void *)cmd->tmr_req, tcm_function, +				GFP_KERNEL);  	if (!se_cmd->se_tmr_req)  		goto out; @@ -298,8 +299,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(  	return cmd;  out:  	iscsit_release_cmd(cmd); -	if (se_cmd) -		transport_free_se_cmd(se_cmd);  	return NULL;  } @@ -841,6 +840,23 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)  	kmem_cache_free(lio_cmd_cache, cmd);  } +void iscsit_free_cmd(struct iscsi_cmd *cmd) +{ +	/* +	 * Determine if a struct se_cmd is assoicated with +	 * this struct iscsi_cmd. +	 */ +	switch (cmd->iscsi_opcode) { +	case ISCSI_OP_SCSI_CMD: +	case ISCSI_OP_SCSI_TMFUNC: +		transport_generic_free_cmd(&cmd->se_cmd, 1); +		break; +	default: +		iscsit_release_cmd(cmd); +		break; +	} +} +  int iscsit_check_session_usage_count(struct iscsi_session *sess)  {  	spin_lock_bh(&sess->session_usage_lock); @@ -877,40 +893,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)  }  /* - *	Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker - *	array counts needed for sync and steering. - */ -static int iscsit_determine_sync_and_steering_counts( -	struct iscsi_conn *conn, -	struct iscsi_data_count *count) -{ -	u32 length = count->data_length; -	u32 marker, markint; - -	count->sync_and_steering = 1; - -	marker = (count->type == ISCSI_RX_DATA) ? -			conn->of_marker : conn->if_marker; -	markint = (count->type == ISCSI_RX_DATA) ? -			(conn->conn_ops->OFMarkInt * 4) : -			(conn->conn_ops->IFMarkInt * 4); -	count->ss_iov_count = count->iov_count; - -	while (length > 0) { -		if (length >= marker) { -			count->ss_iov_count += 3; -			count->ss_marker_count += 2; - -			length -= marker; -			marker = markint; -		} else -			length = 0; -	} - -	return 0; -} - -/*   *	Setup conn->if_marker and conn->of_marker values based upon   *	the initial marker-less interval. 
(see iSCSI v19 A.2)   */ @@ -1292,7 +1274,7 @@ int iscsit_fe_sendpage_sg(  	struct kvec iov;  	u32 tx_hdr_size, data_len;  	u32 offset = cmd->first_data_sg_off; -	int tx_sent; +	int tx_sent, iov_off;  send_hdr:  	tx_hdr_size = ISCSI_HDR_LEN; @@ -1312,9 +1294,19 @@ send_hdr:  	}  	data_len = cmd->tx_size - tx_hdr_size - cmd->padding; -	if (conn->conn_ops->DataDigest) +	/* +	 * Set iov_off used by padding and data digest tx_data() calls below +	 * in order to determine proper offset into cmd->iov_data[] +	 */ +	if (conn->conn_ops->DataDigest) {  		data_len -= ISCSI_CRC_LEN; - +		if (cmd->padding) +			iov_off = (cmd->iov_data_count - 2); +		else +			iov_off = (cmd->iov_data_count - 1); +	} else { +		iov_off = (cmd->iov_data_count - 1); +	}  	/*  	 * Perform sendpage() for each page in the scatterlist  	 */ @@ -1343,8 +1335,7 @@ send_pg:  send_padding:  	if (cmd->padding) { -		struct kvec *iov_p = -			&cmd->iov_data[cmd->iov_data_count-1]; +		struct kvec *iov_p = &cmd->iov_data[iov_off++];  		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);  		if (cmd->padding != tx_sent) { @@ -1358,8 +1349,7 @@ send_padding:  send_datacrc:  	if (conn->conn_ops->DataDigest) { -		struct kvec *iov_d = -			&cmd->iov_data[cmd->iov_data_count]; +		struct kvec *iov_d = &cmd->iov_data[iov_off];  		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);  		if (ISCSI_CRC_LEN != tx_sent) { @@ -1433,8 +1423,7 @@ static int iscsit_do_rx_data(  	struct iscsi_data_count *count)  {  	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; -	u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; -	struct kvec iov[count->ss_iov_count], *iov_p; +	struct kvec *iov_p;  	struct msghdr msg;  	if (!conn || !conn->sock || !conn->conn_ops) @@ -1442,93 +1431,8 @@ static int iscsit_do_rx_data(  	memset(&msg, 0, sizeof(struct msghdr)); -	if (count->sync_and_steering) { -		int size = 0; -		u32 i, orig_iov_count = 0; -		u32 orig_iov_len = 0, orig_iov_loc = 0; -		u32 iov_count = 0, per_iov_bytes = 0; -		u32 *rx_marker, old_rx_marker = 0; -		struct kvec *iov_record; - -		memset(&rx_marker_val, 0, -				count->ss_marker_count * sizeof(u32)); -		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); - -		iov_record = count->iov; -		orig_iov_count = count->iov_count; -		rx_marker = &conn->of_marker; - -		i = 0; -		size = data; -		orig_iov_len = iov_record[orig_iov_loc].iov_len; -		while (size > 0) { -			pr_debug("rx_data: #1 orig_iov_len %u," -			" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); -			pr_debug("rx_data: #2 rx_marker %u, size" -				" %u\n", *rx_marker, size); - -			if (orig_iov_len >= *rx_marker) { -				iov[iov_count].iov_len = *rx_marker; -				iov[iov_count++].iov_base = -					(iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&rx_marker_val[rx_marker_iov++]; -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&rx_marker_val[rx_marker_iov++]; -				old_rx_marker = *rx_marker; - -				/* -				 * OFMarkInt is in 32-bit words. 
-				 */ -				*rx_marker = (conn->conn_ops->OFMarkInt * 4); -				size -= old_rx_marker; -				orig_iov_len -= old_rx_marker; -				per_iov_bytes += old_rx_marker; - -				pr_debug("rx_data: #3 new_rx_marker" -					" %u, size %u\n", *rx_marker, size); -			} else { -				iov[iov_count].iov_len = orig_iov_len; -				iov[iov_count++].iov_base = -					(iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				per_iov_bytes = 0; -				*rx_marker -= orig_iov_len; -				size -= orig_iov_len; - -				if (size) -					orig_iov_len = -					iov_record[++orig_iov_loc].iov_len; - -				pr_debug("rx_data: #4 new_rx_marker" -					" %u, size %u\n", *rx_marker, size); -			} -		} -		data += (rx_marker_iov * (MARKER_SIZE / 2)); - -		iov_p	= &iov[0]; -		iov_len	= iov_count; - -		if (iov_count > count->ss_iov_count) { -			pr_err("iov_count: %d, count->ss_iov_count:" -				" %d\n", iov_count, count->ss_iov_count); -			return -1; -		} -		if (rx_marker_iov > count->ss_marker_count) { -			pr_err("rx_marker_iov: %d, count->ss_marker" -				"_count: %d\n", rx_marker_iov, -				count->ss_marker_count); -			return -1; -		} -	} else { -		iov_p = count->iov; -		iov_len	= count->iov_count; -	} +	iov_p = count->iov; +	iov_len	= count->iov_count;  	while (total_rx < data) {  		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, @@ -1543,16 +1447,6 @@ static int iscsit_do_rx_data(  				rx_loop, total_rx, data);  	} -	if (count->sync_and_steering) { -		int j; -		for (j = 0; j < rx_marker_iov; j++) { -			pr_debug("rx_data: #5 j: %d, offset: %d\n", -				j, rx_marker_val[j]); -			conn->of_marker_offset = rx_marker_val[j]; -		} -		total_rx -= (rx_marker_iov * (MARKER_SIZE / 2)); -	} -  	return total_rx;  } @@ -1561,8 +1455,7 @@ static int iscsit_do_tx_data(  	struct iscsi_data_count *count)  {  	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; -	u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; -	struct kvec iov[count->ss_iov_count], *iov_p; +	struct kvec *iov_p;  	struct msghdr msg;  	if (!conn || !conn->sock || !conn->conn_ops) @@ -1575,98 +1468,8 @@ static int iscsit_do_tx_data(  	memset(&msg, 0, sizeof(struct msghdr)); -	if (count->sync_and_steering) { -		int size = 0; -		u32 i, orig_iov_count = 0; -		u32 orig_iov_len = 0, orig_iov_loc = 0; -		u32 iov_count = 0, per_iov_bytes = 0; -		u32 *tx_marker, old_tx_marker = 0; -		struct kvec *iov_record; - -		memset(&tx_marker_val, 0, -			count->ss_marker_count * sizeof(u32)); -		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); - -		iov_record = count->iov; -		orig_iov_count = count->iov_count; -		tx_marker = &conn->if_marker; - -		i = 0; -		size = data; -		orig_iov_len = iov_record[orig_iov_loc].iov_len; -		while (size > 0) { -			pr_debug("tx_data: #1 orig_iov_len %u," -			" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); -			pr_debug("tx_data: #2 tx_marker %u, size" -				" %u\n", *tx_marker, size); - -			if (orig_iov_len >= *tx_marker) { -				iov[iov_count].iov_len = *tx_marker; -				iov[iov_count++].iov_base = -					(iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				tx_marker_val[tx_marker_iov] = -						(size - *tx_marker); -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&tx_marker_val[tx_marker_iov++]; -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&tx_marker_val[tx_marker_iov++]; -				old_tx_marker = *tx_marker; - -				/* -				 * IFMarkInt is in 32-bit words. 
-				 */ -				*tx_marker = (conn->conn_ops->IFMarkInt * 4); -				size -= old_tx_marker; -				orig_iov_len -= old_tx_marker; -				per_iov_bytes += old_tx_marker; - -				pr_debug("tx_data: #3 new_tx_marker" -					" %u, size %u\n", *tx_marker, size); -				pr_debug("tx_data: #4 offset %u\n", -					tx_marker_val[tx_marker_iov-1]); -			} else { -				iov[iov_count].iov_len = orig_iov_len; -				iov[iov_count++].iov_base -					= (iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				per_iov_bytes = 0; -				*tx_marker -= orig_iov_len; -				size -= orig_iov_len; - -				if (size) -					orig_iov_len = -					iov_record[++orig_iov_loc].iov_len; - -				pr_debug("tx_data: #5 new_tx_marker" -					" %u, size %u\n", *tx_marker, size); -			} -		} - -		data += (tx_marker_iov * (MARKER_SIZE / 2)); - -		iov_p = &iov[0]; -		iov_len = iov_count; - -		if (iov_count > count->ss_iov_count) { -			pr_err("iov_count: %d, count->ss_iov_count:" -				" %d\n", iov_count, count->ss_iov_count); -			return -1; -		} -		if (tx_marker_iov > count->ss_marker_count) { -			pr_err("tx_marker_iov: %d, count->ss_marker" -				"_count: %d\n", tx_marker_iov, -				count->ss_marker_count); -			return -1; -		} -	} else { -		iov_p = count->iov; -		iov_len = count->iov_count; -	} +	iov_p = count->iov; +	iov_len = count->iov_count;  	while (total_tx < data) {  		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, @@ -1681,9 +1484,6 @@ static int iscsit_do_tx_data(  					tx_loop, total_tx, data);  	} -	if (count->sync_and_steering) -		total_tx -= (tx_marker_iov * (MARKER_SIZE / 2)); -  	return total_tx;  } @@ -1704,12 +1504,6 @@ int rx_data(  	c.data_length = data;  	c.type = ISCSI_RX_DATA; -	if (conn->conn_ops->OFMarker && -	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { -		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) -			return -1; -	} -  	return iscsit_do_rx_data(conn, &c);  } @@ -1730,12 +1524,6 @@ int tx_data(  	c.data_length = data;  	c.type = ISCSI_TX_DATA; -	if (conn->conn_ops->IFMarker && -	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { -		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) -			return -1; -	} -  	return iscsit_do_tx_data(conn, &c);  } diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 2cd49d607bd..835bf7de028 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -30,6 +30,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_c  extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);  extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);  extern void iscsit_release_cmd(struct iscsi_cmd *); +extern void iscsit_free_cmd(struct iscsi_cmd *);  extern int iscsit_check_session_usage_count(struct iscsi_session *);  extern void iscsit_dec_session_usage_count(struct iscsi_session *);  extern void iscsit_inc_session_usage_count(struct iscsi_session *); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index aa2d6799723..b15d8cbf630 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -200,7 +200,7 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)  	 * Release the struct se_cmd, which will make a callback to release  	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()  	 */ -	transport_generic_free_cmd(se_cmd, 0, 0); +	transport_generic_free_cmd(se_cmd, 0);  }  static void tcm_loop_release_cmd(struct se_cmd 
*se_cmd) @@ -290,6 +290,15 @@ static int tcm_loop_queuecommand(  	 */  	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);  	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; +	/* +	 * Ensure that this tl_tpg reference from the incoming sc->device->id +	 * has already been configured via tcm_loop_make_naa_tpg(). +	 */ +	if (!tl_tpg->tl_hba) { +		set_host_byte(sc, DID_NO_CONNECT); +		sc->scsi_done(sc); +		return 0; +	}  	se_tpg = &tl_tpg->tl_se_tpg;  	/*  	 * Determine the SAM Task Attribute and allocate tl_cmd and @@ -366,7 +375,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)  	 * Allocate the LUN_RESET TMR  	 */  	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, -				TMR_LUN_RESET); +						TMR_LUN_RESET, GFP_KERNEL);  	if (IS_ERR(se_cmd->se_tmr_req))  		goto release;  	/* @@ -388,7 +397,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)  		SUCCESS : FAILED;  release:  	if (se_cmd) -		transport_generic_free_cmd(se_cmd, 1, 0); +		transport_generic_free_cmd(se_cmd, 1);  	else  		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);  	kfree(tl_tmr); @@ -1245,6 +1254,9 @@ void tcm_loop_drop_naa_tpg(  	 */  	core_tpg_deregister(se_tpg); +	tl_tpg->tl_hba = NULL; +	tl_tpg->tl_tpgt = 0; +  	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"  		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),  		config_item_name(&wwn->wwn_group.cg_item), tpgt); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 98c98a3a025..8f4447749c7 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -24,7 +24,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <linux/configfs.h> @@ -68,6 +67,15 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)  	unsigned char *buf;  	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first  				    Target port group descriptor */ +	/* +	 * Need at least 4 bytes of response data or else we can't +	 * even fit the return data length. +	 */ +	if (cmd->data_length < 4) { +		pr_warn("REPORT TARGET PORT GROUPS allocation length %u" +			" too small\n", cmd->data_length); +		return -EINVAL; +	}  	buf = transport_kmap_first_data_page(cmd); @@ -75,6 +83,17 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)  	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,  			tg_pt_gp_list) {  		/* +		 * Check if the Target port group and Target port descriptor list +		 * based on tg_pt_gp_members count will fit into the response payload. +		 * Otherwise, bump rd_len to let the initiator know we have exceeded +		 * the allocation length and the response is truncated. +		 */ +		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) > +		     cmd->data_length) { +			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4); +			continue; +		} +		/*  		 * PREF: Preferred target port bit, determine if this  		 * bit should be set for port group.  		 
*/ diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 8ae09a1bdf7..38535eb1392 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -24,6 +24,7 @@   */  #include <linux/kernel.h> +#include <linux/module.h>  #include <asm/unaligned.h>  #include <scsi/scsi.h> @@ -67,6 +68,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)  {  	struct se_lun *lun = cmd->se_lun;  	struct se_device *dev = cmd->se_dev; +	struct se_portal_group *tpg = lun->lun_sep->sep_tpg;  	unsigned char *buf;  	/* @@ -81,9 +83,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)  	buf = transport_kmap_first_data_page(cmd); -	buf[0] = dev->transport->get_device_type(dev); -	if (buf[0] == TYPE_TAPE) -		buf[1] = 0x80; +	if (dev == tpg->tpg_virt_lun0.lun_se_dev) { +		buf[0] = 0x3f; /* Not connected */ +	} else { +		buf[0] = dev->transport->get_device_type(dev); +		if (buf[0] == TYPE_TAPE) +			buf[1] = 0x80; +	}  	buf[2] = dev->transport->get_device_rev(dev);  	/* @@ -149,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)  	return 0;  } +static void +target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) +{ +	unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; +	int cnt; +	bool next = true; + +	/* +	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on +	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field +	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION +	 * to complete the payload.  These are based from VPD=0x80 PRODUCT SERIAL +	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure +	 * per device uniqeness. +	 */ +	for (cnt = 0; *p && cnt < 13; p++) { +		int val = hex_to_bin(*p); + +		if (val < 0) +			continue; + +		if (next) { +			next = false; +			buf[cnt++] |= val; +		} else { +			next = true; +			buf[cnt] = val << 4; +		} +	} +} +  /*   * Device identification VPD, for a complete list of   * DESIGNATOR TYPEs see spc4r17 Table 459. @@ -214,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)  	 * VENDOR_SPECIFIC_IDENTIFIER and  	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION  	 */ -	buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); -	hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); +	target_parse_naa_6h_vendor_specific(dev, &buf[off]);  	len = 20;  	off = (len + 4); @@ -915,8 +951,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)  		length += target_modesense_control(dev, &buf[offset+length]);  		break;  	default: -		pr_err("Got Unknown Mode Page: 0x%02x\n", -				cdb[2] & 0x3f); +		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", +		       cdb[2] & 0x3f, cdb[3]);  		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;  	}  	offset += length; @@ -1072,8 +1108,6 @@ target_emulate_unmap(struct se_task *task)  		size -= 16;  	} -	task->task_scsi_status = GOOD; -	transport_complete_task(task, 1);  err:  	transport_kunmap_first_data_page(cmd); @@ -1085,24 +1119,17 @@ err:   * Note this is not used for TCM/pSCSI passthrough   */  static int -target_emulate_write_same(struct se_task *task, int write_same32) +target_emulate_write_same(struct se_task *task, u32 num_blocks)  {  	struct se_cmd *cmd = task->task_se_cmd;  	struct se_device *dev = cmd->se_dev;  	sector_t range;  	sector_t lba = cmd->t_task_lba; -	unsigned int num_blocks;  	int ret;  	/* -	 * Extract num_blocks from the WRITE_SAME_* CDB.  
Then use the explict -	 * range when non zero is supplied, otherwise calculate the remaining -	 * range based on ->get_blocks() - starting LBA. +	 * Use the explicit range when non zero is supplied, otherwise calculate +	 * the remaining range based on ->get_blocks() - starting LBA.  	 */ -	if (write_same32) -		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); -	else -		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); -  	if (num_blocks != 0)  		range = num_blocks;  	else @@ -1117,8 +1144,6 @@ target_emulate_write_same(struct se_task *task, int write_same32)  		return ret;  	} -	task->task_scsi_status = GOOD; -	transport_complete_task(task, 1);  	return 0;  } @@ -1165,13 +1190,23 @@ transport_emulate_control_cdb(struct se_task *task)  		}  		ret = target_emulate_unmap(task);  		break; +	case WRITE_SAME: +		if (!dev->transport->do_discard) { +			pr_err("WRITE_SAME emulation not supported" +					" for: %s\n", dev->transport->name); +			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; +		} +		ret = target_emulate_write_same(task, +				get_unaligned_be16(&cmd->t_task_cdb[7])); +		break;  	case WRITE_SAME_16:  		if (!dev->transport->do_discard) {  			pr_err("WRITE_SAME_16 emulation not supported"  					" for: %s\n", dev->transport->name);  			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;  		} -		ret = target_emulate_write_same(task, 0); +		ret = target_emulate_write_same(task, +				get_unaligned_be32(&cmd->t_task_cdb[10]));  		break;  	case VARIABLE_LENGTH_CMD:  		service_action = @@ -1184,7 +1219,8 @@ transport_emulate_control_cdb(struct se_task *task)  					dev->transport->name);  				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;  			} -			ret = target_emulate_write_same(task, 1); +			ret = target_emulate_write_same(task, +				get_unaligned_be32(&cmd->t_task_cdb[28]));  			break;  		default:  			pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" @@ -1219,8 +1255,63 @@ transport_emulate_control_cdb(struct se_task *task)  	if (ret < 0)  		return ret; -	task->task_scsi_status = GOOD; -	transport_complete_task(task, 1); +	/* +	 * Handle the successful completion here unless a caller +	 * has explictly requested an asychronous completion. +	 */ +	if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { +		task->task_scsi_status = GOOD; +		transport_complete_task(task, 1); +	}  	return PYX_TRANSPORT_SENT_TO_TRANSPORT;  } + +/* + * Write a CDB into @cdb that is based on the one the intiator sent us, + * but updated to only cover the sectors that the current task handles. 
+ */ +void target_get_task_cdb(struct se_task *task, unsigned char *cdb) +{ +	struct se_cmd *cmd = task->task_se_cmd; +	unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb); + +	memcpy(cdb, cmd->t_task_cdb, cdb_len); +	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { +		unsigned long long lba = task->task_lba; +		u32 sectors = task->task_sectors; + +		switch (cdb_len) { +		case 6: +			/* 21-bit LBA and 8-bit sectors */ +			cdb[1] = (lba >> 16) & 0x1f; +			cdb[2] = (lba >> 8) & 0xff; +			cdb[3] = lba & 0xff; +			cdb[4] = sectors & 0xff; +			break; +		case 10: +			/* 32-bit LBA and 16-bit sectors */ +			put_unaligned_be32(lba, &cdb[2]); +			put_unaligned_be16(sectors, &cdb[7]); +			break; +		case 12: +			/* 32-bit LBA and 32-bit sectors */ +			put_unaligned_be32(lba, &cdb[2]); +			put_unaligned_be32(sectors, &cdb[6]); +			break; +		case 16: +			/* 64-bit LBA and 32-bit sectors */ +			put_unaligned_be64(lba, &cdb[2]); +			put_unaligned_be32(sectors, &cdb[10]); +			break; +		case 32: +			/* 64-bit LBA and 32-bit sectors, extended CDB */ +			put_unaligned_be64(lba, &cdb[12]); +			put_unaligned_be32(sectors, &cdb[28]); +			break; +		default: +			BUG(); +		} +	} +} +EXPORT_SYMBOL(target_get_task_cdb); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index b2575d8568c..e0c1e8a8dd4 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -23,7 +23,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -133,14 +132,6 @@ static struct config_group *target_core_register_fabric(  	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"  			" %s\n", group, name);  	/* -	 * Ensure that TCM subsystem plugins are loaded at this point for -	 * using the RAMDISK_DR virtual LUN 0 and all other struct se_port -	 * LUN symlinks. -	 */ -	if (transport_subsystem_check_init() < 0) -		return ERR_PTR(-EINVAL); - -	/*  	 * Below are some hardcoded request_module() calls to automatically  	 * local fabric modules when the following is called:  	 * @@ -725,9 +716,6 @@ SE_DEV_ATTR_RO(hw_queue_depth);  DEF_DEV_ATTRIB(queue_depth);  SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); -DEF_DEV_ATTRIB(task_timeout); -SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR); -  DEF_DEV_ATTRIB(max_unmap_lba_count);  SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); @@ -761,7 +749,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {  	&target_core_dev_attrib_optimal_sectors.attr,  	&target_core_dev_attrib_hw_queue_depth.attr,  	&target_core_dev_attrib_queue_depth.attr, -	&target_core_dev_attrib_task_timeout.attr,  	&target_core_dev_attrib_max_unmap_lba_count.attr,  	&target_core_dev_attrib_max_unmap_block_desc_count.attr,  	&target_core_dev_attrib_unmap_granularity.attr, @@ -3080,8 +3067,7 @@ static struct config_group *target_core_call_addhbatotarget(  	/*  	 * Load up TCM subsystem plugins if they have not already been loaded.  	 
*/ -	if (transport_subsystem_check_init() < 0) -		return ERR_PTR(-EINVAL); +	transport_subsystem_check_init();  	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);  	if (IS_ERR(hba)) diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index b38b6c993e6..f870c3bcfd8 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)  	struct se_dev_entry *deve;  	u32 i; -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  		spin_lock_irq(&nacl->device_list_lock);  		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { @@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)  		}  		spin_unlock_irq(&nacl->device_list_lock); -		spin_lock_bh(&tpg->acl_node_lock); +		spin_lock_irq(&tpg->acl_node_lock);  	} -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  }  static struct se_port *core_alloc_port(struct se_device *dev) @@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)  	return ret;  } +u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) +{ +	u32 tmp, aligned_max_sectors; +	/* +	 * Limit max_sectors to a PAGE_SIZE aligned value for modern +	 * transport_allocate_data_tasks() operation. +	 */ +	tmp = rounddown((max_sectors * block_size), PAGE_SIZE); +	aligned_max_sectors = (tmp / block_size); +	if (max_sectors != aligned_max_sectors) { +		printk(KERN_INFO "Rounding down aligned max_sectors from %u" +				" to %u\n", max_sectors, aligned_max_sectors); +		return aligned_max_sectors; +	} + +	return max_sectors; +} +  void se_dev_set_default_attribs(  	struct se_device *dev,  	struct se_dev_limits *dev_limits) @@ -878,6 +896,11 @@ void se_dev_set_default_attribs(  	 * max_sectors is based on subsystem plugin dependent requirements.  	 
*/  	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; +	/* +	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() +	 */ +	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, +						limits->logical_block_size);  	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;  	/*  	 * Set optimal_sectors from max_sectors, which can be lowered via @@ -891,21 +914,6 @@ void se_dev_set_default_attribs(  	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;  } -int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) -{ -	if (task_timeout > DA_TASK_TIMEOUT_MAX) { -		pr_err("dev[%p]: Passed task_timeout: %u larger then" -			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); -		return -EINVAL; -	} else { -		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; -		pr_debug("dev[%p]: Set SE Device task_timeout: %u\n", -			dev, task_timeout); -	} - -	return 0; -} -  int se_dev_set_max_unmap_lba_count(  	struct se_device *dev,  	u32 max_unmap_lba_count) @@ -949,36 +957,24 @@ int se_dev_set_unmap_granularity_alignment(  int se_dev_set_emulate_dpo(struct se_device *dev, int flag)  { -	if ((flag != 0) && (flag != 1)) { +	if (flag != 0 && flag != 1) {  		pr_err("Illegal value %d\n", flag);  		return -EINVAL;  	} -	if (dev->transport->dpo_emulated == NULL) { -		pr_err("dev->transport->dpo_emulated is NULL\n"); -		return -EINVAL; -	} -	if (dev->transport->dpo_emulated(dev) == 0) { -		pr_err("dev->transport->dpo_emulated not supported\n"); -		return -EINVAL; -	} -	dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; -	pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation" -			" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); -	return 0; + +	pr_err("dpo_emulated not supported\n"); +	return -EINVAL;  }  int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)  { -	if ((flag != 0) && (flag != 1)) { +	if (flag != 0 && flag != 1) {  		pr_err("Illegal value %d\n", flag);  		return -EINVAL;  	} -	if (dev->transport->fua_write_emulated == NULL) { -		pr_err("dev->transport->fua_write_emulated is NULL\n"); -		return -EINVAL; -	} -	if (dev->transport->fua_write_emulated(dev) == 0) { -		pr_err("dev->transport->fua_write_emulated not supported\n"); + +	if (dev->transport->fua_write_emulated == 0) { +		pr_err("fua_write_emulated not supported\n");  		return -EINVAL;  	}  	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; @@ -989,36 +985,23 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)  int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)  { -	if ((flag != 0) && (flag != 1)) { +	if (flag != 0 && flag != 1) {  		pr_err("Illegal value %d\n", flag);  		return -EINVAL;  	} -	if (dev->transport->fua_read_emulated == NULL) { -		pr_err("dev->transport->fua_read_emulated is NULL\n"); -		return -EINVAL; -	} -	if (dev->transport->fua_read_emulated(dev) == 0) { -		pr_err("dev->transport->fua_read_emulated not supported\n"); -		return -EINVAL; -	} -	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; -	pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n", -			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); -	return 0; + +	pr_err("ua read emulated not supported\n"); +	return -EINVAL;  }  int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)  { -	if ((flag != 0) && (flag != 1)) { +	if (flag != 0 && flag != 1) {  		pr_err("Illegal value %d\n", flag);  		return -EINVAL;  	} -	if (dev->transport->write_cache_emulated == NULL) { -		
pr_err("dev->transport->write_cache_emulated is NULL\n"); -		return -EINVAL; -	} -	if (dev->transport->write_cache_emulated(dev) == 0) { -		pr_err("dev->transport->write_cache_emulated not supported\n"); +	if (dev->transport->write_cache_emulated == 0) { +		pr_err("write_cache_emulated not supported\n");  		return -EINVAL;  	}  	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; @@ -1242,6 +1225,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)  			return -EINVAL;  		}  	} +	/* +	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() +	 */ +	max_sectors = se_dev_align_max_sectors(max_sectors, +				dev->se_sub_dev->se_dev_attrib.block_size);  	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;  	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", @@ -1344,15 +1332,17 @@ struct se_lun *core_dev_add_lun(  	 */  	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {  		struct se_node_acl *acl; -		spin_lock_bh(&tpg->acl_node_lock); +		spin_lock_irq(&tpg->acl_node_lock);  		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { -			if (acl->dynamic_node_acl) { -				spin_unlock_bh(&tpg->acl_node_lock); +			if (acl->dynamic_node_acl && +			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || +			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { +				spin_unlock_irq(&tpg->acl_node_lock);  				core_tpg_add_node_to_devs(acl, tpg); -				spin_lock_bh(&tpg->acl_node_lock); +				spin_lock_irq(&tpg->acl_node_lock);  			}  		} -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  	}  	return lun_p; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index f1654694f4e..09b6f8729f9 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -22,7 +22,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -481,7 +480,7 @@ static struct config_group *target_fabric_make_nodeacl(  	se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);  	if (IS_ERR(se_nacl)) -		return ERR_PTR(PTR_ERR(se_nacl)); +		return ERR_CAST(se_nacl);  	nacl_cg = &se_nacl->acl_group;  	nacl_cg->default_groups = se_nacl->acl_default_groups; diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index c4ea3a9a555..39f021b855e 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -63,6 +63,7 @@ u32 sas_get_pr_transport_id(  	unsigned char *buf)  {  	unsigned char *ptr; +	int ret;  	/*  	 * Set PROTOCOL IDENTIFIER to 6h for SAS @@ -74,7 +75,9 @@ u32 sas_get_pr_transport_id(  	 */  	ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. 
prefix */ -	hex2bin(&buf[4], ptr, 8); +	ret = hex2bin(&buf[4], ptr, 8); +	if (ret < 0) +		pr_debug("sas transport_id: invalid hex string\n");  	/*  	 * The SAS Transport ID is a hardcoded 24-byte length @@ -156,8 +159,9 @@ u32 fc_get_pr_transport_id(  	unsigned char *buf)  {  	unsigned char *ptr; -	int i; +	int i, ret;  	u32 off = 8; +  	/*  	 * PROTOCOL IDENTIFIER is 0h for FCP-2  	 * @@ -174,7 +178,9 @@ u32 fc_get_pr_transport_id(  			i++;  			continue;  		} -		hex2bin(&buf[off++], &ptr[i], 1); +		ret = hex2bin(&buf[off++], &ptr[i], 1); +		if (ret < 0) +			pr_debug("fc transport_id: invalid hex string\n");  		i += 2;  	}  	/* diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index bc1b33639b8..19a0be9c657 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -26,7 +26,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/string.h>  #include <linux/parser.h>  #include <linux/timer.h> @@ -273,13 +272,14 @@ fd_alloc_task(unsigned char *cdb)  static int fd_do_readv(struct se_task *task)  {  	struct fd_request *req = FILE_REQ(task); -	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; +	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; +	struct fd_dev *dev = se_dev->dev_ptr;  	struct file *fd = dev->fd_file;  	struct scatterlist *sg = task->task_sg;  	struct iovec *iov;  	mm_segment_t old_fs;  	loff_t pos = (task->task_lba * -		      task->se_dev->se_sub_dev->se_dev_attrib.block_size); +		      se_dev->se_sub_dev->se_dev_attrib.block_size);  	int ret = 0, i;  	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); @@ -325,13 +325,14 @@ static int fd_do_readv(struct se_task *task)  static int fd_do_writev(struct se_task *task)  {  	struct fd_request *req = FILE_REQ(task); -	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; +	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; +	struct fd_dev *dev = se_dev->dev_ptr;  	struct file *fd = dev->fd_file;  	struct scatterlist *sg = task->task_sg;  	struct iovec *iov;  	mm_segment_t old_fs;  	loff_t pos = (task->task_lba * -		      task->se_dev->se_sub_dev->se_dev_attrib.block_size); +		      se_dev->se_sub_dev->se_dev_attrib.block_size);  	int ret, i = 0;  	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); @@ -399,33 +400,6 @@ static void fd_emulate_sync_cache(struct se_task *task)  }  /* - * Tell TCM Core that we are capable of WriteCache emulation for - * an underlying struct se_device. - */ -static int fd_emulated_write_cache(struct se_device *dev) -{ -	return 1; -} - -static int fd_emulated_dpo(struct se_device *dev) -{ -	return 0; -} -/* - * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs - * for TYPE_DISK. - */ -static int fd_emulated_fua_write(struct se_device *dev) -{ -	return 1; -} - -static int fd_emulated_fua_read(struct se_device *dev) -{ -	return 0; -} - -/*   * WRITE Force Unit Access (FUA) emulation on a per struct se_task   * LBA range basis..   
*/ @@ -608,17 +582,6 @@ static ssize_t fd_show_configfs_dev_params(  	return bl;  } -/*	fd_get_cdb(): (Part of se_subsystem_api_t template) - * - * - */ -static unsigned char *fd_get_cdb(struct se_task *task) -{ -	struct fd_request *req = FILE_REQ(task); - -	return req->fd_scsi_cdb; -} -  /*	fd_get_device_rev(): (Part of se_subsystem_api_t template)   *   * @@ -650,15 +613,13 @@ static struct se_subsystem_api fileio_template = {  	.name			= "fileio",  	.owner			= THIS_MODULE,  	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV, +	.write_cache_emulated	= 1, +	.fua_write_emulated	= 1,  	.attach_hba		= fd_attach_hba,  	.detach_hba		= fd_detach_hba,  	.allocate_virtdevice	= fd_allocate_virtdevice,  	.create_virtdevice	= fd_create_virtdevice,  	.free_device		= fd_free_device, -	.dpo_emulated		= fd_emulated_dpo, -	.fua_write_emulated	= fd_emulated_fua_write, -	.fua_read_emulated	= fd_emulated_fua_read, -	.write_cache_emulated	= fd_emulated_write_cache,  	.alloc_task		= fd_alloc_task,  	.do_task		= fd_do_task,  	.do_sync_cache		= fd_emulate_sync_cache, @@ -666,7 +627,6 @@ static struct se_subsystem_api fileio_template = {  	.check_configfs_dev_params = fd_check_configfs_dev_params,  	.set_configfs_dev_params = fd_set_configfs_dev_params,  	.show_configfs_dev_params = fd_show_configfs_dev_params, -	.get_cdb		= fd_get_cdb,  	.get_device_rev		= fd_get_device_rev,  	.get_device_type	= fd_get_device_type,  	.get_blocks		= fd_get_blocks, diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index daebd710b89..59e6e73106c 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -14,9 +14,7 @@  struct fd_request {  	struct se_task	fd_task; -	/* SCSI CDB from iSCSI Command PDU */ -	unsigned char	fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; -} ____cacheline_aligned; +};  #define FBDF_HAS_PATH		0x01  #define FBDF_HAS_SIZE		0x02 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7e123410544..41ad02b5fb8 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -27,7 +27,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/string.h>  #include <linux/parser.h>  #include <linux/timer.h> @@ -314,104 +313,42 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(  	return blocks_long;  } +static void iblock_end_io_flush(struct bio *bio, int err) +{ +	struct se_cmd *cmd = bio->bi_private; + +	if (err) +		pr_err("IBLOCK: cache flush failed: %d\n", err); + +	if (cmd) +		transport_complete_sync_cache(cmd, err == 0); +	bio_put(bio); +} +  /* - * Emulate SYCHRONIZE_CACHE_* + * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must + * always flush the whole cache.   */  static void iblock_emulate_sync_cache(struct se_task *task)  {  	struct se_cmd *cmd = task->task_se_cmd;  	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;  	int immed = (cmd->t_task_cdb[1] & 0x2); -	sector_t error_sector; -	int ret; +	struct bio *bio;  	/*  	 * If the Immediate bit is set, queue up the GOOD response -	 * for this SYNCHRONIZE_CACHE op +	 * for this SYNCHRONIZE_CACHE op.  	 */  	if (immed)  		transport_complete_sync_cache(cmd, 1); -	/* -	 * blkdev_issue_flush() does not support a specifying a range, so -	 * we have to flush the entire cache. 
-	 */ -	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); -	if (ret != 0) { -		pr_err("IBLOCK: block_issue_flush() failed: %d " -			" error_sector: %llu\n", ret, -			(unsigned long long)error_sector); -	} - +	bio = bio_alloc(GFP_KERNEL, 0); +	bio->bi_end_io = iblock_end_io_flush; +	bio->bi_bdev = ib_dev->ibd_bd;  	if (!immed) -		transport_complete_sync_cache(cmd, ret == 0); -} - -/* - * Tell TCM Core that we are capable of WriteCache emulation for - * an underlying struct se_device. - */ -static int iblock_emulated_write_cache(struct se_device *dev) -{ -	return 1; -} - -static int iblock_emulated_dpo(struct se_device *dev) -{ -	return 0; -} - -/* - * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs - * for TYPE_DISK. - */ -static int iblock_emulated_fua_write(struct se_device *dev) -{ -	return 1; -} - -static int iblock_emulated_fua_read(struct se_device *dev) -{ -	return 0; -} - -static int iblock_do_task(struct se_task *task) -{ -	struct se_device *dev = task->task_se_cmd->se_dev; -	struct iblock_req *req = IBLOCK_REQ(task); -	struct bio *bio = req->ib_bio, *nbio = NULL; -	struct blk_plug plug; -	int rw; - -	if (task->task_data_direction == DMA_TO_DEVICE) { -		/* -		 * Force data to disk if we pretend to not have a volatile -		 * write cache, or the initiator set the Force Unit Access bit. -		 */ -		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || -		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && -		     task->task_se_cmd->t_tasks_fua)) -			rw = WRITE_FUA; -		else -			rw = WRITE; -	} else { -		rw = READ; -	} - -	blk_start_plug(&plug); -	while (bio) { -		nbio = bio->bi_next; -		bio->bi_next = NULL; -		pr_debug("Calling submit_bio() task: %p bio: %p" -			" bio->bi_sector: %llu\n", task, bio, -			 (unsigned long long)bio->bi_sector); - -		submit_bio(rw, bio); -		bio = nbio; -	} -	blk_finish_plug(&plug); - -	return PYX_TRANSPORT_SENT_TO_TRANSPORT; +		bio->bi_private = cmd; +	submit_bio(WRITE_FLUSH, bio);  }  static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) @@ -425,20 +362,7 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)  static void iblock_free_task(struct se_task *task)  { -	struct iblock_req *req = IBLOCK_REQ(task); -	struct bio *bio, *hbio = req->ib_bio; -	/* -	 * We only release the bio(s) here if iblock_bio_done() has not called -	 * bio_put() -> iblock_bio_destructor(). 
-	 */ -	while (hbio != NULL) { -		bio = hbio; -		hbio = hbio->bi_next; -		bio->bi_next = NULL; -		bio_put(bio); -	} - -	kfree(req); +	kfree(IBLOCK_REQ(task));  }  enum { @@ -552,25 +476,21 @@ static ssize_t iblock_show_configfs_dev_params(  static void iblock_bio_destructor(struct bio *bio)  {  	struct se_task *task = bio->bi_private; -	struct iblock_dev *ib_dev = task->se_dev->dev_ptr; +	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;  	bio_free(bio, ib_dev->ibd_bio_set);  } -static struct bio *iblock_get_bio( -	struct se_task *task, -	struct iblock_req *ib_req, -	struct iblock_dev *ib_dev, -	int *ret, -	sector_t lba, -	u32 sg_num) +static struct bio * +iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)  { +	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; +	struct iblock_req *ib_req = IBLOCK_REQ(task);  	struct bio *bio;  	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);  	if (!bio) {  		pr_err("Unable to allocate memory for bio\n"); -		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;  		return NULL;  	} @@ -591,17 +511,33 @@ static struct bio *iblock_get_bio(  	return bio;  } -static int iblock_map_data_SG(struct se_task *task) +static int iblock_do_task(struct se_task *task)  {  	struct se_cmd *cmd = task->task_se_cmd;  	struct se_device *dev = cmd->se_dev; -	struct iblock_dev *ib_dev = task->se_dev->dev_ptr; -	struct iblock_req *ib_req = IBLOCK_REQ(task); -	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; +	struct bio *bio; +	struct bio_list list;  	struct scatterlist *sg; -	int ret = 0;  	u32 i, sg_num = task->task_sg_nents;  	sector_t block_lba; +	struct blk_plug plug; +	int rw; + +	if (task->task_data_direction == DMA_TO_DEVICE) { +		/* +		 * Force data to disk if we pretend to not have a volatile +		 * write cache, or the initiator set the Force Unit Access bit. +		 */ +		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || +		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && +		     task->task_se_cmd->t_tasks_fua)) +			rw = WRITE_FUA; +		else +			rw = WRITE; +	} else { +		rw = READ; +	} +  	/*  	 * Do starting conversion up from non 512-byte blocksize with  	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. @@ -620,68 +556,43 @@ static int iblock_map_data_SG(struct se_task *task)  		return PYX_TRANSPORT_LU_COMM_FAILURE;  	} -	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); +	bio = iblock_get_bio(task, block_lba, sg_num);  	if (!bio) -		return ret; +		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; -	ib_req->ib_bio = bio; -	hbio = tbio = bio; -	/* -	 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist -	 * from task->task_sg -> struct scatterlist memory. 
-	 */ -	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { -		pr_debug("task: %p bio: %p Calling bio_add_page(): page:" -			" %p len: %u offset: %u\n", task, bio, sg_page(sg), -				sg->length, sg->offset); -again: -		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); -		if (ret != sg->length) { - -			pr_debug("*** Set bio->bi_sector: %llu\n", -				 (unsigned long long)bio->bi_sector); -			pr_debug("** task->task_size: %u\n", -					task->task_size); -			pr_debug("*** bio->bi_max_vecs: %u\n", -					bio->bi_max_vecs); -			pr_debug("*** bio->bi_vcnt: %u\n", -					bio->bi_vcnt); +	bio_list_init(&list); +	bio_list_add(&list, bio); -			bio = iblock_get_bio(task, ib_req, ib_dev, &ret, -						block_lba, sg_num); +	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { +		/* +		 * XXX: if the length the device accepts is shorter than the +		 *	length of the S/G list entry this will cause and +		 *	endless loop.  Better hope no driver uses huge pages. +		 */ +		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) +				!= sg->length) { +			bio = iblock_get_bio(task, block_lba, sg_num);  			if (!bio)  				goto fail; - -			tbio = tbio->bi_next = bio; -			pr_debug("-----------------> Added +1 bio: %p to" -				" list, Going to again\n", bio); -			goto again; +			bio_list_add(&list, bio);  		} +  		/* Always in 512 byte units for Linux/Block */  		block_lba += sg->length >> IBLOCK_LBA_SHIFT;  		sg_num--; -		pr_debug("task: %p bio-add_page() passed!, decremented" -			" sg_num to %u\n", task, sg_num); -		pr_debug("task: %p bio_add_page() passed!, increased lba" -			 " to %llu\n", task, (unsigned long long)block_lba); -		pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:" -				" %u\n", task, bio->bi_vcnt);  	} -	return 0; +	blk_start_plug(&plug); +	while ((bio = bio_list_pop(&list))) +		submit_bio(rw, bio); +	blk_finish_plug(&plug); + +	return PYX_TRANSPORT_SENT_TO_TRANSPORT; +  fail: -	while (hbio) { -		bio = hbio; -		hbio = hbio->bi_next; -		bio->bi_next = NULL; +	while ((bio = bio_list_pop(&list)))  		bio_put(bio); -	} -	return ret; -} - -static unsigned char *iblock_get_cdb(struct se_task *task) -{ -	return IBLOCK_REQ(task)->ib_scsi_cdb; +	return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;  }  static u32 iblock_get_device_rev(struct se_device *dev) @@ -707,6 +618,7 @@ static void iblock_bio_done(struct bio *bio, int err)  {  	struct se_task *task = bio->bi_private;  	struct iblock_req *ibr = IBLOCK_REQ(task); +  	/*  	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0  	 */ @@ -721,50 +633,31 @@ static void iblock_bio_done(struct bio *bio, int err)  		 */  		atomic_inc(&ibr->ib_bio_err_cnt);  		smp_mb__after_atomic_inc(); -		bio_put(bio); -		/* -		 * Wait to complete the task until the last bio as completed. -		 */ -		if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) -			return; - -		ibr->ib_bio = NULL; -		transport_complete_task(task, 0); -		return;  	} -	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", -		 task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err); -	/* -	 * bio_put() will call iblock_bio_destructor() to release the bio back -	 * to ibr->ib_bio_set. -	 */ +  	bio_put(bio); -	/* -	 * Wait to complete the task until the last bio as completed. -	 */ +  	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))  		return; -	/* -	 * Return GOOD status for task if zero ib_bio_err_cnt exists. 
-	 */ -	ibr->ib_bio = NULL; -	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt))); + +	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", +		 task, bio, task->task_lba, +		 (unsigned long long)bio->bi_sector, err); + +	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));  }  static struct se_subsystem_api iblock_template = {  	.name			= "iblock",  	.owner			= THIS_MODULE,  	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV, -	.map_data_SG		= iblock_map_data_SG, +	.write_cache_emulated	= 1, +	.fua_write_emulated	= 1,  	.attach_hba		= iblock_attach_hba,  	.detach_hba		= iblock_detach_hba,  	.allocate_virtdevice	= iblock_allocate_virtdevice,  	.create_virtdevice	= iblock_create_virtdevice,  	.free_device		= iblock_free_device, -	.dpo_emulated		= iblock_emulated_dpo, -	.fua_write_emulated	= iblock_emulated_fua_write, -	.fua_read_emulated	= iblock_emulated_fua_read, -	.write_cache_emulated	= iblock_emulated_write_cache,  	.alloc_task		= iblock_alloc_task,  	.do_task		= iblock_do_task,  	.do_discard		= iblock_do_discard, @@ -773,7 +666,6 @@ static struct se_subsystem_api iblock_template = {  	.check_configfs_dev_params = iblock_check_configfs_dev_params,  	.set_configfs_dev_params = iblock_set_configfs_dev_params,  	.show_configfs_dev_params = iblock_show_configfs_dev_params, -	.get_cdb		= iblock_get_cdb,  	.get_device_rev		= iblock_get_device_rev,  	.get_device_type	= iblock_get_device_type,  	.get_blocks		= iblock_get_blocks, diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index a121cd1b657..5cf1860c10d 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -8,10 +8,8 @@  struct iblock_req {  	struct se_task ib_task; -	unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];  	atomic_t ib_bio_cnt;  	atomic_t ib_bio_err_cnt; -	struct bio *ib_bio;  } ____cacheline_aligned;  #define IBDF_HAS_UDEV_PATH		0x01 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 1c1b849cd4f..0c4f783f924 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -25,7 +25,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <linux/list.h> @@ -1598,14 +1597,14 @@ static int core_scsi3_decode_spec_i_port(  			 * from the decoded fabric module specific TransportID  			 * at *i_str.  			 
*/ -			spin_lock_bh(&tmp_tpg->acl_node_lock); +			spin_lock_irq(&tmp_tpg->acl_node_lock);  			dest_node_acl = __core_tpg_get_initiator_node_acl(  						tmp_tpg, i_str);  			if (dest_node_acl) {  				atomic_inc(&dest_node_acl->acl_pr_ref_count);  				smp_mb__after_atomic_inc();  			} -			spin_unlock_bh(&tmp_tpg->acl_node_lock); +			spin_unlock_irq(&tmp_tpg->acl_node_lock);  			if (!dest_node_acl) {  				core_scsi3_tpg_undepend_item(tmp_tpg); @@ -3496,14 +3495,14 @@ after_iport_check:  	/*  	 * Locate the destination struct se_node_acl from the received Transport ID  	 */ -	spin_lock_bh(&dest_se_tpg->acl_node_lock); +	spin_lock_irq(&dest_se_tpg->acl_node_lock);  	dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,  				initiator_str);  	if (dest_node_acl) {  		atomic_inc(&dest_node_acl->acl_pr_ref_count);  		smp_mb__after_atomic_inc();  	} -	spin_unlock_bh(&dest_se_tpg->acl_node_lock); +	spin_unlock_irq(&dest_se_tpg->acl_node_lock);  	if (!dest_node_acl) {  		pr_err("Unable to locate %s dest_node_acl for" diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 2b7b0da9146..dad671dee9e 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -26,7 +26,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/string.h>  #include <linux/parser.h>  #include <linux/timer.h> @@ -567,7 +566,7 @@ static struct se_device *pscsi_create_virtdevice(  			if (IS_ERR(sh)) {  				pr_err("pSCSI: Unable to locate"  					" pdv_host_id: %d\n", pdv->pdv_host_id); -				return (struct se_device *) sh; +				return ERR_CAST(sh);  			}  		}  	} else { @@ -677,7 +676,7 @@ static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)   */  static int pscsi_transport_complete(struct se_task *task)  { -	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; +	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;  	struct scsi_device *sd = pdv->pdv_sd;  	int result;  	struct pscsi_plugin_task *pt = PSCSI_TASK(task); @@ -777,95 +776,6 @@ pscsi_alloc_task(unsigned char *cdb)  	return &pt->pscsi_task;  } -static inline void pscsi_blk_init_request( -	struct se_task *task, -	struct pscsi_plugin_task *pt, -	struct request *req, -	int bidi_read) -{ -	/* -	 * Defined as "scsi command" in include/linux/blkdev.h. -	 */ -	req->cmd_type = REQ_TYPE_BLOCK_PC; -	/* -	 * For the extra BIDI-COMMAND READ struct request we do not -	 * need to setup the remaining structure members -	 */ -	if (bidi_read) -		return; -	/* -	 * Setup the done function pointer for struct request, -	 * also set the end_io_data pointer.to struct se_task. -	 */ -	req->end_io = pscsi_req_done; -	req->end_io_data = task; -	/* -	 * Load the referenced struct se_task's SCSI CDB into -	 * include/linux/blkdev.h:struct request->cmd -	 */ -	req->cmd_len = scsi_command_size(pt->pscsi_cdb); -	req->cmd = &pt->pscsi_cdb[0]; -	/* -	 * Setup pointer for outgoing sense data. 
-	 */ -	req->sense = &pt->pscsi_sense[0]; -	req->sense_len = 0; -} - -/* - * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB -*/ -static int pscsi_blk_get_request(struct se_task *task) -{ -	struct pscsi_plugin_task *pt = PSCSI_TASK(task); -	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - -	pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, -			(task->task_data_direction == DMA_TO_DEVICE), -			GFP_KERNEL); -	if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) { -		pr_err("PSCSI: blk_get_request() failed: %ld\n", -				IS_ERR(pt->pscsi_req)); -		return PYX_TRANSPORT_LU_COMM_FAILURE; -	} -	/* -	 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, -	 * and setup rq callback, CDB and sense. -	 */ -	pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); -	return 0; -} - -/*      pscsi_do_task(): (Part of se_subsystem_api_t template) - * - * - */ -static int pscsi_do_task(struct se_task *task) -{ -	struct pscsi_plugin_task *pt = PSCSI_TASK(task); -	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; -	/* -	 * Set the struct request->timeout value based on peripheral -	 * device type from SCSI. -	 */ -	if (pdv->pdv_sd->type == TYPE_DISK) -		pt->pscsi_req->timeout = PS_TIMEOUT_DISK; -	else -		pt->pscsi_req->timeout = PS_TIMEOUT_OTHER; - -	pt->pscsi_req->retries = PS_RETRY; -	/* -	 * Queue the struct request into the struct scsi_device->request_queue. -	 * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd -	 * descriptor -	 */ -	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, -			(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), -			pscsi_req_done); - -	return PYX_TRANSPORT_SENT_TO_TRANSPORT; -} -  static void pscsi_free_task(struct se_task *task)  {  	struct pscsi_plugin_task *pt = PSCSI_TASK(task); @@ -1049,15 +959,12 @@ static inline struct bio *pscsi_get_bio(int sg_num)  	return bio;  } -static int __pscsi_map_SG( -	struct se_task *task, -	struct scatterlist *task_sg, -	u32 task_sg_num, -	int bidi_read) +static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, +		struct bio **hbio)  { -	struct pscsi_plugin_task *pt = PSCSI_TASK(task); -	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; -	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; +	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; +	u32 task_sg_num = task->task_sg_nents; +	struct bio *bio = NULL, *tbio = NULL;  	struct page *page;  	struct scatterlist *sg;  	u32 data_len = task->task_size, i, len, bytes, off; @@ -1066,19 +973,8 @@ static int __pscsi_map_SG(  	int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;  	int rw = (task->task_data_direction == DMA_TO_DEVICE); -	if (!task->task_size) -		return 0; -	/* -	 * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup -	 * the bio_vec maplist from task->task_sg -> -	 * struct scatterlist memory.  The struct se_task->task_sg[] currently needs -	 * to be attached to struct bios for submission to Linux/SCSI using -	 * struct request to struct scsi_device->request_queue. -	 * -	 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI -	 * is ported to upstream SCSI passthrough functionality that accepts -	 * struct scatterlist->page_link or struct page as a paraemeter. 
-	 */ +	*hbio = NULL; +  	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);  	for_each_sg(task_sg, sg, task_sg_num, i) { @@ -1115,8 +1011,8 @@ static int __pscsi_map_SG(  				 * bios need to be added to complete a given  				 * struct se_task  				 */ -				if (!hbio) -					hbio = tbio = bio; +				if (!*hbio) +					*hbio = tbio = bio;  				else  					tbio = tbio->bi_next = bio;  			} @@ -1152,92 +1048,82 @@ static int __pscsi_map_SG(  			off = 0;  		}  	} -	/* -	 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND -	 * primary SCSI WRITE poayload mapped for struct se_task->task_sg[] -	 */ -	if (!bidi_read) { -		/* -		 * Starting with v2.6.31, call blk_make_request() passing in *hbio to -		 * allocate the pSCSI task a struct request. -		 */ -		pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, -					hbio, GFP_KERNEL); -		if (!pt->pscsi_req) { -			pr_err("pSCSI: blk_make_request() failed\n"); -			goto fail; -		} -		/* -		 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, -		 * and setup rq callback, CDB and sense. -		 */ -		pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); - -		return task->task_sg_nents; -	} -	/* -	 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND -	 * SCSI READ paylaod mapped for struct se_task->task_sg_bidi[] -	 */ -	pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, -					hbio, GFP_KERNEL); -	if (!pt->pscsi_req->next_rq) { -		pr_err("pSCSI: blk_make_request() failed for BIDI\n"); -		goto fail; -	} -	pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);  	return task->task_sg_nents;  fail: -	while (hbio) { -		bio = hbio; -		hbio = hbio->bi_next; +	while (*hbio) { +		bio = *hbio; +		*hbio = (*hbio)->bi_next;  		bio->bi_next = NULL; -		bio_endio(bio, 0); +		bio_endio(bio, 0);	/* XXX: should be error */  	}  	return ret;  } -/* - * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call. - */ -static int pscsi_map_SG(struct se_task *task) +static int pscsi_do_task(struct se_task *task)  { +	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; +	struct pscsi_plugin_task *pt = PSCSI_TASK(task); +	struct request *req; +	struct bio *hbio;  	int ret; -	/* -	 * Setup the main struct request for the task->task_sg[] payload -	 */ +	target_get_task_cdb(task, pt->pscsi_cdb); + +	if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { +		req = blk_get_request(pdv->pdv_sd->request_queue, +				(task->task_data_direction == DMA_TO_DEVICE), +				GFP_KERNEL); +		if (!req || IS_ERR(req)) { +			pr_err("PSCSI: blk_get_request() failed: %ld\n", +					req ? IS_ERR(req) : -ENOMEM); +			return PYX_TRANSPORT_LU_COMM_FAILURE; +		} +	} else { +		BUG_ON(!task->task_size); -	ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0); -	if (ret >= 0 && task->task_sg_bidi) {  		/* -		 * If present, set up the extra BIDI-COMMAND SCSI READ -		 * struct request and payload. 
+		 * Setup the main struct request for the task->task_sg[] payload  		 */ -		ret = __pscsi_map_SG(task, task->task_sg_bidi, -					task->task_sg_nents, 1); +		ret = pscsi_map_sg(task, task->task_sg, &hbio); +		if (ret < 0) +			return PYX_TRANSPORT_LU_COMM_FAILURE; + +		req = blk_make_request(pdv->pdv_sd->request_queue, hbio, +				       GFP_KERNEL); +		if (!req) { +			pr_err("pSCSI: blk_make_request() failed\n"); +			goto fail; +		}  	} -	if (ret < 0) -		return PYX_TRANSPORT_LU_COMM_FAILURE; -	return 0; -} +	req->cmd_type = REQ_TYPE_BLOCK_PC; +	req->end_io = pscsi_req_done; +	req->end_io_data = task; +	req->cmd_len = scsi_command_size(pt->pscsi_cdb); +	req->cmd = &pt->pscsi_cdb[0]; +	req->sense = &pt->pscsi_sense[0]; +	req->sense_len = 0; +	if (pdv->pdv_sd->type == TYPE_DISK) +		req->timeout = PS_TIMEOUT_DISK; +	else +		req->timeout = PS_TIMEOUT_OTHER; +	req->retries = PS_RETRY; -static int pscsi_CDB_none(struct se_task *task) -{ -	return pscsi_blk_get_request(task); -} +	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, +			(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), +			pscsi_req_done); -/*	pscsi_get_cdb(): - * - * - */ -static unsigned char *pscsi_get_cdb(struct se_task *task) -{ -	struct pscsi_plugin_task *pt = PSCSI_TASK(task); +	return PYX_TRANSPORT_SENT_TO_TRANSPORT; -	return pt->pscsi_cdb; +fail: +	while (hbio) { +		struct bio *bio = hbio; +		hbio = hbio->bi_next; +		bio->bi_next = NULL; +		bio_endio(bio, 0);	/* XXX: should be error */ +	} +	return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;  }  /*	pscsi_get_sense_buffer(): @@ -1328,23 +1214,13 @@ static void pscsi_req_done(struct request *req, int uptodate)  	pt->pscsi_resid = req->resid_len;  	pscsi_process_SAM_status(task, pt); -	/* -	 * Release BIDI-READ if present -	 */ -	if (req->next_rq != NULL) -		__blk_put_request(req->q, req->next_rq); -  	__blk_put_request(req->q, req); -	pt->pscsi_req = NULL;  }  static struct se_subsystem_api pscsi_template = {  	.name			= "pscsi",  	.owner			= THIS_MODULE,  	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV, -	.cdb_none		= pscsi_CDB_none, -	.map_control_SG		= pscsi_map_SG, -	.map_data_SG		= pscsi_map_SG,  	.attach_hba		= pscsi_attach_hba,  	.detach_hba		= pscsi_detach_hba,  	.pmode_enable_hba	= pscsi_pmode_enable_hba, @@ -1358,7 +1234,6 @@ static struct se_subsystem_api pscsi_template = {  	.check_configfs_dev_params = pscsi_check_configfs_dev_params,  	.set_configfs_dev_params = pscsi_set_configfs_dev_params,  	.show_configfs_dev_params = pscsi_show_configfs_dev_params, -	.get_cdb		= pscsi_get_cdb,  	.get_sense_buffer	= pscsi_get_sense_buffer,  	.get_device_rev		= pscsi_get_device_rev,  	.get_device_type	= pscsi_get_device_type, diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index ebf4f1ae2c8..fdc17b6aefb 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -27,7 +27,6 @@ struct pscsi_plugin_task {  	int	pscsi_direction;  	int	pscsi_result;  	u32	pscsi_resid; -	struct request *pscsi_req;  	unsigned char pscsi_cdb[0];  } ____cacheline_aligned; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 3dd81d24d9a..5158d3846f1 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -27,7 +27,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/string.h>  #include <linux/parser.h>  #include <linux/timer.h> @@ -351,7 +350,7 @@ static struct rd_dev_sg_table 
*rd_get_sg_table(struct rd_dev *rd_dev, u32 page)  static int rd_MEMCPY_read(struct rd_request *req)  {  	struct se_task *task = &req->rd_task; -	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; +	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;  	struct rd_dev_sg_table *table;  	struct scatterlist *sg_d, *sg_s;  	void *dst, *src; @@ -390,12 +389,10 @@ static int rd_MEMCPY_read(struct rd_request *req)  				length = req->rd_size;  			dst = sg_virt(&sg_d[i++]) + dst_offset; -			if (!dst) -				BUG(); +			BUG_ON(!dst);  			src = sg_virt(&sg_s[j]) + src_offset; -			if (!src) -				BUG(); +			BUG_ON(!src);  			dst_offset = 0;  			src_offset = length; @@ -415,8 +412,7 @@ static int rd_MEMCPY_read(struct rd_request *req)  				length = req->rd_size;  			dst = sg_virt(&sg_d[i]) + dst_offset; -			if (!dst) -				BUG(); +			BUG_ON(!dst);  			if (sg_d[i].length == length) {  				i++; @@ -425,8 +421,7 @@ static int rd_MEMCPY_read(struct rd_request *req)  				dst_offset = length;  			src = sg_virt(&sg_s[j++]) + src_offset; -			if (!src) -				BUG(); +			BUG_ON(!src);  			src_offset = 0;  			page_end = 1; @@ -471,7 +466,7 @@ static int rd_MEMCPY_read(struct rd_request *req)  static int rd_MEMCPY_write(struct rd_request *req)  {  	struct se_task *task = &req->rd_task; -	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; +	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;  	struct rd_dev_sg_table *table;  	struct scatterlist *sg_d, *sg_s;  	void *dst, *src; @@ -510,12 +505,10 @@ static int rd_MEMCPY_write(struct rd_request *req)  				length = req->rd_size;  			src = sg_virt(&sg_s[i++]) + src_offset; -			if (!src) -				BUG(); +			BUG_ON(!src);  			dst = sg_virt(&sg_d[j]) + dst_offset; -			if (!dst) -				BUG(); +			BUG_ON(!dst);  			src_offset = 0;  			dst_offset = length; @@ -535,8 +528,7 @@ static int rd_MEMCPY_write(struct rd_request *req)  				length = req->rd_size;  			src = sg_virt(&sg_s[i]) + src_offset; -			if (!src) -				BUG(); +			BUG_ON(!src);  			if (sg_s[i].length == length) {  				i++; @@ -545,8 +537,7 @@ static int rd_MEMCPY_write(struct rd_request *req)  				src_offset = length;  			dst = sg_virt(&sg_d[j++]) + dst_offset; -			if (!dst) -				BUG(); +			BUG_ON(!dst);  			dst_offset = 0;  			page_end = 1; @@ -590,7 +581,7 @@ static int rd_MEMCPY_write(struct rd_request *req)   */  static int rd_MEMCPY_do_task(struct se_task *task)  { -	struct se_device *dev = task->se_dev; +	struct se_device *dev = task->task_se_cmd->se_dev;  	struct rd_request *req = RD_REQ(task);  	unsigned long long lba;  	int ret; @@ -700,17 +691,6 @@ static ssize_t rd_show_configfs_dev_params(  	return bl;  } -/*	rd_get_cdb(): (Part of se_subsystem_api_t template) - * - * - */ -static unsigned char *rd_get_cdb(struct se_task *task) -{ -	struct rd_request *req = RD_REQ(task); - -	return req->rd_scsi_cdb; -} -  static u32 rd_get_device_rev(struct se_device *dev)  {  	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ @@ -744,7 +724,6 @@ static struct se_subsystem_api rd_mcp_template = {  	.check_configfs_dev_params = rd_check_configfs_dev_params,  	.set_configfs_dev_params = rd_set_configfs_dev_params,  	.show_configfs_dev_params = rd_show_configfs_dev_params, -	.get_cdb		= rd_get_cdb,  	.get_device_rev		= rd_get_device_rev,  	.get_device_type	= rd_get_device_type,  	.get_blocks		= rd_get_blocks, diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 0d027732cd0..784e56a0410 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ 
-22,8 +22,6 @@ void rd_module_exit(void);  struct rd_request {  	struct se_task	rd_task; -	/* SCSI CDB from iSCSI Command PDU */ -	unsigned char	rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];  	/* Offset from start of page */  	u32		rd_offset;  	/* Starting page in Ramdisk for request */ diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c deleted file mode 100644 index 72843441d4f..00000000000 --- a/drivers/target/target_core_scdb.c +++ /dev/null @@ -1,105 +0,0 @@ -/******************************************************************************* - * Filename:  target_core_scdb.c - * - * This file contains the generic target engine Split CDB related functions. - * - * Copyright (c) 2004-2005 PyX Technologies, Inc. - * Copyright (c) 2005, 2006, 2007 SBE, Inc. - * Copyright (c) 2007-2010 Rising Tide Systems - * Copyright (c) 2008-2010 Linux-iSCSI.org - * - * Nicholas A. Bellinger <nab@kernel.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - ******************************************************************************/ - -#include <linux/net.h> -#include <linux/string.h> -#include <scsi/scsi.h> -#include <asm/unaligned.h> - -#include <target/target_core_base.h> -#include <target/target_core_transport.h> - -#include "target_core_scdb.h" - -/*	split_cdb_XX_6(): - * - *      21-bit LBA w/ 8-bit SECTORS - */ -void split_cdb_XX_6( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	cdb[1] = (lba >> 16) & 0x1f; -	cdb[2] = (lba >> 8) & 0xff; -	cdb[3] = lba & 0xff; -	cdb[4] = sectors & 0xff; -} - -/*	split_cdb_XX_10(): - * - *	32-bit LBA w/ 16-bit SECTORS - */ -void split_cdb_XX_10( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	put_unaligned_be32(lba, &cdb[2]); -	put_unaligned_be16(sectors, &cdb[7]); -} - -/*	split_cdb_XX_12(): - * - *	32-bit LBA w/ 32-bit SECTORS - */ -void split_cdb_XX_12( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	put_unaligned_be32(lba, &cdb[2]); -	put_unaligned_be32(sectors, &cdb[6]); -} - -/*	split_cdb_XX_16(): - * - *	64-bit LBA w/ 32-bit SECTORS - */ -void split_cdb_XX_16( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	put_unaligned_be64(lba, &cdb[2]); -	put_unaligned_be32(sectors, &cdb[10]); -} - -/* - *	split_cdb_XX_32(): - * - * 	64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32 - */ -void split_cdb_XX_32( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	put_unaligned_be64(lba, &cdb[12]); -	put_unaligned_be32(sectors, &cdb[28]); -} diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h deleted file mode 100644 index 48e9ccc9585..00000000000 --- a/drivers/target/target_core_scdb.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef TARGET_CORE_SCDB_H -#define TARGET_CORE_SCDB_H - -extern void split_cdb_XX_6(unsigned long long, 
u32, unsigned char *); -extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *); - -#endif /* TARGET_CORE_SCDB_H */ diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index a8d6e1dee93..874152aed94 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -32,7 +32,6 @@  #include <linux/delay.h>  #include <linux/timer.h>  #include <linux/string.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/proc_fs.h> diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 27d4925e51c..570b144a1ed 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -24,7 +24,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <linux/list.h> @@ -44,12 +43,12 @@  struct se_tmr_req *core_tmr_alloc_req(  	struct se_cmd *se_cmd,  	void *fabric_tmr_ptr, -	u8 function) +	u8 function, +	gfp_t gfp_flags)  {  	struct se_tmr_req *tmr; -	tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? -					GFP_ATOMIC : GFP_KERNEL); +	tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags);  	if (!tmr) {  		pr_err("Unable to allocate struct se_tmr_req\n");  		return ERR_PTR(-ENOMEM); @@ -67,15 +66,16 @@ void core_tmr_release_req(  	struct se_tmr_req *tmr)  {  	struct se_device *dev = tmr->tmr_dev; +	unsigned long flags;  	if (!dev) {  		kmem_cache_free(se_tmr_req_cache, tmr);  		return;  	} -	spin_lock_irq(&dev->se_tmr_lock); +	spin_lock_irqsave(&dev->se_tmr_lock, flags);  	list_del(&tmr->tmr_list); -	spin_unlock_irq(&dev->se_tmr_lock); +	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);  	kmem_cache_free(se_tmr_req_cache, tmr);  } @@ -100,54 +100,20 @@ static void core_tmr_handle_tas_abort(  	transport_cmd_finish_abort(cmd, 0);  } -int core_tmr_lun_reset( +static void core_tmr_drain_tmr_list(  	struct se_device *dev,  	struct se_tmr_req *tmr, -	struct list_head *preempt_and_abort_list, -	struct se_cmd *prout_cmd) +	struct list_head *preempt_and_abort_list)  { -	struct se_cmd *cmd, *tcmd; -	struct se_node_acl *tmr_nacl = NULL; -	struct se_portal_group *tmr_tpg = NULL; -	struct se_queue_obj *qobj = &dev->dev_queue_obj; +	LIST_HEAD(drain_tmr_list);  	struct se_tmr_req *tmr_p, *tmr_pp; -	struct se_task *task, *task_tmp; +	struct se_cmd *cmd;  	unsigned long flags; -	int fe_count, tas; -	/* -	 * TASK_ABORTED status bit, this is configurable via ConfigFS -	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page -	 * -	 * A task aborted status (TAS) bit set to zero specifies that aborted -	 * tasks shall be terminated by the device server without any response -	 * to the application client. A TAS bit set to one specifies that tasks -	 * aborted by the actions of an I_T nexus other than the I_T nexus on -	 * which the command was received shall be completed with TASK ABORTED -	 * status (see SAM-4). -	 */ -	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; -	/* -	 * Determine if this se_tmr is coming from a $FABRIC_MOD -	 * or struct se_device passthrough.. 
-	 */ -	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { -		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; -		tmr_tpg = tmr->task_cmd->se_sess->se_tpg; -		if (tmr_nacl && tmr_tpg) { -			pr_debug("LUN_RESET: TMR caller fabric: %s" -				" initiator port %s\n", -				tmr_tpg->se_tpg_tfo->get_fabric_name(), -				tmr_nacl->initiatorname); -		} -	} -	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", -		(preempt_and_abort_list) ? "Preempt" : "TMR", -		dev->transport->name, tas);  	/*  	 * Release all pending and outgoing TMRs aside from the received  	 * LUN_RESET tmr..  	 */ -	spin_lock_irq(&dev->se_tmr_lock); +	spin_lock_irqsave(&dev->se_tmr_lock, flags);  	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {  		/*  		 * Allow the received TMR to return with FUNCTION_COMPLETE. @@ -169,29 +135,48 @@ int core_tmr_lun_reset(  		    (core_scsi3_check_cdb_abort_and_preempt(  					preempt_and_abort_list, cmd) != 0))  			continue; -		spin_unlock_irq(&dev->se_tmr_lock); -		spin_lock_irqsave(&cmd->t_state_lock, flags); +		spin_lock(&cmd->t_state_lock);  		if (!atomic_read(&cmd->t_transport_active)) { -			spin_unlock_irqrestore(&cmd->t_state_lock, flags); -			spin_lock_irq(&dev->se_tmr_lock); +			spin_unlock(&cmd->t_state_lock);  			continue;  		}  		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { -			spin_unlock_irqrestore(&cmd->t_state_lock, flags); -			spin_lock_irq(&dev->se_tmr_lock); +			spin_unlock(&cmd->t_state_lock);  			continue;  		} +		spin_unlock(&cmd->t_state_lock); + +		list_move_tail(&tmr->tmr_list, &drain_tmr_list); +	} +	spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + +	while (!list_empty(&drain_tmr_list)) { +		tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list); +		list_del(&tmr->tmr_list); +		cmd = tmr_p->task_cmd; +  		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"  			" Response: 0x%02x, t_state: %d\n", -			(preempt_and_abort_list) ? "Preempt" : "", tmr_p, -			tmr_p->function, tmr_p->response, cmd->t_state); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +			(preempt_and_abort_list) ? "Preempt" : "", tmr, +			tmr->function, tmr->response, cmd->t_state); -		transport_cmd_finish_abort_tmr(cmd); -		spin_lock_irq(&dev->se_tmr_lock); +		transport_cmd_finish_abort(cmd, 1);  	} -	spin_unlock_irq(&dev->se_tmr_lock); +} + +static void core_tmr_drain_task_list( +	struct se_device *dev, +	struct se_cmd *prout_cmd, +	struct se_node_acl *tmr_nacl, +	int tas, +	struct list_head *preempt_and_abort_list) +{ +	LIST_HEAD(drain_task_list); +	struct se_cmd *cmd; +	struct se_task *task, *task_tmp; +	unsigned long flags; +	int fe_count;  	/*  	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.  	 
* This is following sam4r17, section 5.6 Aborting commands, Table 38 @@ -236,18 +221,28 @@ int core_tmr_lun_reset(  		if (prout_cmd == cmd)  			continue; -		list_del(&task->t_state_list); +		list_move_tail(&task->t_state_list, &drain_task_list);  		atomic_set(&task->task_state_active, 0); -		spin_unlock_irqrestore(&dev->execute_task_lock, flags); +		/* +		 * Remove from task execute list before processing drain_task_list +		 */ +		if (!list_empty(&task->t_execute_list)) +			__transport_remove_task_from_execute_queue(task, dev); +	} +	spin_unlock_irqrestore(&dev->execute_task_lock, flags); + +	while (!list_empty(&drain_task_list)) { +		task = list_entry(drain_task_list.next, struct se_task, t_state_list); +		list_del(&task->t_state_list); +		cmd = task->task_se_cmd; -		spin_lock_irqsave(&cmd->t_state_lock, flags);  		pr_debug("LUN_RESET: %s cmd: %p task: %p" -			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" -			"def_t_state: %d/%d cdb: 0x%02x\n", +			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" +			"cdb: 0x%02x\n",  			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,  			cmd->se_tfo->get_task_tag(cmd), 0,  			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, -			cmd->deferred_t_state, cmd->t_task_cdb[0]); +			cmd->t_task_cdb[0]);  		pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"  			" t_task_cdbs: %d t_task_cdbs_left: %d"  			" t_task_cdbs_sent: %d -- t_transport_active: %d" @@ -260,35 +255,24 @@ int core_tmr_lun_reset(  			atomic_read(&cmd->t_transport_stop),  			atomic_read(&cmd->t_transport_sent)); -		if (atomic_read(&task->task_active)) { -			atomic_set(&task->task_stop, 1); -			spin_unlock_irqrestore( -				&cmd->t_state_lock, flags); - -			pr_debug("LUN_RESET: Waiting for task: %p to shutdown" -				" for dev: %p\n", task, dev); -			wait_for_completion(&task->task_stop_comp); -			pr_debug("LUN_RESET Completed task: %p shutdown for" -				" dev: %p\n", task, dev); -			spin_lock_irqsave(&cmd->t_state_lock, flags); -			atomic_dec(&cmd->t_task_cdbs_left); +		/* +		 * If the command may be queued onto a workqueue cancel it now. +		 * +		 * This is equivalent to removal from the execute queue in the +		 * loop above, but we do it down here given that +		 * cancel_work_sync may block. 
+		 */ +		if (cmd->t_state == TRANSPORT_COMPLETE) +			cancel_work_sync(&cmd->work); -			atomic_set(&task->task_active, 0); -			atomic_set(&task->task_stop, 0); -		} else { -			if (atomic_read(&task->task_execute_queue) != 0) -				transport_remove_task_from_execute_queue(task, dev); -		} -		__transport_stop_task_timer(task, &flags); +		spin_lock_irqsave(&cmd->t_state_lock, flags); +		target_stop_task(task, &flags);  		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { -			spin_unlock_irqrestore( -					&cmd->t_state_lock, flags); +			spin_unlock_irqrestore(&cmd->t_state_lock, flags);  			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"  				" t_task_cdbs_ex_left: %d\n", task, dev,  				atomic_read(&cmd->t_task_cdbs_ex_left)); - -			spin_lock_irqsave(&dev->execute_task_lock, flags);  			continue;  		}  		fe_count = atomic_read(&cmd->t_fe_count); @@ -298,22 +282,31 @@ int core_tmr_lun_reset(  				" task: %p, t_fe_count: %d dev: %p\n", task,  				fe_count, dev);  			atomic_set(&cmd->t_transport_aborted, 1); -			spin_unlock_irqrestore(&cmd->t_state_lock, -						flags); -			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); +			spin_unlock_irqrestore(&cmd->t_state_lock, flags); -			spin_lock_irqsave(&dev->execute_task_lock, flags); +			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);  			continue;  		}  		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"  			" t_fe_count: %d dev: %p\n", task, fe_count, dev);  		atomic_set(&cmd->t_transport_aborted, 1);  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); -		spin_lock_irqsave(&dev->execute_task_lock, flags); +		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);  	} -	spin_unlock_irqrestore(&dev->execute_task_lock, flags); +} + +static void core_tmr_drain_cmd_list( +	struct se_device *dev, +	struct se_cmd *prout_cmd, +	struct se_node_acl *tmr_nacl, +	int tas, +	struct list_head *preempt_and_abort_list) +{ +	LIST_HEAD(drain_cmd_list); +	struct se_queue_obj *qobj = &dev->dev_queue_obj; +	struct se_cmd *cmd, *tcmd; +	unsigned long flags;  	/*  	 * Release all commands remaining in the struct se_device cmd queue.  	 * @@ -337,11 +330,26 @@ int core_tmr_lun_reset(  		 */  		if (prout_cmd == cmd)  			continue; +		/* +		 * Skip direct processing of TRANSPORT_FREE_CMD_INTR for +		 * HW target mode fabrics. +		 */ +		spin_lock(&cmd->t_state_lock); +		if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) { +			spin_unlock(&cmd->t_state_lock); +			continue; +		} +		spin_unlock(&cmd->t_state_lock); -		atomic_dec(&cmd->t_transport_queue_active); +		atomic_set(&cmd->t_transport_queue_active, 0);  		atomic_dec(&qobj->queue_cnt); -		list_del(&cmd->se_queue_node); -		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); +		list_move_tail(&cmd->se_queue_node, &drain_cmd_list); +	} +	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + +	while (!list_empty(&drain_cmd_list)) { +		cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node); +		list_del_init(&cmd->se_queue_node);  		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"  			" %d t_fe_count: %d\n", (preempt_and_abort_list) ? 
@@ -354,9 +362,53 @@ int core_tmr_lun_reset(
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
 				atomic_read(&cmd->t_fe_count));
-		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	}
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+}
+
+int core_tmr_lun_reset(
+        struct se_device *dev,
+        struct se_tmr_req *tmr,
+        struct list_head *preempt_and_abort_list,
+        struct se_cmd *prout_cmd)
+{
+	struct se_node_acl *tmr_nacl = NULL;
+	struct se_portal_group *tmr_tpg = NULL;
+	int tas;
+        /*
+	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+	/*
+	 * Determine if this se_tmr is coming from a $FABRIC_MOD
+	 * or struct se_device passthrough..
+	 */
+	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+		if (tmr_nacl && tmr_tpg) {
+			pr_debug("LUN_RESET: TMR caller fabric: %s"
+				" initiator port %s\n",
+				tmr_tpg->se_tpg_tfo->get_fabric_name(),
+				tmr_nacl->initiatorname);
+		}
+	}
+	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+		(preempt_and_abort_list) ? "Preempt" : "TMR",
+		dev->transport->name, tas);
+
+	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+	core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
+	core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
 
 	/*
 	 * Clear any legacy SPC-2 reservation when called during
 	 * LOGICAL UNIT RESET
@@ -379,3 +431,4 @@ int core_tmr_lun_reset(
 			dev->transport->name);
 	return 0;
 }
+
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 4f1ba4c5ef1..49fd0a9b0a5 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
 {
 	struct se_node_acl *acl;
 
-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
 		if (!strcmp(acl->initiatorname, initiatorname) &&
 		    !acl->dynamic_node_acl) {
-			spin_unlock_bh(&tpg->acl_node_lock);
+			spin_unlock_irq(&tpg->acl_node_lock);
 			return acl;
 		}
 	}
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);
 
 	return NULL;
 }
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
 		return NULL;
 	}
+	/*
+	 * Here we only create demo-mode MappedLUNs from the active
+	 * TPG LUNs if the fabric is not explicitly asking for
+	 * tpg_check_demo_mode_login_only() == 1.
+	 */ +	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && +	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) +		do { ; } while (0); +	else +		core_tpg_add_node_to_devs(acl, tpg); -	core_tpg_add_node_to_devs(acl, tpg); - -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	list_add_tail(&acl->acl_list, &tpg->acl_node_list);  	tpg->num_node_acls++; -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"  		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), @@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(  {  	struct se_node_acl *acl = NULL; -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);  	if (acl) {  		if (acl->dynamic_node_acl) { @@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(  			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"  				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),  				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); -			spin_unlock_bh(&tpg->acl_node_lock); +			spin_unlock_irq(&tpg->acl_node_lock);  			/*  			 * Release the locally allocated struct se_node_acl  			 * because * core_tpg_add_initiator_node_acl() returned @@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(  			" Node %s already exists for TPG %u, ignoring"  			" request.\n",  tpg->se_tpg_tfo->get_fabric_name(),  			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  		return ERR_PTR(-EEXIST);  	} -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	if (!se_nacl) {  		pr_err("struct se_node_acl pointer is NULL\n"); @@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(  		return ERR_PTR(-EINVAL);  	} -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	list_add_tail(&acl->acl_list, &tpg->acl_node_list);  	tpg->num_node_acls++; -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  done:  	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" @@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl(  	struct se_session *sess, *sess_tmp;  	int dynamic_acl = 0; -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	if (acl->dynamic_node_acl) {  		acl->dynamic_node_acl = 0;  		dynamic_acl = 1;  	}  	list_del(&acl->acl_list);  	tpg->num_node_acls--; -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	spin_lock_bh(&tpg->session_lock);  	list_for_each_entry_safe(sess, sess_tmp, @@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(  	struct se_node_acl *acl;  	int dynamic_acl = 0; -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);  	if (!acl) {  		pr_err("Access Control List entry for %s Initiator"  			" Node %s does not exists for TPG %hu, ignoring"  			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),  			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  		return -ENODEV;  	}  	if (acl->dynamic_node_acl) {  		acl->dynamic_node_acl = 0;  		dynamic_acl = 1;  	} -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	spin_lock_bh(&tpg->session_lock);  	
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { @@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(  				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);  			spin_unlock_bh(&tpg->session_lock); -			spin_lock_bh(&tpg->acl_node_lock); +			spin_lock_irq(&tpg->acl_node_lock);  			if (dynamic_acl)  				acl->dynamic_node_acl = 1; -			spin_unlock_bh(&tpg->acl_node_lock); +			spin_unlock_irq(&tpg->acl_node_lock);  			return -EEXIST;  		}  		/* @@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(  		if (init_sess)  			tpg->se_tpg_tfo->close_session(init_sess); -		spin_lock_bh(&tpg->acl_node_lock); +		spin_lock_irq(&tpg->acl_node_lock);  		if (dynamic_acl)  			acl->dynamic_node_acl = 1; -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  		return -EINVAL;  	}  	spin_unlock_bh(&tpg->session_lock); @@ -585,15 +593,15 @@ int core_tpg_set_initiator_node_queue_depth(  	if (init_sess)  		tpg->se_tpg_tfo->close_session(init_sess); -	pr_debug("Successfuly changed queue depth to: %d for Initiator" +	pr_debug("Successfully changed queue depth to: %d for Initiator"  		" Node: %s on %s Target Portal Group: %u\n", queue_depth,  		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),  		tpg->se_tpg_tfo->tpg_get_tag(tpg)); -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	if (dynamic_acl)  		acl->dynamic_node_acl = 1; -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	return 0;  } @@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)  	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1  	 * in transport_deregister_session().  	 */ -	spin_lock_bh(&se_tpg->acl_node_lock); +	spin_lock_irq(&se_tpg->acl_node_lock);  	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,  			acl_list) {  		list_del(&nacl->acl_list);  		se_tpg->num_node_acls--; -		spin_unlock_bh(&se_tpg->acl_node_lock); +		spin_unlock_irq(&se_tpg->acl_node_lock);  		core_tpg_wait_for_nacl_pr_ref(nacl);  		core_free_device_list_for_node(nacl, se_tpg);  		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); -		spin_lock_bh(&se_tpg->acl_node_lock); +		spin_lock_irq(&se_tpg->acl_node_lock);  	} -	spin_unlock_bh(&se_tpg->acl_node_lock); +	spin_unlock_irq(&se_tpg->acl_node_lock);  	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)  		core_tpg_release_virtual_lun0(se_tpg); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 89760329d5d..d7525580448 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -26,7 +26,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/net.h>  #include <linux/delay.h>  #include <linux/string.h> @@ -55,11 +54,11 @@  #include "target_core_alua.h"  #include "target_core_hba.h"  #include "target_core_pr.h" -#include "target_core_scdb.h"  #include "target_core_ua.h"  static int sub_api_initialized; +static struct workqueue_struct *target_completion_wq;  static struct kmem_cache *se_cmd_cache;  static struct kmem_cache *se_sess_cache;  struct kmem_cache *se_tmr_req_cache; @@ -70,30 +69,19 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache;  struct kmem_cache *t10_alua_tg_pt_gp_cache;  struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; -/* Used for transport_dev_get_map_*() */ -typedef int (*map_func_t)(struct se_task *, u32); -  static int transport_generic_write_pending(struct se_cmd 
*);  static int transport_processing_thread(void *param);  static int __transport_execute_tasks(struct se_device *dev);  static void transport_complete_task_attr(struct se_cmd *cmd); -static int transport_complete_qf(struct se_cmd *cmd);  static void transport_handle_queue_full(struct se_cmd *cmd, -		struct se_device *dev, int (*qf_callback)(struct se_cmd *)); -static void transport_direct_request_timeout(struct se_cmd *cmd); +		struct se_device *dev);  static void transport_free_dev_tasks(struct se_cmd *cmd); -static u32 transport_allocate_tasks(struct se_cmd *cmd, -		unsigned long long starting_lba, -		enum dma_data_direction data_direction, -		struct scatterlist *sgl, unsigned int nents);  static int transport_generic_get_mem(struct se_cmd *cmd); -static int transport_generic_remove(struct se_cmd *cmd, -		int session_reinstatement); -static void transport_release_fe_cmd(struct se_cmd *cmd); -static void transport_remove_cmd_from_queue(struct se_cmd *cmd, -		struct se_queue_obj *qobj); +static void transport_put_cmd(struct se_cmd *cmd); +static void transport_remove_cmd_from_queue(struct se_cmd *cmd);  static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); -static void transport_stop_all_task_timers(struct se_cmd *cmd); +static void transport_generic_request_failure(struct se_cmd *, int, int); +static void target_complete_ok_work(struct work_struct *work);  int init_se_kmem_caches(void)  { @@ -109,7 +97,7 @@ int init_se_kmem_caches(void)  	if (!se_tmr_req_cache) {  		pr_err("kmem_cache_create() for struct se_tmr_req"  				" failed\n"); -		goto out; +		goto out_free_cmd_cache;  	}  	se_sess_cache = kmem_cache_create("se_sess_cache",  			sizeof(struct se_session), __alignof__(struct se_session), @@ -117,14 +105,14 @@ int init_se_kmem_caches(void)  	if (!se_sess_cache) {  		pr_err("kmem_cache_create() for struct se_session"  				" failed\n"); -		goto out; +		goto out_free_tmr_req_cache;  	}  	se_ua_cache = kmem_cache_create("se_ua_cache",  			sizeof(struct se_ua), __alignof__(struct se_ua),  			0, NULL);  	if (!se_ua_cache) {  		pr_err("kmem_cache_create() for struct se_ua failed\n"); -		goto out; +		goto out_free_sess_cache;  	}  	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",  			sizeof(struct t10_pr_registration), @@ -132,7 +120,7 @@ int init_se_kmem_caches(void)  	if (!t10_pr_reg_cache) {  		pr_err("kmem_cache_create() for struct t10_pr_registration"  				" failed\n"); -		goto out; +		goto out_free_ua_cache;  	}  	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",  			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), @@ -140,7 +128,7 @@ int init_se_kmem_caches(void)  	if (!t10_alua_lu_gp_cache) {  		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"  				" failed\n"); -		goto out; +		goto out_free_pr_reg_cache;  	}  	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",  			sizeof(struct t10_alua_lu_gp_member), @@ -148,7 +136,7 @@ int init_se_kmem_caches(void)  	if (!t10_alua_lu_gp_mem_cache) {  		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"  				"cache failed\n"); -		goto out; +		goto out_free_lu_gp_cache;  	}  	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",  			sizeof(struct t10_alua_tg_pt_gp), @@ -156,7 +144,7 @@ int init_se_kmem_caches(void)  	if (!t10_alua_tg_pt_gp_cache) {  		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"  				"cache failed\n"); -		goto out; +		goto out_free_lu_gp_mem_cache;  	}  	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(  			
"t10_alua_tg_pt_gp_mem_cache", @@ -166,34 +154,41 @@ int init_se_kmem_caches(void)  	if (!t10_alua_tg_pt_gp_mem_cache) {  		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"  				"mem_t failed\n"); -		goto out; +		goto out_free_tg_pt_gp_cache;  	} +	target_completion_wq = alloc_workqueue("target_completion", +					       WQ_MEM_RECLAIM, 0); +	if (!target_completion_wq) +		goto out_free_tg_pt_gp_mem_cache; +  	return 0; + +out_free_tg_pt_gp_mem_cache: +	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); +out_free_tg_pt_gp_cache: +	kmem_cache_destroy(t10_alua_tg_pt_gp_cache); +out_free_lu_gp_mem_cache: +	kmem_cache_destroy(t10_alua_lu_gp_mem_cache); +out_free_lu_gp_cache: +	kmem_cache_destroy(t10_alua_lu_gp_cache); +out_free_pr_reg_cache: +	kmem_cache_destroy(t10_pr_reg_cache); +out_free_ua_cache: +	kmem_cache_destroy(se_ua_cache); +out_free_sess_cache: +	kmem_cache_destroy(se_sess_cache); +out_free_tmr_req_cache: +	kmem_cache_destroy(se_tmr_req_cache); +out_free_cmd_cache: +	kmem_cache_destroy(se_cmd_cache);  out: -	if (se_cmd_cache) -		kmem_cache_destroy(se_cmd_cache); -	if (se_tmr_req_cache) -		kmem_cache_destroy(se_tmr_req_cache); -	if (se_sess_cache) -		kmem_cache_destroy(se_sess_cache); -	if (se_ua_cache) -		kmem_cache_destroy(se_ua_cache); -	if (t10_pr_reg_cache) -		kmem_cache_destroy(t10_pr_reg_cache); -	if (t10_alua_lu_gp_cache) -		kmem_cache_destroy(t10_alua_lu_gp_cache); -	if (t10_alua_lu_gp_mem_cache) -		kmem_cache_destroy(t10_alua_lu_gp_mem_cache); -	if (t10_alua_tg_pt_gp_cache) -		kmem_cache_destroy(t10_alua_tg_pt_gp_cache); -	if (t10_alua_tg_pt_gp_mem_cache) -		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);  	return -ENOMEM;  }  void release_se_kmem_caches(void)  { +	destroy_workqueue(target_completion_wq);  	kmem_cache_destroy(se_cmd_cache);  	kmem_cache_destroy(se_tmr_req_cache);  	kmem_cache_destroy(se_sess_cache); @@ -234,10 +229,13 @@ void transport_init_queue_obj(struct se_queue_obj *qobj)  }  EXPORT_SYMBOL(transport_init_queue_obj); -static int transport_subsystem_reqmods(void) +void transport_subsystem_check_init(void)  {  	int ret; +	if (sub_api_initialized) +		return; +  	ret = request_module("target_core_iblock");  	if (ret != 0)  		pr_err("Unable to load target_core_iblock\n"); @@ -254,24 +252,8 @@ static int transport_subsystem_reqmods(void)  	if (ret != 0)  		pr_err("Unable to load target_core_stgt\n"); -	return 0; -} - -int transport_subsystem_check_init(void) -{ -	int ret; - -	if (sub_api_initialized) -		return 0; -	/* -	 * Request the loading of known TCM subsystem plugins.. 
-	 */ -	ret = transport_subsystem_reqmods(); -	if (ret < 0) -		return ret; -  	sub_api_initialized = 1; -	return 0; +	return;  }  struct se_session *transport_init_session(void) @@ -389,17 +371,18 @@ void transport_deregister_session(struct se_session *se_sess)  {  	struct se_portal_group *se_tpg = se_sess->se_tpg;  	struct se_node_acl *se_nacl; +	unsigned long flags;  	if (!se_tpg) {  		transport_free_session(se_sess);  		return;  	} -	spin_lock_bh(&se_tpg->session_lock); +	spin_lock_irqsave(&se_tpg->session_lock, flags);  	list_del(&se_sess->sess_list);  	se_sess->se_tpg = NULL;  	se_sess->fabric_sess_ptr = NULL; -	spin_unlock_bh(&se_tpg->session_lock); +	spin_unlock_irqrestore(&se_tpg->session_lock, flags);  	/*  	 * Determine if we need to do extra work for this initiator node's @@ -407,22 +390,22 @@ void transport_deregister_session(struct se_session *se_sess)  	 */  	se_nacl = se_sess->se_node_acl;  	if (se_nacl) { -		spin_lock_bh(&se_tpg->acl_node_lock); +		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);  		if (se_nacl->dynamic_node_acl) {  			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(  					se_tpg)) {  				list_del(&se_nacl->acl_list);  				se_tpg->num_node_acls--; -				spin_unlock_bh(&se_tpg->acl_node_lock); +				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);  				core_tpg_wait_for_nacl_pr_ref(se_nacl);  				core_free_device_list_for_node(se_nacl, se_tpg);  				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,  						se_nacl); -				spin_lock_bh(&se_tpg->acl_node_lock); +				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);  			}  		} -		spin_unlock_bh(&se_tpg->acl_node_lock); +		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);  	}  	transport_free_session(se_sess); @@ -437,16 +420,15 @@ EXPORT_SYMBOL(transport_deregister_session);   */  static void transport_all_task_dev_remove_state(struct se_cmd *cmd)  { -	struct se_device *dev; +	struct se_device *dev = cmd->se_dev;  	struct se_task *task;  	unsigned long flags; -	list_for_each_entry(task, &cmd->t_task_list, t_list) { -		dev = task->se_dev; -		if (!dev) -			continue; +	if (!dev) +		return; -		if (atomic_read(&task->task_active)) +	list_for_each_entry(task, &cmd->t_task_list, t_list) { +		if (task->task_flags & TF_ACTIVE)  			continue;  		if (!atomic_read(&task->task_state_active)) @@ -488,8 +470,6 @@ static int transport_cmd_check_stop(  			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,  			cmd->se_tfo->get_task_tag(cmd)); -		cmd->deferred_t_state = cmd->t_state; -		cmd->t_state = TRANSPORT_DEFERRED_CMD;  		atomic_set(&cmd->t_transport_active, 0);  		if (transport_off == 2)  			transport_all_task_dev_remove_state(cmd); @@ -507,8 +487,6 @@ static int transport_cmd_check_stop(  			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,  			cmd->se_tfo->get_task_tag(cmd)); -		cmd->deferred_t_state = cmd->t_state; -		cmd->t_state = TRANSPORT_DEFERRED_CMD;  		if (transport_off == 2)  			transport_all_task_dev_remove_state(cmd); @@ -593,35 +571,24 @@ check_lun:  void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)  { -	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); -	transport_lun_remove_cmd(cmd); - -	if (transport_cmd_check_stop_to_fabric(cmd)) -		return; -	if (remove) -		transport_generic_remove(cmd, 0); -} - -void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) -{ -	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); +	if (!cmd->se_tmr_req) +		transport_lun_remove_cmd(cmd);  	if (transport_cmd_check_stop_to_fabric(cmd))  		return; - -	
transport_generic_remove(cmd, 0); +	if (remove) { +		transport_remove_cmd_from_queue(cmd); +		transport_put_cmd(cmd); +	}  } -static void transport_add_cmd_to_queue( -	struct se_cmd *cmd, -	int t_state) +static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, +		bool at_head)  {  	struct se_device *dev = cmd->se_dev;  	struct se_queue_obj *qobj = &dev->dev_queue_obj;  	unsigned long flags; -	INIT_LIST_HEAD(&cmd->se_queue_node); -  	if (t_state) {  		spin_lock_irqsave(&cmd->t_state_lock, flags);  		cmd->t_state = t_state; @@ -630,15 +597,20 @@ static void transport_add_cmd_to_queue(  	}  	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); -	if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { -		cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; + +	/* If the cmd is already on the list, remove it before we add it */ +	if (!list_empty(&cmd->se_queue_node)) +		list_del(&cmd->se_queue_node); +	else +		atomic_inc(&qobj->queue_cnt); + +	if (at_head)  		list_add(&cmd->se_queue_node, &qobj->qobj_list); -	} else +	else  		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); -	atomic_inc(&cmd->t_transport_queue_active); +	atomic_set(&cmd->t_transport_queue_active, 1);  	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); -	atomic_inc(&qobj->queue_cnt);  	wake_up_interruptible(&qobj->thread_wq);  } @@ -655,19 +627,18 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)  	}  	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); -	atomic_dec(&cmd->t_transport_queue_active); +	atomic_set(&cmd->t_transport_queue_active, 0); -	list_del(&cmd->se_queue_node); +	list_del_init(&cmd->se_queue_node);  	atomic_dec(&qobj->queue_cnt);  	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);  	return cmd;  } -static void transport_remove_cmd_from_queue(struct se_cmd *cmd, -		struct se_queue_obj *qobj) +static void transport_remove_cmd_from_queue(struct se_cmd *cmd)  { -	struct se_cmd *t; +	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;  	unsigned long flags;  	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); @@ -675,14 +646,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,  		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);  		return;  	} - -	list_for_each_entry(t, &qobj->qobj_list, se_queue_node) -		if (t == cmd) { -			atomic_dec(&cmd->t_transport_queue_active); -			atomic_dec(&qobj->queue_cnt); -			list_del(&cmd->se_queue_node); -			break; -		} +	atomic_set(&cmd->t_transport_queue_active, 0); +	atomic_dec(&qobj->queue_cnt); +	list_del_init(&cmd->se_queue_node);  	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);  	if (atomic_read(&cmd->t_transport_queue_active)) { @@ -715,6 +681,13 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)  }  EXPORT_SYMBOL(transport_complete_sync_cache); +static void target_complete_failure_work(struct work_struct *work) +{ +	struct se_cmd *cmd = container_of(work, struct se_cmd, work); + +	transport_generic_request_failure(cmd, 1, 1); +} +  /*	transport_complete_task():   *   *	Called from interrupt and non interrupt context depending @@ -723,8 +696,7 @@ EXPORT_SYMBOL(transport_complete_sync_cache);  void transport_complete_task(struct se_task *task, int success)  {  	struct se_cmd *cmd = task->task_se_cmd; -	struct se_device *dev = task->se_dev; -	int t_state; +	struct se_device *dev = cmd->se_dev;  	unsigned long flags;  #if 0  	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, @@ -734,7 +706,7 @@ void transport_complete_task(struct se_task *task, int success)  		atomic_inc(&dev->depth_left);  	
spin_lock_irqsave(&cmd->t_state_lock, flags); -	atomic_set(&task->task_active, 0); +	task->task_flags &= ~TF_ACTIVE;  	/*  	 * See if any sense data exists, if so set the TASK_SENSE flag. @@ -753,68 +725,39 @@ void transport_complete_task(struct se_task *task, int success)  	 * See if we are waiting for outstanding struct se_task  	 * to complete for an exception condition  	 */ -	if (atomic_read(&task->task_stop)) { -		/* -		 * Decrement cmd->t_se_count if this task had -		 * previously thrown its timeout exception handler. -		 */ -		if (atomic_read(&task->task_timeout)) { -			atomic_dec(&cmd->t_se_count); -			atomic_set(&task->task_timeout, 0); -		} +	if (task->task_flags & TF_REQUEST_STOP) {  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -  		complete(&task->task_stop_comp);  		return;  	}  	/* -	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout -	 * left counter to determine when the struct se_cmd is ready to be queued to -	 * the processing thread. -	 */ -	if (atomic_read(&task->task_timeout)) { -		if (!atomic_dec_and_test( -				&cmd->t_task_cdbs_timeout_left)) { -			spin_unlock_irqrestore(&cmd->t_state_lock, -				flags); -			return; -		} -		t_state = TRANSPORT_COMPLETE_TIMEOUT; -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -		transport_add_cmd_to_queue(cmd, t_state); -		return; -	} -	atomic_dec(&cmd->t_task_cdbs_timeout_left); - -	/*  	 * Decrement the outstanding t_task_cdbs_left count.  The last  	 * struct se_task from struct se_cmd will complete itself into the  	 * device queue depending upon int success.  	 */  	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { -		if (!success) -			cmd->t_tasks_failed = 1; -  		spin_unlock_irqrestore(&cmd->t_state_lock, flags);  		return;  	}  	if (!success || cmd->t_tasks_failed) { -		t_state = TRANSPORT_COMPLETE_FAILURE;  		if (!task->task_error_status) {  			task->task_error_status =  				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;  			cmd->transport_error_status =  				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;  		} +		INIT_WORK(&cmd->work, target_complete_failure_work);  	} else {  		atomic_set(&cmd->t_transport_complete, 1); -		t_state = TRANSPORT_COMPLETE_OK; +		INIT_WORK(&cmd->work, target_complete_ok_work);  	} + +	cmd->t_state = TRANSPORT_COMPLETE; +	atomic_set(&cmd->t_transport_active, 1);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	transport_add_cmd_to_queue(cmd, t_state); +	queue_work(target_completion_wq, &cmd->work);  }  EXPORT_SYMBOL(transport_complete_task); @@ -901,14 +844,12 @@ static void __transport_add_task_to_execute_queue(  static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)  { -	struct se_device *dev; +	struct se_device *dev = cmd->se_dev;  	struct se_task *task;  	unsigned long flags;  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	list_for_each_entry(task, &cmd->t_task_list, t_list) { -		dev = task->se_dev; -  		if (atomic_read(&task->task_state_active))  			continue; @@ -933,38 +874,36 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)  	spin_lock_irqsave(&dev->execute_task_lock, flags);  	list_for_each_entry(task, &cmd->t_task_list, t_list) { -		if (atomic_read(&task->task_execute_queue)) +		if (!list_empty(&task->t_execute_list))  			continue;  		/*  		 * __transport_add_task_to_execute_queue() handles the  		 * SAM Task Attribute emulation if enabled  		 */  		__transport_add_task_to_execute_queue(task, task_prev, dev); -		atomic_set(&task->task_execute_queue, 1);  		task_prev = task;  	}  	spin_unlock_irqrestore(&dev->execute_task_lock, flags);  } -/*	
transport_remove_task_from_execute_queue(): - * - * - */ +void __transport_remove_task_from_execute_queue(struct se_task *task, +		struct se_device *dev) +{ +	list_del_init(&task->t_execute_list); +	atomic_dec(&dev->execute_tasks); +} +  void transport_remove_task_from_execute_queue(  	struct se_task *task,  	struct se_device *dev)  {  	unsigned long flags; -	if (atomic_read(&task->task_execute_queue) == 0) { -		dump_stack(); +	if (WARN_ON(list_empty(&task->t_execute_list)))  		return; -	}  	spin_lock_irqsave(&dev->execute_task_lock, flags); -	list_del(&task->t_execute_list); -	atomic_set(&task->task_execute_queue, 0); -	atomic_dec(&dev->execute_tasks); +	__transport_remove_task_from_execute_queue(task, dev);  	spin_unlock_irqrestore(&dev->execute_task_lock, flags);  } @@ -976,30 +915,26 @@ static void target_qf_do_work(struct work_struct *work)  {  	struct se_device *dev = container_of(work, struct se_device,  					qf_work_queue); +	LIST_HEAD(qf_cmd_list);  	struct se_cmd *cmd, *cmd_tmp;  	spin_lock_irq(&dev->qf_cmd_lock); -	list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { +	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); +	spin_unlock_irq(&dev->qf_cmd_lock); +	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {  		list_del(&cmd->se_qf_node);  		atomic_dec(&dev->dev_qf_count);  		smp_mb__after_atomic_dec(); -		spin_unlock_irq(&dev->qf_cmd_lock);  		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"  			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, -			(cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : +			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :  			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"  			: "UNKNOWN"); -		/* -		 * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd -		 * has been added to head of queue -		 */ -		transport_add_cmd_to_queue(cmd, cmd->t_state); -		spin_lock_irq(&dev->qf_cmd_lock); +		transport_add_cmd_to_queue(cmd, cmd->t_state, true);  	} -	spin_unlock_irq(&dev->qf_cmd_lock);  }  unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) @@ -1053,41 +988,6 @@ void transport_dump_dev_state(  	*bl += sprintf(b + *bl, "        ");  } -/*	transport_release_all_cmds(): - * - * - */ -static void transport_release_all_cmds(struct se_device *dev) -{ -	struct se_cmd *cmd, *tcmd; -	int bug_out = 0, t_state; -	unsigned long flags; - -	spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); -	list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, -				se_queue_node) { -		t_state = cmd->t_state; -		list_del(&cmd->se_queue_node); -		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, -				flags); - -		pr_err("Releasing ITT: 0x%08x, i_state: %u," -			" t_state: %u directly\n", -			cmd->se_tfo->get_task_tag(cmd), -			cmd->se_tfo->get_cmd_state(cmd), t_state); - -		transport_release_fe_cmd(cmd); -		bug_out = 1; - -		spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); -	} -	spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); -#if 0 -	if (bug_out) -		BUG(); -#endif -} -  void transport_dump_vpd_proto_id(  	struct t10_vpd *vpd,  	unsigned char *p_buf, @@ -1573,7 +1473,6 @@ transport_generic_get_task(struct se_cmd *cmd,  	INIT_LIST_HEAD(&task->t_state_list);  	init_completion(&task->task_stop_comp);  	task->task_se_cmd = cmd; -	task->se_dev = dev;  	task->task_data_direction = data_direction;  	return task; @@ -1598,6 +1497,7 @@ void transport_init_se_cmd(  	INIT_LIST_HEAD(&cmd->se_delayed_node);  	
INIT_LIST_HEAD(&cmd->se_ordered_node);  	INIT_LIST_HEAD(&cmd->se_qf_node); +	INIT_LIST_HEAD(&cmd->se_queue_node);  	INIT_LIST_HEAD(&cmd->t_task_list);  	init_completion(&cmd->transport_lun_fe_stop_comp); @@ -1641,21 +1541,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)  	return 0;  } -void transport_free_se_cmd( -	struct se_cmd *se_cmd) -{ -	if (se_cmd->se_tmr_req) -		core_tmr_release_req(se_cmd->se_tmr_req); -	/* -	 * Check and free any extended CDB buffer that was allocated -	 */ -	if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) -		kfree(se_cmd->t_task_cdb); -} -EXPORT_SYMBOL(transport_free_se_cmd); - -static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); -  /*	transport_generic_allocate_tasks():   *   *	Called from fabric RX Thread. @@ -1667,12 +1552,6 @@ int transport_generic_allocate_tasks(  	int ret;  	transport_generic_prepare_cdb(cdb); - -	/* -	 * This is needed for early exceptions. -	 */ -	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; -  	/*  	 * Ensure that the received CDB is less than the max (252 + 8) bytes  	 * for VARIABLE_LENGTH_CMD @@ -1730,26 +1609,6 @@ int transport_generic_allocate_tasks(  EXPORT_SYMBOL(transport_generic_allocate_tasks);  /* - * Used by fabric module frontends not defining a TFO->new_cmd_map() - * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD statis - */ -int transport_generic_handle_cdb( -	struct se_cmd *cmd) -{ -	if (!cmd->se_lun) { -		dump_stack(); -		pr_err("cmd->se_lun is NULL\n"); -		return -EINVAL; -	} - -	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); -	return 0; -} -EXPORT_SYMBOL(transport_generic_handle_cdb); - -static void transport_generic_request_failure(struct se_cmd *, -			struct se_device *, int, int); -/*   * Used by fabric module frontends to queue tasks directly.   * Many only be used from process context only   */ @@ -1773,7 +1632,7 @@ int transport_handle_cdb_direct(  	 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following  	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()  	 * in existing usage to ensure that outstanding descriptors are handled -	 * correctly during shutdown via transport_generic_wait_for_tasks() +	 * correctly during shutdown via transport_wait_for_tasks()  	 *  	 * Also, we don't take cmd->t_state_lock here as we only expect  	 * this to be called for initial descriptor submission. @@ -1790,7 +1649,7 @@ int transport_handle_cdb_direct(  		return 0;  	else if (ret < 0) {  		cmd->transport_error_status = ret; -		transport_generic_request_failure(cmd, NULL, 0, +		transport_generic_request_failure(cmd, 0,  				(cmd->data_direction != DMA_TO_DEVICE));  	}  	return 0; @@ -1811,7 +1670,7 @@ int transport_generic_handle_cdb_map(  		return -EINVAL;  	} -	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); +	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);  	return 0;  }  EXPORT_SYMBOL(transport_generic_handle_cdb_map); @@ -1841,7 +1700,7 @@ int transport_generic_handle_data(  	if (transport_check_aborted_status(cmd, 1) != 0)  		return 0; -	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); +	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);  	return 0;  }  EXPORT_SYMBOL(transport_generic_handle_data); @@ -1853,12 +1712,7 @@ EXPORT_SYMBOL(transport_generic_handle_data);  int transport_generic_handle_tmr(  	struct se_cmd *cmd)  { -	/* -	 * This is needed for early exceptions. 
-	 */ -	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - -	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); +	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);  	return 0;  }  EXPORT_SYMBOL(transport_generic_handle_tmr); @@ -1866,10 +1720,36 @@ EXPORT_SYMBOL(transport_generic_handle_tmr);  void transport_generic_free_cmd_intr(  	struct se_cmd *cmd)  { -	transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); +	transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false);  }  EXPORT_SYMBOL(transport_generic_free_cmd_intr); +/* + * If the task is active, request it to be stopped and sleep until it + * has completed. + */ +bool target_stop_task(struct se_task *task, unsigned long *flags) +{ +	struct se_cmd *cmd = task->task_se_cmd; +	bool was_active = false; + +	if (task->task_flags & TF_ACTIVE) { +		task->task_flags |= TF_REQUEST_STOP; +		spin_unlock_irqrestore(&cmd->t_state_lock, *flags); + +		pr_debug("Task %p waiting to complete\n", task); +		wait_for_completion(&task->task_stop_comp); +		pr_debug("Task %p stopped successfully\n", task); + +		spin_lock_irqsave(&cmd->t_state_lock, *flags); +		atomic_dec(&cmd->t_task_cdbs_left); +		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); +		was_active = true; +	} + +	return was_active; +} +  static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)  {  	struct se_task *task, *task_tmp; @@ -1885,51 +1765,26 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	list_for_each_entry_safe(task, task_tmp,  				&cmd->t_task_list, t_list) { -		pr_debug("task_no[%d] - Processing task %p\n", -				task->task_no, task); +		pr_debug("Processing task %p\n", task);  		/*  		 * If the struct se_task has not been sent and is not active,  		 * remove the struct se_task from the execution queue.  		 */ -		if (!atomic_read(&task->task_sent) && -		    !atomic_read(&task->task_active)) { +		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {  			spin_unlock_irqrestore(&cmd->t_state_lock,  					flags);  			transport_remove_task_from_execute_queue(task, -					task->se_dev); +					cmd->se_dev); -			pr_debug("task_no[%d] - Removed from execute queue\n", -				task->task_no); +			pr_debug("Task %p removed from execute queue\n", task);  			spin_lock_irqsave(&cmd->t_state_lock, flags);  			continue;  		} -		/* -		 * If the struct se_task is active, sleep until it is returned -		 * from the plugin. 
-	 */ -		if (atomic_read(&task->task_active)) { -			atomic_set(&task->task_stop, 1); -			spin_unlock_irqrestore(&cmd->t_state_lock, -					flags); - -			pr_debug("task_no[%d] - Waiting to complete\n", -				task->task_no); -			wait_for_completion(&task->task_stop_comp); -			pr_debug("task_no[%d] - Stopped successfully\n", -				task->task_no); - -			spin_lock_irqsave(&cmd->t_state_lock, flags); -			atomic_dec(&cmd->t_task_cdbs_left); - -			atomic_set(&task->task_active, 0); -			atomic_set(&task->task_stop, 0); -		} else { -			pr_debug("task_no[%d] - Did nothing\n", task->task_no); +		if (!target_stop_task(task, &flags)) { +			pr_debug("Task %p - did nothing\n", task);  			ret++;  		} - -		__transport_stop_task_timer(task, &flags);  	}  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -1941,7 +1796,6 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)   */  static void transport_generic_request_failure(  	struct se_cmd *cmd, -	struct se_device *dev,  	int complete,  	int sc)  { @@ -1950,10 +1804,9 @@ static void transport_generic_request_failure(  	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"  		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),  		cmd->t_task_cdb[0]); -	pr_debug("-----[ i_state: %d t_state/def_t_state:" -		" %d/%d transport_error_status: %d\n", +	pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",  		cmd->se_tfo->get_cmd_state(cmd), -		cmd->t_state, cmd->deferred_t_state, +		cmd->t_state,  		cmd->transport_error_status);  	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"  		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" @@ -1966,10 +1819,6 @@ static void transport_generic_request_failure(  		atomic_read(&cmd->t_transport_stop),  		atomic_read(&cmd->t_transport_sent)); -	transport_stop_all_task_timers(cmd); - -	if (dev) -		atomic_inc(&dev->depth_left);  	/*  	 * For SAM Task Attribute emulation for failed struct se_cmd  	 */ @@ -1977,7 +1826,6 @@ static void transport_generic_request_failure(  		transport_complete_task_attr(cmd);  	if (complete) { -		transport_direct_request_timeout(cmd);  		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;  	} @@ -2053,8 +1901,14 @@ static void transport_generic_request_failure(  		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;  		break;  	} - -	if (!sc) +	/* +	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, +	 * make the call to transport_send_check_condition_and_sense() +	 * directly.  Otherwise expect the fabric to make the call to +	 * transport_send_check_condition_and_sense() after handling +	 * possible unsolicited write data payloads. 
+	 */ +	if (!sc && !cmd->se_tfo->new_cmd_map)  		transport_new_cmd_failure(cmd);  	else {  		ret = transport_send_check_condition_and_sense(cmd, @@ -2070,46 +1924,8 @@ check_stop:  	return;  queue_full: -	cmd->t_state = TRANSPORT_COMPLETE_OK; -	transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); -} - -static void transport_direct_request_timeout(struct se_cmd *cmd) -{ -	unsigned long flags; - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (!atomic_read(&cmd->t_transport_timeout)) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} -	if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} - -	atomic_sub(atomic_read(&cmd->t_transport_timeout), -		   &cmd->t_se_count); -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -} - -static void transport_generic_request_timeout(struct se_cmd *cmd) -{ -	unsigned long flags; - -	/* -	 * Reset cmd->t_se_count to allow transport_generic_remove() -	 * to allow last call to free memory resources. -	 */ -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (atomic_read(&cmd->t_transport_timeout) > 1) { -		int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); - -		atomic_sub(tmp, &cmd->t_se_count); -	} -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	transport_generic_remove(cmd, 0); +	cmd->t_state = TRANSPORT_COMPLETE_QF_OK; +	transport_handle_queue_full(cmd, cmd->se_dev);  }  static inline u32 transport_lba_21(unsigned char *cdb) @@ -2154,127 +1970,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)  	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);  } -/* - * Called from interrupt context. - */ -static void transport_task_timeout_handler(unsigned long data) -{ -	struct se_task *task = (struct se_task *)data; -	struct se_cmd *cmd = task->task_se_cmd; -	unsigned long flags; - -	pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (task->task_flags & TF_STOP) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} -	task->task_flags &= ~TF_RUNNING; - -	/* -	 * Determine if transport_complete_task() has already been called. -	 */ -	if (!atomic_read(&task->task_active)) { -		pr_debug("transport task: %p cmd: %p timeout task_active" -				" == 0\n", task, cmd); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} - -	atomic_inc(&cmd->t_se_count); -	atomic_inc(&cmd->t_transport_timeout); -	cmd->t_tasks_failed = 1; - -	atomic_set(&task->task_timeout, 1); -	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; -	task->task_scsi_status = 1; - -	if (atomic_read(&task->task_stop)) { -		pr_debug("transport task: %p cmd: %p timeout task_stop" -				" == 1\n", task, cmd); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		complete(&task->task_stop_comp); -		return; -	} - -	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { -		pr_debug("transport task: %p cmd: %p timeout non zero" -				" t_task_cdbs_left\n", task, cmd); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} -	pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", -			task, cmd); - -	cmd->t_state = TRANSPORT_COMPLETE_FAILURE; -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); -} - -/* - * Called with cmd->t_state_lock held. 
- */ -static void transport_start_task_timer(struct se_task *task) -{ -	struct se_device *dev = task->se_dev; -	int timeout; - -	if (task->task_flags & TF_RUNNING) -		return; -	/* -	 * If the task_timeout is disabled, exit now. -	 */ -	timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; -	if (!timeout) -		return; - -	init_timer(&task->task_timer); -	task->task_timer.expires = (get_jiffies_64() + timeout * HZ); -	task->task_timer.data = (unsigned long) task; -	task->task_timer.function = transport_task_timeout_handler; - -	task->task_flags |= TF_RUNNING; -	add_timer(&task->task_timer); -#if 0 -	pr_debug("Starting task timer for cmd: %p task: %p seconds:" -		" %d\n", task->task_se_cmd, task, timeout); -#endif -} - -/* - * Called with spin_lock_irq(&cmd->t_state_lock) held. - */ -void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) -{ -	struct se_cmd *cmd = task->task_se_cmd; - -	if (!task->task_flags & TF_RUNNING) -		return; - -	task->task_flags |= TF_STOP; -	spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - -	del_timer_sync(&task->task_timer); - -	spin_lock_irqsave(&cmd->t_state_lock, *flags); -	task->task_flags &= ~TF_RUNNING; -	task->task_flags &= ~TF_STOP; -} - -static void transport_stop_all_task_timers(struct se_cmd *cmd) -{ -	struct se_task *task = NULL, *task_tmp; -	unsigned long flags; - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	list_for_each_entry_safe(task, task_tmp, -				&cmd->t_task_list, t_list) -		__transport_stop_task_timer(task, &flags); -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -} -  static inline int transport_tcq_window_closed(struct se_device *dev)  {  	if (dev->dev_tcq_window_closed++ < @@ -2379,7 +2074,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)  	if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {  		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; -		transport_generic_request_failure(cmd, NULL, 0, 1); +		transport_generic_request_failure(cmd, 0, 1);  		return 0;  	} @@ -2442,9 +2137,7 @@ check_depth:  	}  	task = list_first_entry(&dev->execute_task_list,  				struct se_task, t_execute_list); -	list_del(&task->t_execute_list); -	atomic_set(&task->task_execute_queue, 0); -	atomic_dec(&dev->execute_tasks); +	__transport_remove_task_from_execute_queue(task, dev);  	spin_unlock_irq(&dev->execute_task_lock);  	atomic_dec(&dev->depth_left); @@ -2452,15 +2145,13 @@ check_depth:  	cmd = task->task_se_cmd;  	spin_lock_irqsave(&cmd->t_state_lock, flags); -	atomic_set(&task->task_active, 1); -	atomic_set(&task->task_sent, 1); +	task->task_flags |= (TF_ACTIVE | TF_SENT);  	atomic_inc(&cmd->t_task_cdbs_sent);  	if (atomic_read(&cmd->t_task_cdbs_sent) ==  	    cmd->t_task_list_num) -		atomic_set(&cmd->transport_sent, 1); +		atomic_set(&cmd->t_transport_sent, 1); -	transport_start_task_timer(task);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags);  	/*  	 * The struct se_cmd->transport_emulate_cdb() function pointer is used @@ -2471,10 +2162,13 @@ check_depth:  		error = cmd->transport_emulate_cdb(cmd);  		if (error != 0) {  			cmd->transport_error_status = error; -			atomic_set(&task->task_active, 0); -			atomic_set(&cmd->transport_sent, 0); +			spin_lock_irqsave(&cmd->t_state_lock, flags); +			task->task_flags &= ~TF_ACTIVE; +			spin_unlock_irqrestore(&cmd->t_state_lock, flags); +			atomic_set(&cmd->t_transport_sent, 0);  			transport_stop_tasks_for_cmd(cmd); -			transport_generic_request_failure(cmd, dev, 0, 1); +			atomic_inc(&dev->depth_left); +			transport_generic_request_failure(cmd, 0, 1);  			
goto check_depth;  		}  		/* @@ -2507,10 +2201,13 @@ check_depth:  		if (error != 0) {  			cmd->transport_error_status = error; -			atomic_set(&task->task_active, 0); -			atomic_set(&cmd->transport_sent, 0); +			spin_lock_irqsave(&cmd->t_state_lock, flags); +			task->task_flags &= ~TF_ACTIVE; +			spin_unlock_irqrestore(&cmd->t_state_lock, flags); +			atomic_set(&cmd->t_transport_sent, 0);  			transport_stop_tasks_for_cmd(cmd); -			transport_generic_request_failure(cmd, dev, 0, 1); +			atomic_inc(&dev->depth_left); +			transport_generic_request_failure(cmd, 0, 1);  		}  	} @@ -2532,8 +2229,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)  	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);  } -static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); -  static inline u32 transport_get_sectors_6(  	unsigned char *cdb,  	struct se_cmd *cmd, @@ -2746,13 +2441,16 @@ out:  static int transport_get_sense_data(struct se_cmd *cmd)  {  	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; -	struct se_device *dev; +	struct se_device *dev = cmd->se_dev;  	struct se_task *task = NULL, *task_tmp;  	unsigned long flags;  	u32 offset = 0;  	WARN_ON(!cmd->se_lun); +	if (!dev) +		return 0; +  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2761,14 +2459,9 @@ static int transport_get_sense_data(struct se_cmd *cmd)  	list_for_each_entry_safe(task, task_tmp,  				&cmd->t_task_list, t_list) { -  		if (!task->task_sense)  			continue; -		dev = task->se_dev; -		if (!dev) -			continue; -  		if (!dev->transport->get_sense_buffer) {  			pr_err("dev->transport->get_sense_buffer"  					" is NULL\n"); @@ -2777,9 +2470,9 @@ static int transport_get_sense_data(struct se_cmd *cmd)  		sense_buffer = dev->transport->get_sense_buffer(task);  		if (!sense_buffer) { -			pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" +			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"  				" sense buffer for task with sense\n", -				cmd->se_tfo->get_task_tag(cmd), task->task_no); +				cmd->se_tfo->get_task_tag(cmd), task);  			continue;  		}  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2808,7 +2501,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)  static int  transport_handle_reservation_conflict(struct se_cmd *cmd)  { -	cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;  	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;  	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;  	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; @@ -2847,12 +2539,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)  			" transport_dev_end_lba(): %llu\n",  			cmd->t_task_lba, sectors,  			transport_dev_end_lba(dev)); -		pr_err("  We should return CHECK_CONDITION" -		       " but we don't yet\n"); -		return 0; +		return -EINVAL;  	} -	return sectors; +	return 0; +} + +static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) +{ +	/* +	 * Determine if the received WRITE_SAME is used for direct +	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI +	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1 +	 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code. 
+	 */ +	int passthrough = (dev->transport->transport_type == +				TRANSPORT_PLUGIN_PHBA_PDEV); + +	if (!passthrough) { +		if ((flags[0] & 0x04) || (flags[0] & 0x02)) { +			pr_err("WRITE_SAME PBDATA and LBDATA" +				" bits not supported for Block Discard" +				" Emulation\n"); +			return -ENOSYS; +		} +		/* +		 * Currently for the emulated case we only accept +		 * tpws with the UNMAP=1 bit set. +		 */ +		if (!(flags[0] & 0x08)) { +			pr_err("WRITE_SAME w/o UNMAP bit not" +				" supported for Block Discard Emulation\n"); +			return -ENOSYS; +		} +	} + +	return 0;  }  /*	transport_generic_cmd_sequencer(): @@ -2879,8 +2601,6 @@ static int transport_generic_cmd_sequencer(  	 * Check for an existing UNIT ATTENTION condition  	 */  	if (core_scsi3_ua_check(cmd, cdb) < 0) { -		cmd->transport_wait_for_tasks = -				&transport_nop_wait_for_tasks;  		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;  		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;  		return -EINVAL; @@ -2890,7 +2610,6 @@ static int transport_generic_cmd_sequencer(  	 */  	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);  	if (ret != 0) { -		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;  		/*  		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';  		 * The ALUA additional sense code qualifier (ASCQ) is determined @@ -2929,7 +2648,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_6;  		cmd->t_task_lba = transport_lba_21(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2938,7 +2656,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_10;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2947,7 +2664,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_12;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2956,7 +2672,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_16;  		cmd->t_task_lba = transport_lba_64(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2965,7 +2680,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_6;  		cmd->t_task_lba = transport_lba_21(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2974,7 +2688,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_10;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->t_tasks_fua = (cdb[1] & 0x8);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -2984,7 +2697,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_12;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->t_tasks_fua = (cdb[1] & 0x8);  	
	cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -2994,7 +2706,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_16;  		cmd->t_task_lba = transport_lba_64(cdb);  		cmd->t_tasks_fua = (cdb[1] & 0x8);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -3007,18 +2718,14 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_10;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; -		passthrough = (dev->transport->transport_type == -				TRANSPORT_PLUGIN_PHBA_PDEV); -		/* -		 * Skip the remaining assignments for TCM/PSCSI passthrough -		 */ -		if (passthrough) -			break; + +		if (dev->transport->transport_type == +				TRANSPORT_PLUGIN_PHBA_PDEV) +			goto out_unsupported_cdb;  		/* -		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok() +		 * Setup BIDI XOR callback to be run after I/O completion.  		 */  		cmd->transport_complete_callback = &transport_xor_callback;  		cmd->t_tasks_fua = (cdb[1] & 0x8); @@ -3042,19 +2749,14 @@ static int transport_generic_cmd_sequencer(  			 * Use WRITE_32 and READ_32 opcodes for the emulated  			 * XDWRITE_READ_32 logic.  			 */ -			cmd->transport_split_cdb = &split_cdb_XX_32;  			cmd->t_task_lba = transport_lba_64_ext(cdb);  			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; -			/* -			 * Skip the remaining assignments for TCM/PSCSI passthrough -			 */  			if (passthrough) -				break; - +				goto out_unsupported_cdb;  			/* -			 * Setup BIDI XOR callback to be run during -			 * transport_generic_complete_ok() +			 * Setup BIDI XOR callback to be run during after I/O +			 * completion.  			 */  			cmd->transport_complete_callback = &transport_xor_callback;  			cmd->t_tasks_fua = (cdb[10] & 0x8); @@ -3065,7 +2767,7 @@ static int transport_generic_cmd_sequencer(  				goto out_unsupported_cdb;  			if (sectors) -				size = transport_get_size(sectors, cdb, cmd); +				size = transport_get_size(1, cdb, cmd);  			else {  				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"  				       " supported\n"); @@ -3075,27 +2777,9 @@ static int transport_generic_cmd_sequencer(  			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);  			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; -			/* -			 * Skip the remaining assignments for TCM/PSCSI passthrough -			 */ -			if (passthrough) -				break; - -			if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { -				pr_err("WRITE_SAME PBDATA and LBDATA" -					" bits not supported for Block Discard" -					" Emulation\n"); +			if (target_check_write_same_discard(&cdb[10], dev) < 0)  				goto out_invalid_cdb_field; -			} -			/* -			 * Currently for the emulated case we only accept -			 * tpws with the UNMAP=1 bit set. -			 */ -			if (!(cdb[10] & 0x08)) { -				pr_err("WRITE_SAME w/o UNMAP bit not" -					" supported for Block Discard Emulation\n"); -				goto out_invalid_cdb_field; -			} +  			break;  		default:  			pr_err("VARIABLE_LENGTH_CMD service action" @@ -3330,10 +3014,12 @@ static int transport_generic_cmd_sequencer(  		cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;  		/*  		 * Check to ensure that LBA + Range does not exceed past end of -		 * device. 
+		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls  		 */ -		if (!transport_cmd_get_valid_sectors(cmd)) -			goto out_invalid_cdb_field; +		if ((cmd->t_task_lba != 0) || (sectors != 0)) { +			if (transport_cmd_get_valid_sectors(cmd) < 0) +				goto out_invalid_cdb_field; +		}  		break;  	case UNMAP:  		size = get_unaligned_be16(&cdb[7]); @@ -3345,40 +3031,38 @@ static int transport_generic_cmd_sequencer(  			goto out_unsupported_cdb;  		if (sectors) -			size = transport_get_size(sectors, cdb, cmd); +			size = transport_get_size(1, cdb, cmd);  		else {  			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");  			goto out_invalid_cdb_field;  		}  		cmd->t_task_lba = get_unaligned_be64(&cdb[2]); -		passthrough = (dev->transport->transport_type == -				TRANSPORT_PLUGIN_PHBA_PDEV); -		/* -		 * Determine if the received WRITE_SAME_16 is used to for direct -		 * passthrough into Linux/SCSI with struct request via TCM/pSCSI -		 * or we are signaling the use of internal WRITE_SAME + UNMAP=1 -		 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and -		 * TCM/FILEIO subsystem plugin backstores. -		 */ -		if (!passthrough) { -			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { -				pr_err("WRITE_SAME PBDATA and LBDATA" -					" bits not supported for Block Discard" -					" Emulation\n"); -				goto out_invalid_cdb_field; -			} -			/* -			 * Currently for the emulated case we only accept -			 * tpws with the UNMAP=1 bit set. -			 */ -			if (!(cdb[1] & 0x08)) { -				pr_err("WRITE_SAME w/o UNMAP bit not " -					" supported for Block Discard Emulation\n"); -				goto out_invalid_cdb_field; -			} +		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + +		if (target_check_write_same_discard(&cdb[1], dev) < 0) +			goto out_invalid_cdb_field; +		break; +	case WRITE_SAME: +		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); +		if (sector_ret) +			goto out_unsupported_cdb; + +		if (sectors) +			size = transport_get_size(1, cdb, cmd); +		else {  			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); +			goto out_invalid_cdb_field;  		} + +		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);  		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; +		/* +		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence +		 * of byte 1 bit 3 UNMAP instead of original reserved field +		 */ +		if (target_check_write_same_discard(&cdb[1], dev) < 0) +			goto out_invalid_cdb_field;  		break;  	case ALLOW_MEDIUM_REMOVAL:  	case GPCMD_CLOSE_TRACK: @@ -3412,7 +3096,6 @@ static int transport_generic_cmd_sequencer(  		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"  			" 0x%02x, sending CHECK_CONDITION.\n",  			cmd->se_tfo->get_fabric_name(), cdb[0]); -		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;  		goto out_unsupported_cdb;  	} @@ -3470,8 +3153,7 @@ out_invalid_cdb_field:  }  /* - * Called from transport_generic_complete_ok() and - * transport_generic_request_failure() to determine which dormant/delayed + * Called from I/O completion to determine which dormant/delayed   * and ordered cmds need to have their tasks added to the execution queue. 
*/  static void transport_complete_task_attr(struct se_cmd *cmd) @@ -3539,12 +3221,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)  		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);  } -static int transport_complete_qf(struct se_cmd *cmd) +static void transport_complete_qf(struct se_cmd *cmd)  {  	int ret = 0; -	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) -		return cmd->se_tfo->queue_status(cmd); +	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) +		transport_complete_task_attr(cmd); + +	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { +		ret = cmd->se_tfo->queue_status(cmd); +		if (ret) +			goto out; +	}  	switch (cmd->data_direction) {  	case DMA_FROM_DEVICE: @@ -3554,7 +3242,7 @@ static int transport_complete_qf(struct se_cmd *cmd)  		if (cmd->t_bidi_data_sg) {  			ret = cmd->se_tfo->queue_data_in(cmd);  			if (ret < 0) -				return ret; +				break;  		}  		/* Fall through for DMA_TO_DEVICE */  	case DMA_NONE: @@ -3564,17 +3252,20 @@ static int transport_complete_qf(struct se_cmd *cmd)  		break;  	} -	return ret; +out: +	if (ret < 0) { +		transport_handle_queue_full(cmd, cmd->se_dev); +		return; +	} +	transport_lun_remove_cmd(cmd); +	transport_cmd_check_stop_to_fabric(cmd);  }  static void transport_handle_queue_full(  	struct se_cmd *cmd, -	struct se_device *dev, -	int (*qf_callback)(struct se_cmd *)) +	struct se_device *dev)  {  	spin_lock_irq(&dev->qf_cmd_lock); -	cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; -	cmd->transport_qf_callback = qf_callback;  	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);  	atomic_inc(&dev->dev_qf_count);  	smp_mb__after_atomic_inc(); @@ -3583,9 +3274,11 @@ static void transport_handle_queue_full(  	schedule_work(&cmd->se_dev->qf_work_queue);  } -static void transport_generic_complete_ok(struct se_cmd *cmd) +static void target_complete_ok_work(struct work_struct *work)  { +	struct se_cmd *cmd = container_of(work, struct se_cmd, work);  	int reason = 0, ret; +  	/*  	 * Check if we need to move delayed/dormant tasks from cmds on the  	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task @@ -3600,14 +3293,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)  	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)  		schedule_work(&cmd->se_dev->qf_work_queue); -	if (cmd->transport_qf_callback) { -		ret = cmd->transport_qf_callback(cmd); -		if (ret < 0) -			goto queue_full; - -		cmd->transport_qf_callback = NULL; -		goto done; -	}  	/*  	 * Check if we need to retrieve a sense buffer from  	 * the struct se_cmd in question. 
@@ -3683,7 +3368,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)  		break;  	} -done:  	transport_lun_remove_cmd(cmd);  	transport_cmd_check_stop_to_fabric(cmd);  	return; @@ -3691,34 +3375,35 @@ done:  queue_full:  	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"  		" data_direction: %d\n", cmd, cmd->data_direction); -	transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); +	cmd->t_state = TRANSPORT_COMPLETE_QF_OK; +	transport_handle_queue_full(cmd, cmd->se_dev);  }  static void transport_free_dev_tasks(struct se_cmd *cmd)  {  	struct se_task *task, *task_tmp;  	unsigned long flags; +	LIST_HEAD(dispose_list);  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	list_for_each_entry_safe(task, task_tmp,  				&cmd->t_task_list, t_list) { -		if (atomic_read(&task->task_active)) -			continue; +		if (!(task->task_flags & TF_ACTIVE)) +			list_move_tail(&task->t_list, &dispose_list); +	} +	spin_unlock_irqrestore(&cmd->t_state_lock, flags); + +	while (!list_empty(&dispose_list)) { +		task = list_first_entry(&dispose_list, struct se_task, t_list); -		kfree(task->task_sg_bidi); -		kfree(task->task_sg); +		if (task->task_sg != cmd->t_data_sg && +		    task->task_sg != cmd->t_bidi_data_sg) +			kfree(task->task_sg);  		list_del(&task->t_list); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		if (task->se_dev) -			task->se_dev->transport->free_task(task); -		else -			pr_err("task[%u] - task->se_dev is NULL\n", -				task->task_no); -		spin_lock_irqsave(&cmd->t_state_lock, flags); +		cmd->se_dev->transport->free_task(task);  	} -	spin_unlock_irqrestore(&cmd->t_state_lock, flags);  }  static inline void transport_free_sgl(struct scatterlist *sgl, int nents) @@ -3746,89 +3431,43 @@ static inline void transport_free_pages(struct se_cmd *cmd)  	cmd->t_bidi_data_nents = 0;  } -static inline void transport_release_tasks(struct se_cmd *cmd) -{ -	transport_free_dev_tasks(cmd); -} - -static inline int transport_dec_and_check(struct se_cmd *cmd) +/** + * transport_put_cmd - release a reference to a command + * @cmd:       command to release + * + * This routine releases our reference to the command and frees it if possible. 
+ */ +static void transport_put_cmd(struct se_cmd *cmd)  {  	unsigned long flags; +	int free_tasks = 0;  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	if (atomic_read(&cmd->t_fe_count)) { -		if (!atomic_dec_and_test(&cmd->t_fe_count)) { -			spin_unlock_irqrestore(&cmd->t_state_lock, -					flags); -			return 1; -		} +		if (!atomic_dec_and_test(&cmd->t_fe_count)) +			goto out_busy;  	}  	if (atomic_read(&cmd->t_se_count)) { -		if (!atomic_dec_and_test(&cmd->t_se_count)) { -			spin_unlock_irqrestore(&cmd->t_state_lock, -					flags); -			return 1; -		} -	} -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	return 0; -} - -static void transport_release_fe_cmd(struct se_cmd *cmd) -{ -	unsigned long flags; - -	if (transport_dec_and_check(cmd)) -		return; - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (!atomic_read(&cmd->transport_dev_active)) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		goto free_pages; +		if (!atomic_dec_and_test(&cmd->t_se_count)) +			goto out_busy;  	} -	atomic_set(&cmd->transport_dev_active, 0); -	transport_all_task_dev_remove_state(cmd); -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	transport_release_tasks(cmd); -free_pages: -	transport_free_pages(cmd); -	transport_free_se_cmd(cmd); -	cmd->se_tfo->release_cmd(cmd); -} - -static int -transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) -{ -	unsigned long flags; -	if (transport_dec_and_check(cmd)) { -		if (session_reinstatement) { -			spin_lock_irqsave(&cmd->t_state_lock, flags); -			transport_all_task_dev_remove_state(cmd); -			spin_unlock_irqrestore(&cmd->t_state_lock, -					flags); -		} -		return 1; -	} - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (!atomic_read(&cmd->transport_dev_active)) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		goto free_pages; +	if (atomic_read(&cmd->transport_dev_active)) { +		atomic_set(&cmd->transport_dev_active, 0); +		transport_all_task_dev_remove_state(cmd); +		free_tasks = 1;  	} -	atomic_set(&cmd->transport_dev_active, 0); -	transport_all_task_dev_remove_state(cmd);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	transport_release_tasks(cmd); +	if (free_tasks != 0) +		transport_free_dev_tasks(cmd); -free_pages:  	transport_free_pages(cmd);  	transport_release_cmd(cmd); -	return 0; +	return; +out_busy: +	spin_unlock_irqrestore(&cmd->t_state_lock, flags);  }  /* @@ -3870,64 +3509,6 @@ int transport_generic_map_mem_to_cmd(  }  EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); -static int transport_new_cmd_obj(struct se_cmd *cmd) -{ -	struct se_device *dev = cmd->se_dev; -	u32 task_cdbs; -	u32 rc; -	int set_counts = 1; - -	/* -	 * Setup any BIDI READ tasks and memory from -	 * cmd->t_mem_bidi_list so the READ struct se_tasks -	 * are queued first for the non pSCSI passthrough case. 
-	 */ -	if (cmd->t_bidi_data_sg && -	    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { -		rc = transport_allocate_tasks(cmd, -					      cmd->t_task_lba, -					      DMA_FROM_DEVICE, -					      cmd->t_bidi_data_sg, -					      cmd->t_bidi_data_nents); -		if (rc <= 0) { -			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; -			cmd->scsi_sense_reason = -				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; -			return PYX_TRANSPORT_LU_COMM_FAILURE; -		} -		atomic_inc(&cmd->t_fe_count); -		atomic_inc(&cmd->t_se_count); -		set_counts = 0; -	} -	/* -	 * Setup the tasks and memory from cmd->t_mem_list -	 * Note for BIDI transfers this will contain the WRITE payload -	 */ -	task_cdbs = transport_allocate_tasks(cmd, -					     cmd->t_task_lba, -					     cmd->data_direction, -					     cmd->t_data_sg, -					     cmd->t_data_nents); -	if (task_cdbs <= 0) { -		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; -		cmd->scsi_sense_reason = -			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; -		return PYX_TRANSPORT_LU_COMM_FAILURE; -	} - -	if (set_counts) { -		atomic_inc(&cmd->t_fe_count); -		atomic_inc(&cmd->t_se_count); -	} - -	cmd->t_task_list_num = task_cdbs; - -	atomic_set(&cmd->t_task_cdbs_left, task_cdbs); -	atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); -	atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); -	return 0; -} -  void *transport_kmap_first_data_page(struct se_cmd *cmd)  {  	struct scatterlist *sg = cmd->t_data_sg; @@ -4028,8 +3609,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)  		if (!task->task_sg)  			continue; -		BUG_ON(!task->task_padded_sg); -  		if (!sg_first) {  			sg_first = task->task_sg;  			chained_nents = task->task_sg_nents; @@ -4037,9 +3616,17 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)  			sg_chain(sg_prev, sg_prev_nents, task->task_sg);  			chained_nents += task->task_sg_nents;  		} - +		/* +		 * For the padded tasks, use the extra SGL vector allocated +		 * in transport_allocate_data_tasks() for the sg_prev_nents +		 * offset into sg_chain() above. +		 * +		 * We do not need the padding for the last task (or a single +		 * task), but in that case we will never use the sg_prev_nents +		 * value below which would be incorrect. 
+		 */ +		sg_prev_nents = (task->task_sg_nents + 1);  		sg_prev = task->task_sg; -		sg_prev_nents = task->task_sg_nents;  	}  	/*  	 * Setup the starting pointer and total t_tasks_sg_linked_no including @@ -4068,72 +3655,96 @@ EXPORT_SYMBOL(transport_do_task_sg_chain);  /*   * Break up cmd into chunks transport can handle   */ -static int transport_allocate_data_tasks( -	struct se_cmd *cmd, -	unsigned long long lba, +static int +transport_allocate_data_tasks(struct se_cmd *cmd,  	enum dma_data_direction data_direction, -	struct scatterlist *sgl, -	unsigned int sgl_nents) +	struct scatterlist *cmd_sg, unsigned int sgl_nents)  { -	unsigned char *cdb = NULL; -	struct se_task *task;  	struct se_device *dev = cmd->se_dev; -	unsigned long flags; -	int task_count, i, ret; -	sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; -	u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; -	struct scatterlist *sg; -	struct scatterlist *cmd_sg; +	int task_count, i; +	unsigned long long lba; +	sector_t sectors, dev_max_sectors; +	u32 sector_size; + +	if (transport_cmd_get_valid_sectors(cmd) < 0) +		return -EINVAL; + +	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; +	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;  	WARN_ON(cmd->data_length % sector_size); + +	lba = cmd->t_task_lba;  	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);  	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); -	 -	cmd_sg = sgl; -	for (i = 0; i < task_count; i++) { -		unsigned int task_size; -		int count; + +	/* +	 * If we need just a single task reuse the SG list in the command +	 * and avoid a lot of work. +	 */ +	if (task_count == 1) { +		struct se_task *task; +		unsigned long flags;  		task = transport_generic_get_task(cmd, data_direction);  		if (!task)  			return -ENOMEM; +		task->task_sg = cmd_sg; +		task->task_sg_nents = sgl_nents; +  		task->task_lba = lba; -		task->task_sectors = min(sectors, dev_max_sectors); +		task->task_sectors = sectors;  		task->task_size = task->task_sectors * sector_size; -		cdb = dev->transport->get_cdb(task); -		BUG_ON(!cdb); +		spin_lock_irqsave(&cmd->t_state_lock, flags); +		list_add_tail(&task->t_list, &cmd->t_task_list); +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		memcpy(cdb, cmd->t_task_cdb, -		       scsi_command_size(cmd->t_task_cdb)); +		return task_count; +	} -		/* Update new cdb with updated lba/sectors */ -		cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); +	for (i = 0; i < task_count; i++) { +		struct se_task *task; +		unsigned int task_size, task_sg_nents_padded; +		struct scatterlist *sg; +		unsigned long flags; +		int count; + +		task = transport_generic_get_task(cmd, data_direction); +		if (!task) +			return -ENOMEM; +		task->task_lba = lba; +		task->task_sectors = min(sectors, dev_max_sectors); +		task->task_size = task->task_sectors * sector_size; + +		/* +		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks +		 * in order to calculate the number per task SGL entries +		 */ +		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);  		/*  		 * Check if the fabric module driver is requesting that all  		 * struct se_task->task_sg[] be chained together..  If so,  		 * then allocate an extra padding SG entry for linking and -		 * marking the end of the chained SGL. -		 * Possibly over-allocate task sgl size by using cmd sgl size. -		 * It's so much easier and only a waste when task_count > 1. -		 * That is extremely rare. 
+		 * marking the end of the chained SGL for every task except +		 * the last one for (task_count > 1) operation, or skipping +		 * the extra padding for the (task_count == 1) case.  		 */ -		task->task_sg_nents = sgl_nents; -		if (cmd->se_tfo->task_sg_chaining) { -			task->task_sg_nents++; -			task->task_padded_sg = 1; -		} +		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { +			task_sg_nents_padded = (task->task_sg_nents + 1); +		} else +			task_sg_nents_padded = task->task_sg_nents;  		task->task_sg = kmalloc(sizeof(struct scatterlist) * -					task->task_sg_nents, GFP_KERNEL); +					task_sg_nents_padded, GFP_KERNEL);  		if (!task->task_sg) {  			cmd->se_dev->transport->free_task(task);  			return -ENOMEM;  		} -		sg_init_table(task->task_sg, task->task_sg_nents); +		sg_init_table(task->task_sg, task_sg_nents_padded);  		task_size = task->task_size; @@ -4154,20 +3765,6 @@ static int transport_allocate_data_tasks(  		list_add_tail(&task->t_list, &cmd->t_task_list);  		spin_unlock_irqrestore(&cmd->t_state_lock, flags);  	} -	/* -	 * Now perform the memory map of task->task_sg[] into backend -	 * subsystem memory.. -	 */ -	list_for_each_entry(task, &cmd->t_task_list, t_list) { -		if (atomic_read(&task->task_sent)) -			continue; -		if (!dev->transport->map_data_SG) -			continue; - -		ret = dev->transport->map_data_SG(task); -		if (ret < 0) -			return 0; -	}  	return task_count;  } @@ -4175,30 +3772,14 @@ static int transport_allocate_data_tasks(  static int  transport_allocate_control_task(struct se_cmd *cmd)  { -	struct se_device *dev = cmd->se_dev; -	unsigned char *cdb;  	struct se_task *task;  	unsigned long flags; -	int ret = 0;  	task = transport_generic_get_task(cmd, cmd->data_direction);  	if (!task)  		return -ENOMEM; -	cdb = dev->transport->get_cdb(task); -	BUG_ON(!cdb); -	memcpy(cdb, cmd->t_task_cdb, -	       scsi_command_size(cmd->t_task_cdb)); - -	task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, -				GFP_KERNEL); -	if (!task->task_sg) { -		cmd->se_dev->transport->free_task(task); -		return -ENOMEM; -	} - -	memcpy(task->task_sg, cmd->t_data_sg, -	       sizeof(struct scatterlist) * cmd->t_data_nents); +	task->task_sg = cmd->t_data_sg;  	task->task_size = cmd->data_length;  	task->task_sg_nents = cmd->t_data_nents; @@ -4206,50 +3787,20 @@ transport_allocate_control_task(struct se_cmd *cmd)  	list_add_tail(&task->t_list, &cmd->t_task_list);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { -		if (dev->transport->map_control_SG) -			ret = dev->transport->map_control_SG(task); -	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { -		if (dev->transport->cdb_none) -			ret = dev->transport->cdb_none(task); -	} else { -		pr_err("target: Unknown control cmd type!\n"); -		BUG(); -	} -  	/* Success! 
Return number of tasks allocated */ -	if (ret == 0) -		return 1; -	return ret; -} - -static u32 transport_allocate_tasks( -	struct se_cmd *cmd, -	unsigned long long lba, -	enum dma_data_direction data_direction, -	struct scatterlist *sgl, -	unsigned int sgl_nents) -{ -	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) -		return transport_allocate_data_tasks(cmd, lba, data_direction, -						     sgl, sgl_nents); -	else -		return transport_allocate_control_task(cmd); - +	return 1;  } - -/*	 transport_generic_new_cmd(): Called from transport_processing_thread() - * - *	 Allocate storage transport resources from a set of values predefined - *	 by transport_generic_cmd_sequencer() from the iSCSI Target RX process. - *	 Any non zero return here is treated as an "out of resource' op here. +/* + * Allocate any required resources to execute the command, and either place + * it on the execution queue if possible.  For writes we might not have the + * payload yet, thus notify the fabric via a call to ->write_pending instead.   */ -	/* -	 * Generate struct se_task(s) and/or their payloads for this CDB. -	 */  int transport_generic_new_cmd(struct se_cmd *cmd)  { +	struct se_device *dev = cmd->se_dev; +	int task_cdbs, task_cdbs_bidi = 0; +	int set_counts = 1;  	int ret = 0;  	/* @@ -4263,16 +3814,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd)  		if (ret < 0)  			return ret;  	} +  	/* -	 * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for -	 * control or data CDB types, and perform the map to backend subsystem -	 * code from SGL memory allocated here by transport_generic_get_mem(), or -	 * via pre-existing SGL memory setup explictly by fabric module code with -	 * transport_generic_map_mem_to_cmd(). +	 * For BIDI command set up the read tasks first.  	 */ -	ret = transport_new_cmd_obj(cmd); -	if (ret < 0) -		return ret; +	if (cmd->t_bidi_data_sg && +	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { +		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); + +		task_cdbs_bidi = transport_allocate_data_tasks(cmd, +				DMA_FROM_DEVICE, cmd->t_bidi_data_sg, +				cmd->t_bidi_data_nents); +		if (task_cdbs_bidi <= 0) +			goto out_fail; + +		atomic_inc(&cmd->t_fe_count); +		atomic_inc(&cmd->t_se_count); +		set_counts = 0; +	} + +	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { +		task_cdbs = transport_allocate_data_tasks(cmd, +					cmd->data_direction, cmd->t_data_sg, +					cmd->t_data_nents); +	} else { +		task_cdbs = transport_allocate_control_task(cmd); +	} + +	if (task_cdbs <= 0) +		goto out_fail; + +	if (set_counts) { +		atomic_inc(&cmd->t_fe_count); +		atomic_inc(&cmd->t_se_count); +	} + +	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); +	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num); +	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num); +  	/*  	 * For WRITEs, let the fabric know its buffer is ready..
* This WRITE struct se_cmd (and all of its associated struct se_task's) @@ -4290,6 +3870,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)  	 */  	transport_execute_tasks(cmd);  	return 0; + +out_fail: +	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; +	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +	return -EINVAL;  }  EXPORT_SYMBOL(transport_generic_new_cmd); @@ -4303,15 +3888,15 @@ void transport_generic_process_write(struct se_cmd *cmd)  }  EXPORT_SYMBOL(transport_generic_process_write); -static int transport_write_pending_qf(struct se_cmd *cmd) +static void transport_write_pending_qf(struct se_cmd *cmd)  { -	return cmd->se_tfo->write_pending(cmd); +	if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) { +		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", +			 cmd); +		transport_handle_queue_full(cmd, cmd->se_dev); +	}  } -/*	transport_generic_write_pending(): - * - * - */  static int transport_generic_write_pending(struct se_cmd *cmd)  {  	unsigned long flags; @@ -4321,17 +3906,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd)  	cmd->t_state = TRANSPORT_WRITE_PENDING;  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	if (cmd->transport_qf_callback) { -		ret = cmd->transport_qf_callback(cmd); -		if (ret == -EAGAIN) -			goto queue_full; -		else if (ret < 0) -			return ret; - -		cmd->transport_qf_callback = NULL; -		return 0; -	} -  	/*  	 * Clear the se_cmd for WRITE_PENDING status in order to set  	 * cmd->t_transport_active=0 so that transport_generic_handle_data @@ -4356,61 +3930,52 @@ static int transport_generic_write_pending(struct se_cmd *cmd)  queue_full:  	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);  	cmd->t_state = TRANSPORT_COMPLETE_QF_WP; -	transport_handle_queue_full(cmd, cmd->se_dev, -			transport_write_pending_qf); +	transport_handle_queue_full(cmd, cmd->se_dev);  	return ret;  } +/** + * transport_release_cmd - free a command + * @cmd:       command to free + * + * This routine unconditionally frees a command, and reference counting + * or list removal must be done in the caller. 
+ */  void transport_release_cmd(struct se_cmd *cmd)  {  	BUG_ON(!cmd->se_tfo); -	transport_free_se_cmd(cmd); +	if (cmd->se_tmr_req) +		core_tmr_release_req(cmd->se_tmr_req); +	if (cmd->t_task_cdb != cmd->__t_task_cdb) +		kfree(cmd->t_task_cdb);  	cmd->se_tfo->release_cmd(cmd);  }  EXPORT_SYMBOL(transport_release_cmd); -/*	transport_generic_free_cmd(): - * - *	Called from processing frontend to release storage engine resources - */ -void transport_generic_free_cmd( -	struct se_cmd *cmd, -	int wait_for_tasks, -	int session_reinstatement) +void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)  { -	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) +	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { +		if (wait_for_tasks && cmd->se_tmr_req) +			 transport_wait_for_tasks(cmd); +  		transport_release_cmd(cmd); -	else { +	} else { +		if (wait_for_tasks) +			transport_wait_for_tasks(cmd); +  		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); -		if (cmd->se_lun) { -#if 0 -			pr_debug("cmd: %p ITT: 0x%08x contains" -				" cmd->se_lun\n", cmd, -				cmd->se_tfo->get_task_tag(cmd)); -#endif +		if (cmd->se_lun)  			transport_lun_remove_cmd(cmd); -		} - -		if (wait_for_tasks && cmd->transport_wait_for_tasks) -			cmd->transport_wait_for_tasks(cmd, 0, 0);  		transport_free_dev_tasks(cmd); -		transport_generic_remove(cmd, session_reinstatement); +		transport_put_cmd(cmd);  	}  }  EXPORT_SYMBOL(transport_generic_free_cmd); -static void transport_nop_wait_for_tasks( -	struct se_cmd *cmd, -	int remove_cmd, -	int session_reinstatement) -{ -	return; -} -  /*	transport_lun_wait_for_tasks():   *   *	Called from ConfigFS context to stop the passed struct se_cmd to allow @@ -4449,7 +4014,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)  		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",  				cmd->se_tfo->get_task_tag(cmd));  	} -	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); +	transport_remove_cmd_from_queue(cmd);  	return 0;  } @@ -4580,22 +4145,30 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)  	return 0;  } -/*	transport_generic_wait_for_tasks(): +/** + * transport_wait_for_tasks - wait for completion to occur + * @cmd:	command to wait   * - *	Called from frontend or passthrough context to wait for storage engine - *	to pause and/or release frontend generated struct se_cmd. + * Called from frontend fabric context to wait for storage engine + * to pause and/or release frontend generated struct se_cmd.   */ -static void transport_generic_wait_for_tasks( -	struct se_cmd *cmd, -	int remove_cmd, -	int session_reinstatement) +void transport_wait_for_tasks(struct se_cmd *cmd)  {  	unsigned long flags; -	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) -		return; -  	spin_lock_irqsave(&cmd->t_state_lock, flags); +	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) { +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +		return; +	} +	/* +	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE +	 * has been set in transport_set_supported_SAM_opcode(). +	 */ +	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) { +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +		return; +	}  	/*  	 * If we are already stopped due to an external event (ie: LUN shutdown)  	 * sleep until the connection can have the passed struct se_cmd back. 
@@ -4635,16 +4208,17 @@ static void transport_generic_wait_for_tasks(  		atomic_set(&cmd->transport_lun_stop, 0);  	}  	if (!atomic_read(&cmd->t_transport_active) || -	     atomic_read(&cmd->t_transport_aborted)) -		goto remove; +	     atomic_read(&cmd->t_transport_aborted)) { +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +		return; +	}  	atomic_set(&cmd->t_transport_stop, 1);  	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" -		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" -		" = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), -		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, -		cmd->deferred_t_state); +		" i_state: %d, t_state: %d, t_transport_stop = TRUE\n", +		cmd, cmd->se_tfo->get_task_tag(cmd), +		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -4659,13 +4233,10 @@ static void transport_generic_wait_for_tasks(  	pr_debug("wait_for_tasks: Stopped wait_for_compltion("  		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",  		cmd->se_tfo->get_task_tag(cmd)); -remove: -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	if (!remove_cmd) -		return; -	transport_generic_free_cmd(cmd, 0, session_reinstatement); +	spin_unlock_irqrestore(&cmd->t_state_lock, flags);  } +EXPORT_SYMBOL(transport_wait_for_tasks);  static int transport_get_sense_codes(  	struct se_cmd *cmd, @@ -4726,6 +4297,13 @@ int transport_send_check_condition_and_sense(  	 */  	switch (reason) {  	case TCM_NON_EXISTENT_LUN: +		/* CURRENT ERROR */ +		buffer[offset] = 0x70; +		/* ILLEGAL REQUEST */ +		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; +		/* LOGICAL UNIT NOT SUPPORTED */ +		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; +		break;  	case TCM_UNSUPPORTED_SCSI_OPCODE:  	case TCM_SECTOR_COUNT_TOO_MANY:  		/* CURRENT ERROR */ @@ -4883,6 +4461,15 @@ EXPORT_SYMBOL(transport_check_aborted_status);  void transport_send_task_abort(struct se_cmd *cmd)  { +	unsigned long flags; + +	spin_lock_irqsave(&cmd->t_state_lock, flags); +	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +		return; +	} +	spin_unlock_irqrestore(&cmd->t_state_lock, flags); +  	/*  	 * If there are still expected incoming fabric WRITEs, we wait  	 * until until they have completed before sending a TASK_ABORTED @@ -4947,184 +4534,10 @@ int transport_generic_do_tmr(struct se_cmd *cmd)  	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;  	cmd->se_tfo->queue_tm_rsp(cmd); -	transport_cmd_check_stop(cmd, 2, 0); +	transport_cmd_check_stop_to_fabric(cmd);  	return 0;  } -/* - *	Called with spin_lock_irq(&dev->execute_task_lock); held - * - */ -static struct se_task * -transport_get_task_from_state_list(struct se_device *dev) -{ -	struct se_task *task; - -	if (list_empty(&dev->state_task_list)) -		return NULL; - -	list_for_each_entry(task, &dev->state_task_list, t_state_list) -		break; - -	list_del(&task->t_state_list); -	atomic_set(&task->task_state_active, 0); - -	return task; -} - -static void transport_processing_shutdown(struct se_device *dev) -{ -	struct se_cmd *cmd; -	struct se_task *task; -	unsigned long flags; -	/* -	 * Empty the struct se_device's struct se_task state list. 
-	 */ -	spin_lock_irqsave(&dev->execute_task_lock, flags); -	while ((task = transport_get_task_from_state_list(dev))) { -		if (!task->task_se_cmd) { -			pr_err("task->task_se_cmd is NULL!\n"); -			continue; -		} -		cmd = task->task_se_cmd; - -		spin_unlock_irqrestore(&dev->execute_task_lock, flags); - -		spin_lock_irqsave(&cmd->t_state_lock, flags); - -		pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," -			" i_state: %d, t_state/def_t_state:" -			" %d/%d cdb: 0x%02x\n", cmd, task, -			cmd->se_tfo->get_task_tag(cmd), -			cmd->se_tfo->get_cmd_state(cmd), -			cmd->t_state, cmd->deferred_t_state, -			cmd->t_task_cdb[0]); -		pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" -			" %d t_task_cdbs_sent: %d -- t_transport_active: %d" -			" t_transport_stop: %d t_transport_sent: %d\n", -			cmd->se_tfo->get_task_tag(cmd), -			cmd->t_task_list_num, -			atomic_read(&cmd->t_task_cdbs_left), -			atomic_read(&cmd->t_task_cdbs_sent), -			atomic_read(&cmd->t_transport_active), -			atomic_read(&cmd->t_transport_stop), -			atomic_read(&cmd->t_transport_sent)); - -		if (atomic_read(&task->task_active)) { -			atomic_set(&task->task_stop, 1); -			spin_unlock_irqrestore( -				&cmd->t_state_lock, flags); - -			pr_debug("Waiting for task: %p to shutdown for dev:" -				" %p\n", task, dev); -			wait_for_completion(&task->task_stop_comp); -			pr_debug("Completed task: %p shutdown for dev: %p\n", -				task, dev); - -			spin_lock_irqsave(&cmd->t_state_lock, flags); -			atomic_dec(&cmd->t_task_cdbs_left); - -			atomic_set(&task->task_active, 0); -			atomic_set(&task->task_stop, 0); -		} else { -			if (atomic_read(&task->task_execute_queue) != 0) -				transport_remove_task_from_execute_queue(task, dev); -		} -		__transport_stop_task_timer(task, &flags); - -		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { -			spin_unlock_irqrestore( -					&cmd->t_state_lock, flags); - -			pr_debug("Skipping task: %p, dev: %p for" -				" t_task_cdbs_ex_left: %d\n", task, dev, -				atomic_read(&cmd->t_task_cdbs_ex_left)); - -			spin_lock_irqsave(&dev->execute_task_lock, flags); -			continue; -		} - -		if (atomic_read(&cmd->t_transport_active)) { -			pr_debug("got t_transport_active = 1 for task: %p, dev:" -					" %p\n", task, dev); - -			if (atomic_read(&cmd->t_fe_count)) { -				spin_unlock_irqrestore( -					&cmd->t_state_lock, flags); -				transport_send_check_condition_and_sense( -					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, -					0); -				transport_remove_cmd_from_queue(cmd, -					&cmd->se_dev->dev_queue_obj); - -				transport_lun_remove_cmd(cmd); -				transport_cmd_check_stop(cmd, 1, 0); -			} else { -				spin_unlock_irqrestore( -					&cmd->t_state_lock, flags); - -				transport_remove_cmd_from_queue(cmd, -					&cmd->se_dev->dev_queue_obj); - -				transport_lun_remove_cmd(cmd); - -				if (transport_cmd_check_stop(cmd, 1, 0)) -					transport_generic_remove(cmd, 0); -			} - -			spin_lock_irqsave(&dev->execute_task_lock, flags); -			continue; -		} -		pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", -				task, dev); - -		if (atomic_read(&cmd->t_fe_count)) { -			spin_unlock_irqrestore( -				&cmd->t_state_lock, flags); -			transport_send_check_condition_and_sense(cmd, -				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); -			transport_remove_cmd_from_queue(cmd, -				&cmd->se_dev->dev_queue_obj); - -			transport_lun_remove_cmd(cmd); -			transport_cmd_check_stop(cmd, 1, 0); -		} else { -			spin_unlock_irqrestore( -				&cmd->t_state_lock, flags); - -			transport_remove_cmd_from_queue(cmd, -				
&cmd->se_dev->dev_queue_obj); -			transport_lun_remove_cmd(cmd); - -			if (transport_cmd_check_stop(cmd, 1, 0)) -				transport_generic_remove(cmd, 0); -		} - -		spin_lock_irqsave(&dev->execute_task_lock, flags); -	} -	spin_unlock_irqrestore(&dev->execute_task_lock, flags); -	/* -	 * Empty the struct se_device's struct se_cmd list. -	 */ -	while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { - -		pr_debug("From Device Queue: cmd: %p t_state: %d\n", -				cmd, cmd->t_state); - -		if (atomic_read(&cmd->t_fe_count)) { -			transport_send_check_condition_and_sense(cmd, -				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); - -			transport_lun_remove_cmd(cmd); -			transport_cmd_check_stop(cmd, 1, 0); -		} else { -			transport_lun_remove_cmd(cmd); -			if (transport_cmd_check_stop(cmd, 1, 0)) -				transport_generic_remove(cmd, 0); -		} -	} -} -  /*	transport_processing_thread():   *   * @@ -5144,14 +4557,6 @@ static int transport_processing_thread(void *param)  		if (ret < 0)  			goto out; -		spin_lock_irq(&dev->dev_status_lock); -		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { -			spin_unlock_irq(&dev->dev_status_lock); -			transport_processing_shutdown(dev); -			continue; -		} -		spin_unlock_irq(&dev->dev_status_lock); -  get_cmd:  		__transport_execute_tasks(dev); @@ -5160,6 +4565,9 @@ get_cmd:  			continue;  		switch (cmd->t_state) { +		case TRANSPORT_NEW_CMD: +			BUG(); +			break;  		case TRANSPORT_NEW_CMD_MAP:  			if (!cmd->se_tfo->new_cmd_map) {  				pr_err("cmd->se_tfo->new_cmd_map is" @@ -5169,19 +4577,17 @@ get_cmd:  			ret = cmd->se_tfo->new_cmd_map(cmd);  			if (ret < 0) {  				cmd->transport_error_status = ret; -				transport_generic_request_failure(cmd, NULL, +				transport_generic_request_failure(cmd,  						0, (cmd->data_direction !=  						    DMA_TO_DEVICE));  				break;  			} -			/* Fall through */ -		case TRANSPORT_NEW_CMD:  			ret = transport_generic_new_cmd(cmd);  			if (ret == -EAGAIN)  				break;  			else if (ret < 0) {  				cmd->transport_error_status = ret; -				transport_generic_request_failure(cmd, NULL, +				transport_generic_request_failure(cmd,  					0, (cmd->data_direction !=  					 DMA_TO_DEVICE));  			} @@ -5189,33 +4595,22 @@ get_cmd:  		case TRANSPORT_PROCESS_WRITE:  			transport_generic_process_write(cmd);  			break; -		case TRANSPORT_COMPLETE_OK: -			transport_stop_all_task_timers(cmd); -			transport_generic_complete_ok(cmd); -			break; -		case TRANSPORT_REMOVE: -			transport_generic_remove(cmd, 0); -			break;  		case TRANSPORT_FREE_CMD_INTR: -			transport_generic_free_cmd(cmd, 0, 0); +			transport_generic_free_cmd(cmd, 0);  			break;  		case TRANSPORT_PROCESS_TMR:  			transport_generic_do_tmr(cmd);  			break; -		case TRANSPORT_COMPLETE_FAILURE: -			transport_generic_request_failure(cmd, NULL, 1, 1); -			break; -		case TRANSPORT_COMPLETE_TIMEOUT: -			transport_stop_all_task_timers(cmd); -			transport_generic_request_timeout(cmd); -			break;  		case TRANSPORT_COMPLETE_QF_WP: -			transport_generic_write_pending(cmd); +			transport_write_pending_qf(cmd); +			break; +		case TRANSPORT_COMPLETE_QF_OK: +			transport_complete_qf(cmd);  			break;  		default: -			pr_err("Unknown t_state: %d deferred_t_state:" -				" %d for ITT: 0x%08x i_state: %d on SE LUN:" -				" %u\n", cmd->t_state, cmd->deferred_t_state, +			pr_err("Unknown t_state: %d  for ITT: 0x%08x " +				"i_state: %d on SE LUN: %u\n", +				cmd->t_state,  				cmd->se_tfo->get_task_tag(cmd),  				cmd->se_tfo->get_cmd_state(cmd),  				cmd->se_lun->unpacked_lun); @@ -5226,7 +4621,8 @@ get_cmd:  	}  out: 
-	transport_release_all_cmds(dev); +	WARN_ON(!list_empty(&dev->state_task_list)); +	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));  	dev->process_thread = NULL;  	return 0;  } diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 31e3c652527..50a480db7a6 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -24,7 +24,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <scsi/scsi.h> diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index bd4fe21a23b..3749d8b4b42 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -98,8 +98,7 @@ struct ft_tpg {  	struct list_head list;		/* linkage in ft_lport_acl tpg_list */  	struct list_head lun_list;	/* head of LUNs */  	struct se_portal_group se_tpg; -	struct task_struct *thread;	/* processing thread */ -	struct se_queue_obj qobj;	/* queue for processing thread */ +	struct workqueue_struct *workqueue;  };  struct ft_lport_acl { @@ -110,16 +109,10 @@ struct ft_lport_acl {  	struct se_wwn fc_lport_wwn;  }; -enum ft_cmd_state { -	FC_CMD_ST_NEW = 0, -	FC_CMD_ST_REJ -}; -  /*   * Commands   */  struct ft_cmd { -	enum ft_cmd_state state;  	u32 lun;                        /* LUN from request */  	struct ft_sess *sess;		/* session held for cmd */  	struct fc_seq *seq;		/* sequence in exchange mgr */ @@ -127,7 +120,7 @@ struct ft_cmd {  	struct fc_frame *req_frame;  	unsigned char *cdb;		/* pointer to CDB inside frame */  	u32 write_data_len;		/* data received on writes */ -	struct se_queue_req se_req; +	struct work_struct work;  	/* Local sense buffer */  	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];  	u32 was_ddp_setup:1;		/* Set only if ddp is setup */ @@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);  /*   * other internal functions.   
*/ -int ft_thread(void *);  void ft_recv_req(struct ft_sess *, struct fc_frame *);  struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);  struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 5654dc22f7a..6195026cc7b 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -19,7 +19,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -62,8 +61,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)  	int count;  	se_cmd = &cmd->se_cmd; -	pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", -		caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); +	pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", +		caller, cmd, cmd->sess, cmd->seq, se_cmd);  	pr_debug("%s: cmd %p cdb %p\n",  		caller, cmd, cmd->cdb);  	pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); @@ -90,38 +89,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)  		16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);  } -static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) -{ -	struct ft_tpg *tpg = sess->tport->tpg; -	struct se_queue_obj *qobj = &tpg->qobj; -	unsigned long flags; - -	qobj = &sess->tport->tpg->qobj; -	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); -	list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); -	atomic_inc(&qobj->queue_cnt); -	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - -	wake_up_process(tpg->thread); -} - -static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) -{ -	unsigned long flags; -	struct se_queue_req *qr; - -	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); -	if (list_empty(&qobj->qobj_list)) { -		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); -		return NULL; -	} -	qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list); -	list_del(&qr->qr_list); -	atomic_dec(&qobj->queue_cnt); -	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); -	return container_of(qr, struct ft_cmd, se_req); -} -  static void ft_free_cmd(struct ft_cmd *cmd)  {  	struct fc_frame *fp; @@ -147,7 +114,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)  void ft_check_stop_free(struct se_cmd *se_cmd)  { -	transport_generic_free_cmd(se_cmd, 0, 0); +	transport_generic_free_cmd(se_cmd, 0);  }  /* @@ -282,9 +249,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)  int ft_get_cmd_state(struct se_cmd *se_cmd)  { -	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); - -	return cmd->state; +	return 0;  }  int ft_is_state_remove(struct se_cmd *se_cmd) @@ -302,9 +267,8 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)  	if (IS_ERR(fp)) {  		/* XXX need to find cmd if queued */ -		cmd->se_cmd.t_state = TRANSPORT_REMOVE;  		cmd->seq = NULL; -		transport_generic_free_cmd(&cmd->se_cmd, 0, 0); +		transport_generic_free_cmd(&cmd->se_cmd, 0);  		return;  	} @@ -322,7 +286,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)  		       __func__, fh->fh_r_ctl);  		ft_invl_hw_context(cmd);  		fc_frame_free(fp); -		transport_generic_free_cmd(&cmd->se_cmd, 0, 0); +		transport_generic_free_cmd(&cmd->se_cmd, 0);  		break;  	}  } @@ -431,7 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)  	}  	pr_debug("alloc tm cmd fn %d\n", tm_func); -	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); +	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);  	if 
(!tmr) {  		pr_debug("alloc failed\n");  		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); @@ -455,7 +419,7 @@ static void ft_send_tm(struct ft_cmd *cmd)  			sess = cmd->sess;  			transport_send_check_condition_and_sense(&cmd->se_cmd,  				cmd->se_cmd.scsi_sense_reason, 0); -			transport_generic_free_cmd(&cmd->se_cmd, 0, 0); +			transport_generic_free_cmd(&cmd->se_cmd, 0);  			ft_sess_put(sess);  			return;  		} @@ -505,6 +469,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)  	return 0;  } +static void ft_send_work(struct work_struct *work); +  /*   * Handle incoming FCP command.   */ @@ -523,7 +489,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)  		goto busy;  	}  	cmd->req_frame = fp;		/* hold frame during cmd */ -	ft_queue_cmd(sess, cmd); + +	INIT_WORK(&cmd->work, ft_send_work); +	queue_work(sess->tport->tpg->workqueue, &cmd->work);  	return;  busy: @@ -563,12 +531,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)  /*   * Send new command to target.   */ -static void ft_send_cmd(struct ft_cmd *cmd) +static void ft_send_work(struct work_struct *work)  { +	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);  	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);  	struct se_cmd *se_cmd;  	struct fcp_cmnd *fcp; -	int data_dir; +	int data_dir = 0;  	u32 data_len;  	int task_attr;  	int ret; @@ -657,7 +626,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)  	if (ret == -ENOMEM) {  		transport_send_check_condition_and_sense(se_cmd,  				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); -		transport_generic_free_cmd(se_cmd, 0, 0); +		transport_generic_free_cmd(se_cmd, 0);  		return;  	}  	if (ret == -EINVAL) { @@ -666,51 +635,12 @@ static void ft_send_cmd(struct ft_cmd *cmd)  		else  			transport_send_check_condition_and_sense(se_cmd,  					se_cmd->scsi_sense_reason, 0); -		transport_generic_free_cmd(se_cmd, 0, 0); +		transport_generic_free_cmd(se_cmd, 0);  		return;  	} -	transport_generic_handle_cdb(se_cmd); +	transport_handle_cdb_direct(se_cmd);  	return;  err:  	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);  } - -/* - * Handle request in the command thread. - */ -static void ft_exec_req(struct ft_cmd *cmd) -{ -	pr_debug("cmd state %x\n", cmd->state); -	switch (cmd->state) { -	case FC_CMD_ST_NEW: -		ft_send_cmd(cmd); -		break; -	default: -		break; -	} -} - -/* - * Processing thread. - * Currently one thread per tpg. 
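The per-tpg command queue and ft_thread() removed here are replaced by embedding a work_struct in each command and handing it to a workqueue, the stock pattern for deferring per-request processing to sleepable context. A rough sketch with made-up names (my_cmd, my_send_work, my_recv):

#include <linux/workqueue.h>

struct my_cmd {
	struct work_struct work;	/* one work item per command */
	/* ... request state ... */
};

static void my_send_work(struct work_struct *work)
{
	struct my_cmd *cmd = container_of(work, struct my_cmd, work);

	/* runs in process context on the workqueue; may sleep */
	my_submit_to_core(cmd);
}

static void my_recv(struct workqueue_struct *wq, struct my_cmd *cmd)
{
	INIT_WORK(&cmd->work, my_send_work);
	queue_work(wq, &cmd->work);	/* wq from alloc_workqueue("tcm_fc", 0, 1) */
}

Teardown then only needs destroy_workqueue(), which flushes outstanding work before freeing the queue, instead of kthread_stop() plus manual draining.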
- */ -int ft_thread(void *arg) -{ -	struct ft_tpg *tpg = arg; -	struct se_queue_obj *qobj = &tpg->qobj; -	struct ft_cmd *cmd; - -	while (!kthread_should_stop()) { -		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); -		if (kthread_should_stop()) -			goto out; - -		cmd = ft_dequeue_cmd(qobj); -		if (cmd) -			ft_exec_req(cmd); -	} - -out: -	return 0; -} diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 8781d1e423d..5f770412ca4 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -23,7 +23,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -32,6 +31,7 @@  #include <linux/types.h>  #include <linux/string.h>  #include <linux/configfs.h> +#include <linux/kernel.h>  #include <linux/ctype.h>  #include <asm/unaligned.h>  #include <scsi/scsi.h> @@ -71,10 +71,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)  {  	const char *cp;  	char c; -	u32 nibble;  	u32 byte = 0;  	u32 pos = 0;  	u32 err; +	int val;  	*wwn = 0;  	for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) { @@ -95,13 +95,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)  			return cp - name;  		}  		err = 3; -		if (isdigit(c)) -			nibble = c - '0'; -		else if (isxdigit(c) && (islower(c) || !strict)) -			nibble = tolower(c) - 'a' + 10; -		else +		val = hex_to_bin(c); +		if (val < 0 || (strict && isupper(c)))  			goto fail; -		*wwn = (*wwn << 4) | nibble; +		*wwn = (*wwn << 4) | val;  	}  	err = 4;  fail: @@ -256,7 +253,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)  	struct se_portal_group *se_tpg = &tpg->se_tpg;  	struct se_node_acl *se_acl; -	spin_lock_bh(&se_tpg->acl_node_lock); +	spin_lock_irq(&se_tpg->acl_node_lock);  	list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {  		acl = container_of(se_acl, struct ft_node_acl, se_node_acl);  		pr_debug("acl %p port_name %llx\n", @@ -270,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)  			break;  		}  	} -	spin_unlock_bh(&se_tpg->acl_node_lock); +	spin_unlock_irq(&se_tpg->acl_node_lock);  	return found;  } @@ -327,7 +324,6 @@ static struct se_portal_group *ft_add_tpg(  	tpg->index = index;  	tpg->lport_acl = lacl;  	INIT_LIST_HEAD(&tpg->lun_list); -	transport_init_queue_obj(&tpg->qobj);  	ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,  				tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -336,8 +332,8 @@ static struct se_portal_group *ft_add_tpg(  		return NULL;  	} -	tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); -	if (IS_ERR(tpg->thread)) { +	tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1); +	if (!tpg->workqueue) {  		kfree(tpg);  		return NULL;  	} @@ -356,7 +352,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)  	pr_debug("del tpg %s\n",  		    config_item_name(&tpg->se_tpg.tpg_group.cg_item)); -	kthread_stop(tpg->thread); +	destroy_workqueue(tpg->workqueue);  	/* Wait for sessions to be freed thru RCU, for BUG_ON below */  	synchronize_rcu(); @@ -655,9 +651,7 @@ static void __exit ft_exit(void)  	synchronize_rcu();  } -#ifdef MODULE  MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);  MODULE_LICENSE("GPL");  module_init(ft_init);  module_exit(ft_exit); -#endif /* MODULE */ diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index c37f4cd9645..1369b1cb103 100644 --- 
a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -28,7 +28,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -219,43 +218,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)  	if (cmd->was_ddp_setup) {  		BUG_ON(!ep);  		BUG_ON(!lport); -	} - -	/* -	 * Doesn't expect payload if DDP is setup. Payload -	 * is expected to be copied directly to user buffers -	 * due to DDP (Large Rx offload), -	 */ -	buf = fc_frame_payload_get(fp, 1); -	if (buf) -		pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " +		/* +		 * Since DDP (Large Rx offload) was setup for this request, +		 * payload is expected to be copied directly to user buffers. +		 */ +		buf = fc_frame_payload_get(fp, 1); +		if (buf) +			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "  				"cmd->sg_cnt 0x%x. DDP was setup"  				" hence not expected to receive frame with " -				"payload, Frame will be dropped if " -				"'Sequence Initiative' bit in f_ctl is " +				"payload, Frame will be dropped if" +				"'Sequence Initiative' bit in f_ctl is"  				"not set\n", __func__, ep->xid, f_ctl,  				cmd->sg, cmd->sg_cnt); -	/* - 	 * Invalidate HW DDP context if it was setup for respective - 	 * command. Invalidation of HW DDP context is requited in both - 	 * situation (success and error).  - 	 */ -	ft_invl_hw_context(cmd); +		/* +		 * Invalidate HW DDP context if it was setup for respective +		 * command. Invalidation of HW DDP context is requited in both +		 * situation (success and error). +		 */ +		ft_invl_hw_context(cmd); -	/* -	 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last -	 * write data frame is received successfully where payload is -	 * posted directly to user buffer and only the last frame's -	 * header is posted in receive queue. -	 * -	 * If "Sequence Initiative (TSI)" bit is not set, means error -	 * condition w.r.t. DDP, hence drop the packet and let explict -	 * ABORTS from other end of exchange timer trigger the recovery. -	 */ -	if (f_ctl & FC_FC_SEQ_INIT) -		goto last_frame; -	else -		goto drop; +		/* +		 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last +		 * write data frame is received successfully where payload is +		 * posted directly to user buffer and only the last frame's +		 * header is posted in receive queue. +		 * +		 * If "Sequence Initiative (TSI)" bit is not set, means error +		 * condition w.r.t. DDP, hence drop the packet and let explict +		 * ABORTS from other end of exchange timer trigger the recovery. 
+		 */ +		if (f_ctl & FC_FC_SEQ_INIT) +			goto last_frame; +		else +			goto drop; +	}  	rel_off = ntohl(fh->fh_parm_offset);  	frame_len = fr_len(fp); diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index dbb5eaeee39..326921385af 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -19,7 +19,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index bd7cc052799..f462fa5f937 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -62,7 +62,7 @@ config VT_CONSOLE  config HW_CONSOLE  	bool -	depends on VT && !S390 && !UML +	depends on VT && !UML  	default y  config VT_HW_CONSOLE_BINDING diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 98b6e3bdb00..e809e9d4683 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -446,8 +446,19 @@ static inline void legacy_pty_init(void) { }  int pty_limit = NR_UNIX98_PTY_DEFAULT;  static int pty_limit_min;  static int pty_limit_max = NR_UNIX98_PTY_MAX; +static int tty_count;  static int pty_count; +static inline void pty_inc_count(void) +{ +	pty_count = (++tty_count) / 2; +} + +static inline void pty_dec_count(void) +{ +	pty_count = (--tty_count) / 2; +} +  static struct cdev ptmx_cdev;  static struct ctl_table pty_table[] = { @@ -542,6 +553,7 @@ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,  static void pty_unix98_shutdown(struct tty_struct *tty)  { +	tty_driver_remove_tty(tty->driver, tty);  	/* We have our own method as we don't use the tty index */  	kfree(tty->termios);  } @@ -588,7 +600,8 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)  	 */  	tty_driver_kref_get(driver);  	tty->count++; -	pty_count++; +	pty_inc_count(); /* tty */ +	pty_inc_count(); /* tty->link */  	return 0;  err_free_mem:  	deinitialize_tty_struct(o_tty); @@ -602,7 +615,7 @@ err_free_tty:  static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)  { -	pty_count--; +	pty_dec_count();  }  static const struct tty_operations ptm_unix98_ops = { diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c index f2dfec82faf..7f50999eebc 100644 --- a/drivers/tty/serial/8250.c +++ b/drivers/tty/serial/8250.c @@ -1819,6 +1819,8 @@ static void serial8250_backup_timeout(unsigned long data)  	unsigned int iir, ier = 0, lsr;  	unsigned long flags; +	spin_lock_irqsave(&up->port.lock, flags); +  	/*  	 * Must disable interrupts or else we risk racing with the interrupt  	 * based handler. @@ -1836,10 +1838,8 @@ static void serial8250_backup_timeout(unsigned long data)  	 * the "Diva" UART used on the management processor on many HP  	 * ia64 and parisc boxes.  	 
*/ -	spin_lock_irqsave(&up->port.lock, flags);  	lsr = serial_in(up, UART_LSR);  	up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; -	spin_unlock_irqrestore(&up->port.lock, flags);  	if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&  	    (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&  	    (lsr & UART_LSR_THRE)) { @@ -1848,11 +1848,13 @@ static void serial8250_backup_timeout(unsigned long data)  	}  	if (!(iir & UART_IIR_NO_INT)) -		serial8250_handle_port(up); +		transmit_chars(up);  	if (is_real_interrupt(up->port.irq))  		serial_out(up, UART_IER, ier); +	spin_unlock_irqrestore(&up->port.lock, flags); +  	/* Standard timer interval plus 0.2s to keep the port running */  	mod_timer(&up->timer,  		jiffies + uart_poll_timeout(&up->port) + HZ / 5); diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c index 6b887d90a20..3abeca2a2a1 100644 --- a/drivers/tty/serial/8250_pci.c +++ b/drivers/tty/serial/8250_pci.c @@ -1599,11 +1599,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {  		.device         = 0x800D,  		.init		= pci_eg20t_init,  	}, -	{ -		.vendor         = 0x10DB, -		.device         = 0x800D, -		.init		= pci_eg20t_init, -	},  	/*  	 * Cronyx Omega PCI (PLX-chip based)  	 */ @@ -4021,7 +4016,7 @@ static struct pci_device_id serial_pci_tbl[] = {  		0, 0, pbn_NETMOS9900_2s_115200 },  	/* -	 * Best Connectivity PCI Multi I/O cards +	 * Best Connectivity and Rosewill PCI Multi I/O cards  	 */  	{	PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, @@ -4029,6 +4024,10 @@ static struct pci_device_id serial_pci_tbl[] = {  		0, 0, pbn_b0_1_115200 },  	{	PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, +		0xA000, 0x3002, +		0, 0, pbn_b0_bt_2_115200 }, + +	{	PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,  		0xA000, 0x3004,  		0, 0, pbn_b0_bt_4_115200 },  	/* Intel CE4100 */ diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c index fc301f6722e..a2f236510ff 100644 --- a/drivers/tty/serial/8250_pnp.c +++ b/drivers/tty/serial/8250_pnp.c @@ -109,6 +109,9 @@ static const struct pnp_device_id pnp_dev_table[] = {  	/* IBM */  	/* IBM Thinkpad 701 Internal Modem Voice */  	{	"IBM0033",		0	}, +	/* Intermec */ +	/* Intermec CV60 touchscreen port */ +	{	"PNP4972",		0	},  	/* Intertex */  	/* Intertex 28k8 33k6 Voice EXT PnP */  	{	"IXDC801",		0	}, diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index af9b7814965..b922f5d2e61 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1609,9 +1609,11 @@ static struct console atmel_console = {  static int __init atmel_console_init(void)  {  	if (atmel_default_console_device) { -		add_preferred_console(ATMEL_DEVICENAME, -				      atmel_default_console_device->id, NULL); -		atmel_init_port(&atmel_ports[atmel_default_console_device->id], +		struct atmel_uart_data *pdata = +			atmel_default_console_device->dev.platform_data; + +		add_preferred_console(ATMEL_DEVICENAME, pdata->num, NULL); +		atmel_init_port(&atmel_ports[pdata->num],  				atmel_default_console_device);  		register_console(&atmel_console);  	} diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index 225123b37f1..58be715913c 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -4450,7 +4450,7 @@ static int __init rs_init(void)  #if defined(CONFIG_ETRAX_RS485)  #if defined(CONFIG_ETRAX_RS485_ON_PA) -	if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, +	if 
(cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,  			rs485_pa_bit)) {  		printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "  			"RS485 pin\n"); @@ -4459,7 +4459,7 @@ static int __init rs_init(void)  	}  #endif  #if defined(CONFIG_ETRAX_RS485_ON_PORT_G) -	if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, +	if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,  			rs485_port_g_bit)) {  		printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "  			"RS485 pin\n"); diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index 58cf279ed87..bc95f52cad8 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port,  	spin_unlock_irqrestore(<q_asc_lock, flags);  	/* Don't rewrite B0 */ -        if (tty_termios_baud_rate(new)) +	if (tty_termios_baud_rate(new))  		tty_termios_encode_baud_rate(new, baud, baud); + +	uart_update_timeout(port, cflag, baud);  }  static const char* diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c index a1fe304f2f5..d73aadd7a9a 100644 --- a/drivers/tty/serial/max3107-aava.c +++ b/drivers/tty/serial/max3107-aava.c @@ -340,5 +340,5 @@ module_exit(max3107_exit);  MODULE_DESCRIPTION("MAX3107 driver");  MODULE_AUTHOR("Aavamobile"); -MODULE_ALIAS("aava-max3107-spi"); +MODULE_ALIAS("spi:aava-max3107");  MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c index 750b4f62731..a8164601c0e 100644 --- a/drivers/tty/serial/max3107.c +++ b/drivers/tty/serial/max3107.c @@ -1209,5 +1209,5 @@ module_exit(max3107_exit);  MODULE_DESCRIPTION("MAX3107 driver");  MODULE_AUTHOR("Aavamobile"); -MODULE_ALIAS("max3107-spi"); +MODULE_ALIAS("spi:max3107");  MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c index a764bf99743..492c14d63e9 100644 --- a/drivers/tty/serial/mrst_max3110.c +++ b/drivers/tty/serial/mrst_max3110.c @@ -23,7 +23,7 @@   *    1 word. If SPI master controller doesn't support sclk frequency change,   *    then the char need be sent out one by one with some delay   * - * 2. Currently only RX available interrrupt is used, no need for waiting TXE + * 2. 
Currently only RX available interrupt is used, no need for waiting TXE   *    interrupt for a low speed UART device   */ @@ -917,4 +917,4 @@ module_init(serial_m3110_init);  module_exit(serial_m3110_exit);  MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("max3110-uart"); +MODULE_ALIAS("spi:max3110-uart"); diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index c37df8d0fa2..5e713d3ef1f 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -806,8 +806,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,  	serial_omap_set_mctrl(&up->port, up->port.mctrl);  	/* Software Flow Control Configuration */ -	if (termios->c_iflag & (IXON | IXOFF)) -		serial_omap_configure_xonxoff(up, termios); +	serial_omap_configure_xonxoff(up, termios);  	spin_unlock_irqrestore(&up->port.lock, flags);  	dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id); diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 846dfcd3ce0..b46218d679e 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -598,7 +598,8 @@ static void pch_request_dma(struct uart_port *port)  	dma_cap_zero(mask);  	dma_cap_set(DMA_SLAVE, mask); -	dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0)); /* Get DMA's dev +	dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number, +				       PCI_DEVFN(0xa, 0)); /* Get DMA's dev  								information */  	/* Set Tx DMA */  	param = &priv->param_tx; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index afc62942315..6edafb5ace1 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1225,15 +1225,19 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {  	.suspend = s3c24xx_serial_suspend,  	.resume = s3c24xx_serial_resume,  }; +#define SERIAL_SAMSUNG_PM_OPS	(&s3c24xx_serial_pm_ops) +  #else /* !CONFIG_PM_SLEEP */ -#define s3c24xx_serial_pm_ops	NULL + +#define SERIAL_SAMSUNG_PM_OPS	NULL  #endif /* CONFIG_PM_SLEEP */  int s3c24xx_serial_init(struct platform_driver *drv,  			struct s3c24xx_uart_info *info)  {  	dbg("s3c24xx_serial_init(%p,%p)\n", drv, info); -	drv->driver.pm = &s3c24xx_serial_pm_ops; + +	drv->driver.pm = SERIAL_SAMSUNG_PM_OPS;  	return platform_driver_register(drv);  } diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index db7912cb7ae..a3efbea5dbb 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -200,6 +200,11 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in  		clear_bit(TTY_IO_ERROR, &tty->flags);  	} +	/* +	 * This is to allow setserial on this port. People may want to set +	 * port/irq/type and then reconfigure the port properly if it failed +	 * now. 
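The MODULE_ALIAS changes above and in the max3107 drivers matter for autoloading: the SPI core reports hotplug events as MODALIAS=spi:<name>, so an alias without the "spi:" prefix never matches and the module is not loaded automatically. Where a driver carries an id table, MODULE_DEVICE_TABLE(spi, ...) generates the correctly prefixed aliases for it; a sketch of that alternative (table contents are illustrative, and the spi_driver would point its .id_table at it):

static const struct spi_device_id max3110_spi_ids[] = {
	{ "max3110-uart", 0 },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(spi, max3110_spi_ids);	/* emits the "spi:max3110-uart" alias */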
+	 */  	if (retval && capable(CAP_SYS_ADMIN))  		retval = 0; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 2ec57b2fb27..5ea6ec3442e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -47,6 +47,7 @@  #include <linux/ctype.h>  #include <linux/err.h>  #include <linux/dmaengine.h> +#include <linux/dma-mapping.h>  #include <linux/scatterlist.h>  #include <linux/slab.h> @@ -95,6 +96,12 @@ struct sci_port {  #endif  	struct notifier_block		freq_transition; + +#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE +	unsigned short saved_smr; +	unsigned short saved_fcr; +	unsigned char saved_brr; +#endif  };  /* Function prototypes */ @@ -1076,7 +1083,7 @@ static unsigned int sci_get_mctrl(struct uart_port *port)  	/* This routine is used for getting signals of: DTR, DCD, DSR, RI,  	   and CTS/RTS */ -	return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; +	return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR;  }  #ifdef CONFIG_SERIAL_SH_SCI_DMA @@ -1633,11 +1640,25 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,  	return ((freq + 16 * bps) / (32 * bps) - 1);  } +static void sci_reset(struct uart_port *port) +{ +	unsigned int status; + +	do { +		status = sci_in(port, SCxSR); +	} while (!(status & SCxSR_TEND(port))); + +	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */ + +	if (port->type != PORT_SCI) +		sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); +} +  static void sci_set_termios(struct uart_port *port, struct ktermios *termios,  			    struct ktermios *old)  {  	struct sci_port *s = to_sci_port(port); -	unsigned int status, baud, smr_val, max_baud; +	unsigned int baud, smr_val, max_baud;  	int t = -1;  	u16 scfcr = 0; @@ -1657,14 +1678,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,  	sci_port_enable(s); -	do { -		status = sci_in(port, SCxSR); -	} while (!(status & SCxSR_TEND(port))); - -	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */ - -	if (port->type != PORT_SCI) -		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST); +	sci_reset(port);  	smr_val = sci_in(port, SCSMR) & 3; @@ -1913,6 +1927,7 @@ static int __devinit sci_init_single(struct platform_device *dev,  		port->dev = &dev->dev; +		pm_runtime_irq_safe(&dev->dev);  		pm_runtime_enable(&dev->dev);  	} @@ -2036,7 +2051,8 @@ static int __devinit serial_console_setup(struct console *co, char *options)  	if (options)  		uart_parse_options(options, &baud, &parity, &bits, &flow); -	/* TODO: disable clock */ +	sci_port_disable(sci_port); +  	return uart_set_options(port, co, baud, parity, bits, flow);  } @@ -2079,6 +2095,36 @@ static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)  	return 0;  } +#define uart_console(port)	((port)->cons->index == (port)->line) + +static int sci_runtime_suspend(struct device *dev) +{ +	struct sci_port *sci_port = dev_get_drvdata(dev); +	struct uart_port *port = &sci_port->port; + +	if (uart_console(port)) { +		sci_port->saved_smr = sci_in(port, SCSMR); +		sci_port->saved_brr = sci_in(port, SCBRR); +		sci_port->saved_fcr = sci_in(port, SCFCR); +	} +	return 0; +} + +static int sci_runtime_resume(struct device *dev) +{ +	struct sci_port *sci_port = dev_get_drvdata(dev); +	struct uart_port *port = &sci_port->port; + +	if (uart_console(port)) { +		sci_reset(port); +		sci_out(port, SCSMR, sci_port->saved_smr); +		sci_out(port, SCBRR, sci_port->saved_brr); +		sci_out(port, SCFCR, sci_port->saved_fcr); +		sci_out(port, SCSCR, sci_port->cfg->scscr); +	} +	return 0; +} +  #define SCI_CONSOLE	
(&serial_console)  #else @@ -2088,6 +2134,8 @@ static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)  }  #define SCI_CONSOLE	NULL +#define sci_runtime_suspend	NULL +#define sci_runtime_resume	NULL  #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ @@ -2203,6 +2251,8 @@ static int sci_resume(struct device *dev)  }  static const struct dev_pm_ops sci_dev_pm_ops = { +	.runtime_suspend = sci_runtime_suspend, +	.runtime_resume = sci_runtime_resume,  	.suspend	= sci_suspend,  	.resume		= sci_resume,  }; diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c index c327218cad4..9af9f0879a2 100644 --- a/drivers/tty/serial/ucc_uart.c +++ b/drivers/tty/serial/ucc_uart.c @@ -235,7 +235,7 @@ static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)  		return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);  	/* something nasty happened */ -	printk(KERN_ERR "%s: addr=%x\n", __func__, addr); +	printk(KERN_ERR "%s: addr=%llx\n", __func__, (u64)addr);  	BUG();  	return NULL;  } diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 150e4f747c7..4f1fc81112e 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1295,8 +1295,7 @@ static int tty_driver_install_tty(struct tty_driver *driver,   *   *	Locking: tty_mutex for now   */ -static void tty_driver_remove_tty(struct tty_driver *driver, -						struct tty_struct *tty) +void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)  {  	if (driver->ops->remove)  		driver->ops->remove(driver, tty); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 8669ba3fe79..73cbbd85219 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1775,6 +1775,8 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,  		struct usb_interface *iface = usb_ifnum_to_if(udev,  				cur_alt->desc.bInterfaceNumber); +		if (!iface) +			return -EINVAL;  		if (iface->resetting_device) {  			/*  			 * The USB core just reset the device, so the xHCI host diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 8f8d3f6cd89..8f3eab1af88 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c @@ -434,6 +434,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)  			    config_ep_by_speed(gadget, f, fp->out_ep)) {  				fp->in_ep->desc = NULL;  				fp->out_ep->desc = NULL; +				spin_unlock(&port->lock);  				return -EINVAL;  			}  			usb_ep_enable(fp->out_ep); diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c index a06e2c27b43..ff4d40d77c3 100644 --- a/drivers/usb/gadget/langwell_udc.c +++ b/drivers/usb/gadget/langwell_udc.c @@ -2969,7 +2969,7 @@ static irqreturn_t langwell_irq(int irq, void *_dev)  		handle_port_change(dev);  	} -	/* suspend interrrupt */ +	/* suspend interrupt */  	if (irq_sts & STS_SLI) {  		dev_vdbg(&dev->pdev->dev, "suspend interrupt\n");  		handle_bus_suspend(dev); diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c index ce1ac2bcb31..0b3b8d0462d 100644 --- a/drivers/usb/gadget/mv_udc_core.c +++ b/drivers/usb/gadget/mv_udc_core.c @@ -335,7 +335,7 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)  	} else {  		/* Write dQH next pointer and terminate bit to 0 */  		dqh->next_dtd_ptr = req->head->td_dma -			& EP_QUEUE_HEAD_NEXT_POINTER_MASK;; +			& EP_QUEUE_HEAD_NEXT_POINTER_MASK;  		dqh->size_ioc_int_sts = 0;  		/* Ensure that updates to the QH will occur before priming. 
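The f_phonet fix above is the classic lock-balance bug on an early error return: the function bailed out with -EINVAL while still holding port->lock, so the next acquisition would deadlock. The usual defensive shape is a single unlock point (names below are made up for illustration):

static int my_set_alt(struct my_port *port)
{
	int ret = 0;

	spin_lock(&port->lock);
	if (my_config_ep_failed(port)) {
		ret = -EINVAL;
		goto out_unlock;	/* never leave with the lock held */
	}
	my_enable_endpoints(port);
out_unlock:
	spin_unlock(&port->lock);
	return ret;
}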
*/ @@ -376,7 +376,7 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)  		}  	}  done: -	return retval;; +	return retval;  }  static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length, diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index 3dd40b4e675..a218a4de5dc 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c @@ -2481,7 +2481,7 @@ static void handle_stat1_irqs (struct net2280 *dev, u32 stat)  	mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);  	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set. -	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRRUPT set and +	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and  	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT  	 * only indicates a change in the reset state).  	 */ diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h index 5b7919460fd..81971fbc7ea 100644 --- a/drivers/usb/gadget/uvc.h +++ b/drivers/usb/gadget/uvc.h @@ -56,6 +56,7 @@ struct uvc_event  #include <linux/usb.h>	/* For usb_endpoint_* */  #include <linux/usb/gadget.h>  #include <linux/videodev2.h> +#include <linux/version.h>  #include <media/v4l2-fh.h>  #include "uvc_queue.h" diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c index 52f8f9e513a..cfb58384394 100644 --- a/drivers/usb/gadget/uvc_v4l2.c +++ b/drivers/usb/gadget/uvc_v4l2.c @@ -16,7 +16,6 @@  #include <linux/errno.h>  #include <linux/list.h>  #include <linux/mutex.h> -#include <linux/version.h>  #include <linux/videodev2.h>  #include <linux/vmalloc.h>  #include <linux/wait.h> diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index e051b30c184..4c32cb19b40 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -343,7 +343,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)  	u32			temp;  	u32			power_okay;  	int			i; -	u8			resume_needed = 0; +	unsigned long		resume_needed = 0;  	if (time_before (jiffies, ehci->next_statechange))  		msleep(5); @@ -416,7 +416,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)  		if (test_bit(i, &ehci->bus_suspended) &&  				(temp & PORT_SUSPEND)) {  			temp |= PORT_RESUME; -			resume_needed = 1; +			set_bit(i, &resume_needed);  		}  		ehci_writel(ehci, temp, &ehci->regs->port_status [i]);  	} @@ -431,8 +431,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)  	i = HCS_N_PORTS (ehci->hcs_params);  	while (i--) {  		temp = ehci_readl(ehci, &ehci->regs->port_status [i]); -		if (test_bit(i, &ehci->bus_suspended) && -				(temp & PORT_SUSPEND)) { +		if (test_bit(i, &resume_needed)) {  			temp &= ~(PORT_RWC_BITS | PORT_RESUME);  			ehci_writel(ehci, temp, &ehci->regs->port_status [i]);  			ehci_vdbg (ehci, "resumed port %d\n", i + 1); diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c index b3958b3d316..9e77f1c8bdb 100644 --- a/drivers/usb/host/ehci-s5p.c +++ b/drivers/usb/host/ehci-s5p.c @@ -86,6 +86,7 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)  		goto fail_hcd;  	} +	s5p_ehci->hcd = hcd;  	s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");  	if (IS_ERR(s5p_ehci->clk)) { diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 0be788cc2fd..723f8231193 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -463,11 +463,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,  					&& (temp & PORT_POWER))  				status |= USB_PORT_STAT_SUSPEND;  		} -		if ((temp & PORT_PLS_MASK) 
== XDEV_RESUME) { +		if ((temp & PORT_PLS_MASK) == XDEV_RESUME && +				!DEV_SUPERSPEED(temp)) {  			if ((temp & PORT_RESET) || !(temp & PORT_PE))  				goto error; -			if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies, -						bus_state->resume_done[wIndex])) { +			if (time_after_eq(jiffies, +					bus_state->resume_done[wIndex])) {  				xhci_dbg(xhci, "Resume USB2 port %d\n",  					wIndex + 1);  				bus_state->resume_done[wIndex] = 0; @@ -487,6 +488,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,  				xhci_ring_device(xhci, slot_id);  				bus_state->port_c_suspend |= 1 << wIndex;  				bus_state->suspended_ports &= ~(1 << wIndex); +			} else { +				/* +				 * The resume has been signaling for less than +				 * 20ms. Report the port status as SUSPEND, +				 * let the usbcore check port status again +				 * and clear resume signaling later. +				 */ +				status |= USB_PORT_STAT_SUSPEND;  			}  		}  		if ((temp & PORT_PLS_MASK) == XDEV_U0 @@ -664,7 +673,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,  			xhci_dbg(xhci, "PORTSC %04x\n", temp);  			if (temp & PORT_RESET)  				goto error; -			if (temp & XDEV_U3) { +			if ((temp & PORT_PLS_MASK) == XDEV_U3) {  				if ((temp & PORT_PE) == 0)  					goto error; @@ -752,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)  	memset(buf, 0, retval);  	status = 0; -	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; +	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;  	spin_lock_irqsave(&xhci->lock, flags);  	/* For each port, did anything change?  If so, set that bit in buf. */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 7113d16e2d3..952e2ded61a 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,  			(unsigned long long) addr);  } +/* flip_cycle means flip the cycle bit of all but the first and last TRB. + * (The last TRB actually points to the ring enqueue pointer, which is not part + * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring. + */  static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, -		struct xhci_td *cur_td) +		struct xhci_td *cur_td, bool flip_cycle)  {  	struct xhci_segment *cur_seg;  	union xhci_trb *cur_trb; @@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,  			 * leave the pointers intact.  			 */  			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); +			/* Flip the cycle bit (link TRBs can't be the first +			 * or last TRB). 
+			 */ +			if (flip_cycle) +				cur_trb->generic.field[3] ^= +					cpu_to_le32(TRB_CYCLE);  			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");  			xhci_dbg(xhci, "Address = %p (0x%llx dma); "  					"in seg %p (0x%llx dma)\n", @@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,  			cur_trb->generic.field[2] = 0;  			/* Preserve only the cycle bit of this TRB */  			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); +			/* Flip the cycle bit except on the first or last TRB */ +			if (flip_cycle && cur_trb != cur_td->first_trb && +					cur_trb != cur_td->last_trb) +				cur_trb->generic.field[3] ^= +					cpu_to_le32(TRB_CYCLE);  			cur_trb->generic.field[3] |= cpu_to_le32(  				TRB_TYPE(TRB_TR_NOOP));  			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " @@ -719,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,  					cur_td->urb->stream_id,  					cur_td, &deq_state);  		else -			td_to_noop(xhci, ep_ring, cur_td); +			td_to_noop(xhci, ep_ring, cur_td, false);  remove_finished_td:  		/*  		 * The event handler won't see a completion for this TD anymore,  		 * so remove it from the endpoint ring's TD list.  Keep it in  		 * the cancelled TD list for URB completion later.  		 */ -		list_del(&cur_td->td_list); +		list_del_init(&cur_td->td_list);  	}  	last_unlinked_td = cur_td;  	xhci_stop_watchdog_timer_in_irq(xhci, ep); @@ -754,7 +769,7 @@ remove_finished_td:  	do {  		cur_td = list_entry(ep->cancelled_td_list.next,  				struct xhci_td, cancelled_td_list); -		list_del(&cur_td->cancelled_td_list); +		list_del_init(&cur_td->cancelled_td_list);  		/* Clean up the cancelled URB */  		/* Doesn't matter what we pass for status, since the core will @@ -862,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)  				cur_td = list_first_entry(&ring->td_list,  						struct xhci_td,  						td_list); -				list_del(&cur_td->td_list); +				list_del_init(&cur_td->td_list);  				if (!list_empty(&cur_td->cancelled_td_list)) -					list_del(&cur_td->cancelled_td_list); +					list_del_init(&cur_td->cancelled_td_list);  				xhci_giveback_urb_in_irq(xhci, cur_td,  						-ESHUTDOWN, "killed");  			} @@ -873,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)  						&temp_ep->cancelled_td_list,  						struct xhci_td,  						cancelled_td_list); -				list_del(&cur_td->cancelled_td_list); +				list_del_init(&cur_td->cancelled_td_list);  				xhci_giveback_urb_in_irq(xhci, cur_td,  						-ESHUTDOWN, "killed");  			} @@ -1565,10 +1580,10 @@ td_cleanup:  			else  				*status = 0;  		} -		list_del(&td->td_list); +		list_del_init(&td->td_list);  		/* Was this TD slated to be cancelled but completed anyway? 
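These list_del() to list_del_init() conversions are not cosmetic: the driver later asks list_empty() about the same nodes, and may remove them again, which is only well defined if the node was re-initialized rather than left with poisoned pointers. In short:

	list_del(&td->td_list);		/* pointers poisoned; the node must not be tested or deleted again */
	list_del_init(&td->td_list);	/* node points at itself: list_empty(&td->td_list) is true,
					 * and a second list_del_init() is a harmless no-op */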
*/  		if (!list_empty(&td->cancelled_td_list)) -			list_del(&td->cancelled_td_list); +			list_del_init(&td->cancelled_td_list);  		urb_priv->td_cnt++;  		/* Giveback the urb when all the tds are completed */ @@ -1919,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,  	int status = -EINPROGRESS;  	struct urb_priv *urb_priv;  	struct xhci_ep_ctx *ep_ctx; +	struct list_head *tmp;  	u32 trb_comp_code;  	int ret = 0; +	int td_num = 0;  	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));  	xdev = xhci->devs[slot_id]; @@ -1942,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,  		return -ENODEV;  	} +	/* Count current td numbers if ep->skip is set */ +	if (ep->skip) { +		list_for_each(tmp, &ep_ring->td_list) +			td_num++; +	} +  	event_dma = le64_to_cpu(event->buffer);  	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));  	/* Look for common error cases */ @@ -2053,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,  			goto cleanup;  		} +		/* We've skipped all the TDs on the ep ring when ep->skip set */ +		if (ep->skip && td_num == 0) { +			ep->skip = false; +			xhci_dbg(xhci, "All tds on the ep_ring skipped. " +						"Clear skip flag.\n"); +			ret = 0; +			goto cleanup; +		} +  		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); +		if (ep->skip) +			td_num--;  		/* Is this a TRB in the currently executing TD? */  		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, @@ -2500,11 +2534,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,  	if (td_index == 0) {  		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); -		if (unlikely(ret)) { -			xhci_urb_free_priv(xhci, urb_priv); -			urb->hcpriv = NULL; +		if (unlikely(ret))  			return ret; -		}  	}  	td->urb = urb; @@ -2672,6 +2703,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,  {  	int packets_transferred; +	/* One TRB with a zero-length data packet. */ +	if (running_total == 0 && trb_buff_len == 0) +		return 0; +  	/* All the TRB queueing functions don't count the current TRB in  	 * running_total.  	 */ @@ -3113,21 +3148,16 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,  		struct urb *urb, int i)  {  	int num_trbs = 0; -	u64 addr, td_len, running_total; +	u64 addr, td_len;  	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);  	td_len = urb->iso_frame_desc[i].length; -	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); -	running_total &= TRB_MAX_BUFF_SIZE - 1; -	if (running_total != 0) +	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)), +			TRB_MAX_BUFF_SIZE); +	if (num_trbs == 0)  		num_trbs++; -	while (running_total < td_len) { -		num_trbs++; -		running_total += TRB_MAX_BUFF_SIZE; -	} -  	return num_trbs;  } @@ -3226,6 +3256,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,  	start_trb = &ep_ring->enqueue->generic;  	start_cycle = ep_ring->cycle_state; +	urb_priv = urb->hcpriv;  	/* Queue the first TRB, even if it's zero-length */  	for (i = 0; i < num_tds; i++) {  		unsigned int total_packet_count; @@ -3237,9 +3268,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,  		addr = start_addr + urb->iso_frame_desc[i].offset;  		td_len = urb->iso_frame_desc[i].length;  		td_remain_len = td_len; -		/* FIXME: Ignoring zero-length packets, can those happen? */  		total_packet_count = roundup(td_len,  				le16_to_cpu(urb->ep->desc.wMaxPacketSize)); +		/* A zero-length transfer still involves at least one packet. 
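The count_isoc_trbs_needed() rewrite folds the chunk-counting loop into arithmetic: the bytes before the first TRB boundary are added to the TD length and the sum is divided, rounding up. A quick check with made-up numbers, assuming TRB_MAX_BUFF_SIZE is 64 KiB:

	/* td_len = 130000 bytes, starting 49152 bytes into a 64 KiB segment */
	num_trbs = DIV_ROUND_UP(130000 + 49152, 65536);	/* = 3, same as the old loop computed */
	if (num_trbs == 0)	/* only possible for a zero-length TD */
		num_trbs++;	/* which still consumes one TRB */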
*/ +		if (total_packet_count == 0) +			total_packet_count++;  		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,  				total_packet_count);  		residue = xhci_get_last_burst_packet_count(xhci, @@ -3249,12 +3282,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,  		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,  				urb->stream_id, trbs_per_td, urb, i, mem_flags); -		if (ret < 0) -			return ret; +		if (ret < 0) { +			if (i == 0) +				return ret; +			goto cleanup; +		} -		urb_priv = urb->hcpriv;  		td = urb_priv->td[i]; -  		for (j = 0; j < trbs_per_td; j++) {  			u32 remainder = 0;  			field = TRB_TBC(burst_count) | TRB_TLBPC(residue); @@ -3344,6 +3378,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,  	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,  			start_cycle, start_trb);  	return 0; +cleanup: +	/* Clean up a partially enqueued isoc transfer. */ + +	for (i--; i >= 0; i--) +		list_del_init(&urb_priv->td[i]->td_list); + +	/* Use the first TD as a temporary variable to turn the TDs we've queued +	 * into No-ops with a software-owned cycle bit. That way the hardware +	 * won't accidentally start executing bogus TDs when we partially +	 * overwrite them.  td->first_trb and td->start_seg are already set. +	 */ +	urb_priv->td[0]->last_trb = ep_ring->enqueue; +	/* Every TRB except the first & last will have its cycle bit flipped. */ +	td_to_noop(xhci, ep_ring, urb_priv->td[0], true); + +	/* Reset the ring enqueue back to the first TRB and its cycle bit. */ +	ep_ring->enqueue = urb_priv->td[0]->first_trb; +	ep_ring->enq_seg = urb_priv->td[0]->start_seg; +	ep_ring->cycle_state = start_cycle; +	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); +	return ret;  }  /* diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1c4432d8fc1..3a0f695138f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1085,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)  		if (urb->dev->speed == USB_SPEED_FULL) {  			ret = xhci_check_maxpacket(xhci, slot_id,  					ep_index, urb); -			if (ret < 0) +			if (ret < 0) { +				xhci_urb_free_priv(xhci, urb_priv); +				urb->hcpriv = NULL;  				return ret; +			}  		}  		/* We have a spinlock and interrupts disabled, so we must pass @@ -1097,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)  			goto dying;  		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,  				slot_id, ep_index); +		if (ret) +			goto free_priv;  		spin_unlock_irqrestore(&xhci->lock, flags);  	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {  		spin_lock_irqsave(&xhci->lock, flags); @@ -1117,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)  			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,  					slot_id, ep_index);  		} +		if (ret) +			goto free_priv;  		spin_unlock_irqrestore(&xhci->lock, flags);  	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {  		spin_lock_irqsave(&xhci->lock, flags); @@ -1124,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)  			goto dying;  		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,  				slot_id, ep_index); +		if (ret) +			goto free_priv;  		spin_unlock_irqrestore(&xhci->lock, flags);  	} else {  		spin_lock_irqsave(&xhci->lock, flags); @@ -1131,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)  			goto dying;  		ret = xhci_queue_isoc_tx_prepare(xhci, 
GFP_ATOMIC, urb,  				slot_id, ep_index); +		if (ret) +			goto free_priv;  		spin_unlock_irqrestore(&xhci->lock, flags);  	}  exit:  	return ret;  dying: -	xhci_urb_free_priv(xhci, urb_priv); -	urb->hcpriv = NULL;  	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "  			"non-responsive xHCI host.\n",  			urb->ep->desc.bEndpointAddress, urb); +	ret = -ESHUTDOWN; +free_priv: +	xhci_urb_free_priv(xhci, urb_priv); +	urb->hcpriv = NULL;  	spin_unlock_irqrestore(&xhci->lock, flags); -	return -ESHUTDOWN; +	return ret;  }  /* Get the right ring for the given URB. @@ -1239,6 +1252,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)  	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {  		xhci_dbg(xhci, "HW died, freeing TD.\n");  		urb_priv = urb->hcpriv; +		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { +			td = urb_priv->td[i]; +			if (!list_empty(&td->td_list)) +				list_del_init(&td->td_list); +			if (!list_empty(&td->cancelled_td_list)) +				list_del_init(&td->cancelled_td_list); +		}  		usb_hcd_unlink_urb_from_ep(hcd, urb);  		spin_unlock_irqrestore(&xhci->lock, flags); diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index ae8c3961774..5e7cfba5b07 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -17,6 +17,7 @@  #include <linux/io.h>  #include <linux/platform_device.h>  #include <linux/dma-mapping.h> +#include <linux/prefetch.h>  #include <asm/cacheflush.h> diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index 149f3f310a0..318fb4e8a88 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -226,8 +226,10 @@ static int cppi_controller_stop(struct dma_controller *c)  	struct cppi		*controller;  	void __iomem		*tibase;  	int			i; +	struct musb		*musb;  	controller = container_of(c, struct cppi, controller); +	musb = controller->musb;  	tibase = controller->tibase;  	/* DISABLE INDIVIDUAL CHANNEL Interrupts */ @@ -289,9 +291,11 @@ cppi_channel_allocate(struct dma_controller *c,  	u8			index;  	struct cppi_channel	*cppi_ch;  	void __iomem		*tibase; +	struct musb		*musb;  	controller = container_of(c, struct cppi, controller);  	tibase = controller->tibase; +	musb = controller->musb;  	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */  	index = ep->epnum - 1; @@ -339,7 +343,8 @@ static void cppi_channel_release(struct dma_channel *channel)  	c = container_of(channel, struct cppi_channel, channel);  	tibase = c->controller->tibase;  	if (!c->hw_ep) -		dev_dbg(musb->controller, "releasing idle DMA channel %p\n", c); +		dev_dbg(c->controller->musb->controller, +			"releasing idle DMA channel %p\n", c);  	else if (!c->transmit)  		core_rxirq_enable(tibase, c->index + 1); @@ -357,10 +362,11 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)  	musb_ep_select(base, c->index + 1); -	DBG(level, "RX DMA%d%s: %d left, csr %04x, " -			"%08x H%08x S%08x C%08x, " -			"B%08x L%08x %08x .. %08x" -			"\n", +	dev_dbg(c->controller->musb->controller, +		"RX DMA%d%s: %d left, csr %04x, " +		"%08x H%08x S%08x C%08x, " +		"B%08x L%08x %08x .. %08x" +		"\n",  		c->index, tag,  		musb_readl(c->controller->tibase,  			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), @@ -387,10 +393,11 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)  	musb_ep_select(base, c->index + 1); -	DBG(level, "TX DMA%d%s: csr %04x, " -			"H%08x S%08x C%08x %08x, " -			"F%08x L%08x .. 
%08x" -			"\n", +	dev_dbg(c->controller->musb->controller, +		"TX DMA%d%s: csr %04x, " +		"H%08x S%08x C%08x %08x, " +		"F%08x L%08x .. %08x" +		"\n",  		c->index, tag,  		musb_readw(c->hw_ep->regs, MUSB_TXCSR), @@ -1022,6 +1029,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)  	int				i;  	dma_addr_t			safe2ack;  	void __iomem			*regs = rx->hw_ep->regs; +	struct musb			*musb = cppi->musb;  	cppi_dump_rx(6, rx, "/K"); diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 668eeef601a..b3c065ab9db 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -172,7 +172,8 @@ enum musb_g_ep0_state {  #endif  /* TUSB mapping: "flat" plus ep0 special cases */ -#if	defined(CONFIG_USB_MUSB_TUSB6010) +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ +	defined(CONFIG_USB_MUSB_TUSB6010_MODULE)  #define musb_ep_select(_mbase, _epnum) \  	musb_writeb((_mbase), MUSB_INDEX, (_epnum))  #define	MUSB_EP_OFFSET			MUSB_TUSB_OFFSET @@ -241,7 +242,8 @@ struct musb_hw_ep {  	void __iomem		*fifo;  	void __iomem		*regs; -#ifdef CONFIG_USB_MUSB_TUSB6010 +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ +	defined(CONFIG_USB_MUSB_TUSB6010_MODULE)  	void __iomem		*conf;  #endif @@ -258,7 +260,8 @@ struct musb_hw_ep {  	struct dma_channel	*tx_channel;  	struct dma_channel	*rx_channel; -#ifdef CONFIG_USB_MUSB_TUSB6010 +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ +	defined(CONFIG_USB_MUSB_TUSB6010_MODULE)  	/* TUSB has "asynchronous" and "synchronous" dma modes */  	dma_addr_t		fifo_async;  	dma_addr_t		fifo_sync; @@ -356,7 +359,8 @@ struct musb {  	void __iomem		*ctrl_base;  	void __iomem		*mregs; -#ifdef CONFIG_USB_MUSB_TUSB6010 +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ +	defined(CONFIG_USB_MUSB_TUSB6010_MODULE)  	dma_addr_t		async;  	dma_addr_t		sync;  	void __iomem		*sync_va; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 8c41a2e6ea7..fe8d14cac43 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -704,7 +704,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)  	 * most these gadgets, end of is signified either by a short packet,  	 * or filling the last byte of the buffer.  (Sending extra data in  	 * that last pckate should trigger an overflow fault.)  But in mode 1, -	 * we don't get DMA completion interrrupt for short packets. +	 * we don't get DMA completion interrupt for short packets.  	 
*  	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),  	 * to get endpoint interrupt on every DMA req, but that didn't seem @@ -1856,6 +1856,7 @@ int __init musb_gadget_setup(struct musb *musb)  	return 0;  err: +	musb->g.dev.parent = NULL;  	device_unregister(&musb->g.dev);  	return status;  } @@ -1863,7 +1864,8 @@ err:  void musb_gadget_cleanup(struct musb *musb)  {  	usb_del_gadget_udc(&musb->g); -	device_unregister(&musb->g.dev); +	if (musb->g.dev.parent) +		device_unregister(&musb->g.dev);  }  /* diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index 82410703dcd..03f2655af29 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h @@ -234,7 +234,8 @@  #define MUSB_TESTMODE		0x0F	/* 8 bit */  /* Get offset for a given FIFO from musb->mregs */ -#ifdef	CONFIG_USB_MUSB_TUSB6010 +#if defined(CONFIG_USB_MUSB_TUSB6010) ||	\ +	defined(CONFIG_USB_MUSB_TUSB6010_MODULE)  #define MUSB_FIFO_OFFSET(epnum)	(0x200 + ((epnum) * 0x20))  #else  #define MUSB_FIFO_OFFSET(epnum)	(0x20 + ((epnum) * 4)) @@ -295,7 +296,8 @@  #define MUSB_FLAT_OFFSET(_epnum, _offset)	\  	(0x100 + (0x10*(_epnum)) + (_offset)) -#ifdef CONFIG_USB_MUSB_TUSB6010 +#if defined(CONFIG_USB_MUSB_TUSB6010) ||	\ +	defined(CONFIG_USB_MUSB_TUSB6010_MODULE)  /* TUSB6010 EP0 configuration register is special */  #define MUSB_TUSB_OFFSET(_epnum, _offset)	\  	(0x10 + _offset) diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 9eec41fbf3a..ec1480191f7 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -18,6 +18,7 @@  #include <linux/kernel.h>  #include <linux/errno.h>  #include <linux/init.h> +#include <linux/prefetch.h>  #include <linux/usb.h>  #include <linux/irq.h>  #include <linux/platform_device.h> diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c index 07c8a73dfe4..b67b4bc596c 100644 --- a/drivers/usb/musb/tusb6010_omap.c +++ b/drivers/usb/musb/tusb6010_omap.c @@ -20,6 +20,7 @@  #include <plat/mux.h>  #include "musb_core.h" +#include "tusb6010.h"  #define to_chdat(c)		((struct tusb_omap_dma_ch *)(c)->private_data) diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c index cecace41183..ef4333f4bbe 100644 --- a/drivers/usb/musb/ux500_dma.c +++ b/drivers/usb/musb/ux500_dma.c @@ -65,7 +65,8 @@ static void ux500_tx_work(struct work_struct *data)  	struct musb *musb = hw_ep->musb;  	unsigned long flags; -	DBG(4, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum); +	dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n", +		hw_ep->epnum);  	spin_lock_irqsave(&musb->lock, flags);  	ux500_channel->channel.actual_len = ux500_channel->cur_len; @@ -84,7 +85,8 @@ static void ux500_rx_work(struct work_struct *data)  	struct musb *musb = hw_ep->musb;  	unsigned long flags; -	DBG(4, "DMA rx transfer done on hw_ep=%d\n", hw_ep->epnum); +	dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n", +		hw_ep->epnum);  	spin_lock_irqsave(&musb->lock, flags);  	ux500_channel->channel.actual_len = ux500_channel->cur_len; @@ -116,9 +118,11 @@ static bool ux500_configure_channel(struct dma_channel *channel,  	enum dma_slave_buswidth addr_width;  	dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +  					ux500_channel->controller->phy_base); +	struct musb *musb = ux500_channel->controller->private_data; -	DBG(4, "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", -			packet_sz, mode, dma_addr, len, ux500_channel->is_tx); +	dev_dbg(musb->controller, +		"packet_sz=%d, 
mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", +		packet_sz, mode, dma_addr, len, ux500_channel->is_tx);  	ux500_channel->cur_len = len; @@ -133,15 +137,13 @@ static bool ux500_configure_channel(struct dma_channel *channel,  					DMA_SLAVE_BUSWIDTH_4_BYTES;  	slave_conf.direction = direction; -	if (direction == DMA_FROM_DEVICE) { -		slave_conf.src_addr = usb_fifo_addr; -		slave_conf.src_addr_width = addr_width; -		slave_conf.src_maxburst = 16; -	} else { -		slave_conf.dst_addr = usb_fifo_addr; -		slave_conf.dst_addr_width = addr_width; -		slave_conf.dst_maxburst = 16; -	} +	slave_conf.src_addr = usb_fifo_addr; +	slave_conf.src_addr_width = addr_width; +	slave_conf.src_maxburst = 16; +	slave_conf.dst_addr = usb_fifo_addr; +	slave_conf.dst_addr_width = addr_width; +	slave_conf.dst_maxburst = 16; +  	dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,  					     (unsigned long) &slave_conf); @@ -166,6 +168,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,  	struct ux500_dma_controller *controller = container_of(c,  			struct ux500_dma_controller, controller);  	struct ux500_dma_channel *ux500_channel = NULL; +	struct musb *musb = controller->private_data;  	u8 ch_num = hw_ep->epnum - 1;  	u32 max_ch; @@ -192,7 +195,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,  	ux500_channel->hw_ep = hw_ep;  	ux500_channel->is_allocated = 1; -	DBG(7, "hw_ep=%d, is_tx=0x%x, channel=%d\n", +	dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n",  		hw_ep->epnum, is_tx, ch_num);  	return &(ux500_channel->channel); @@ -201,8 +204,9 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,  static void ux500_dma_channel_release(struct dma_channel *channel)  {  	struct ux500_dma_channel *ux500_channel = channel->private_data; +	struct musb *musb = ux500_channel->controller->private_data; -	DBG(7, "channel=%d\n", ux500_channel->ch_num); +	dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num);  	if (ux500_channel->is_allocated) {  		ux500_channel->is_allocated = 0; @@ -252,8 +256,8 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)  	void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;  	u16 csr; -	DBG(4, "channel=%d, is_tx=%d\n", ux500_channel->ch_num, -						ux500_channel->is_tx); +	dev_dbg(musb->controller, "channel=%d, is_tx=%d\n", +		ux500_channel->ch_num, ux500_channel->is_tx);  	if (channel->status == MUSB_DMA_STATUS_BUSY) {  		if (ux500_channel->is_tx) { diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index 86fbba6336c..e92cbefc0f8 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -227,7 +227,7 @@  *  - All sleeps use a timeout of DIGI_RETRY_TIMEOUT before looping to  *    recheck the condition they are sleeping on.  This is defensive,  *    in case a wake up is lost. -*  - Following Documentation/DocBook/kernel-locking.pdf no spin locks +*  - Following Documentation/DocBook/kernel-locking.tmpl no spin locks  *    are held when calling copy_to/from_user or printk.  
*/ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 78a2cf9551c..5fc13e71791 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -101,6 +101,7 @@ static int   ftdi_jtag_probe(struct usb_serial *serial);  static int   ftdi_mtxorb_hack_setup(struct usb_serial *serial);  static int   ftdi_NDI_device_setup(struct usb_serial *serial);  static int   ftdi_stmclite_probe(struct usb_serial *serial); +static int   ftdi_8u2232c_probe(struct usb_serial *serial);  static void  ftdi_USB_UIRT_setup(struct ftdi_private *priv);  static void  ftdi_HE_TIRA1_setup(struct ftdi_private *priv); @@ -128,6 +129,10 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {  	.probe	= ftdi_stmclite_probe,  }; +static struct ftdi_sio_quirk ftdi_8u2232c_quirk = { +	.probe	= ftdi_8u2232c_probe, +}; +  /*   * The 8U232AM has the same API as the sio except for:   * - it can support MUCH higher baudrates; up to: @@ -178,7 +183,8 @@ static struct usb_device_id id_table_combined [] = {  	{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, -	{ USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, +	{ USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) , +		.driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk },  	{ USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_232H_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, @@ -1737,6 +1743,18 @@ static int ftdi_jtag_probe(struct usb_serial *serial)  	return 0;  } +static int ftdi_8u2232c_probe(struct usb_serial *serial) +{ +	struct usb_device *udev = serial->dev; + +	dbg("%s", __func__); + +	if (strcmp(udev->manufacturer, "CALAO Systems") == 0) +		return ftdi_jtag_probe(serial); + +	return 0; +} +  /*   * First and second port on STMCLiteadaptors is reserved for JTAG interface   * and the forth port for pio diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 40abedbc594..3524a105d04 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -2006,7 +2006,6 @@ static int mos7720_ioctl(struct tty_struct *tty,  		dbg("%s (%d) TIOCSERGETLSR", __func__,  port->number);  		return get_lsr_info(tty, mos7720_port,  					(unsigned int __user *)arg); -		return 0;  	/* FIXME: These should be using the mode methods */  	case TIOCMBIS: diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 7b50aa12275..c72abd52498 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -2263,7 +2263,6 @@ static int mos7840_ioctl(struct tty_struct *tty,  	case TIOCSERGETLSR:  		dbg("%s (%d) TIOCSERGETLSR", __func__, port->number);  		return mos7840_get_lsr_info(tty, argp); -		return 0;  	case TIOCGSERIAL:  		dbg("%s (%d) TIOCGSERIAL", __func__, port->number); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 81565619891..fe22e90bc87 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -148,6 +148,8 @@ static void option_instat_callback(struct urb *urb);  #define HUAWEI_PRODUCT_K4505			0x1464  #define HUAWEI_PRODUCT_K3765			0x1465  #define HUAWEI_PRODUCT_E14AC			0x14AC +#define HUAWEI_PRODUCT_K3806			0x14AE +#define HUAWEI_PRODUCT_K4605			0x14C6  #define HUAWEI_PRODUCT_K3770			0x14C9  #define HUAWEI_PRODUCT_K3771			0x14CA  #define HUAWEI_PRODUCT_K4510			0x14CB @@ -416,6 +418,56 @@ static void option_instat_callback(struct urb *urb);  #define SAMSUNG_VENDOR_ID                       0x04e8  #define 
SAMSUNG_PRODUCT_GT_B3730                0x6889 +/* YUGA products  www.yuga-info.com*/ +#define YUGA_VENDOR_ID				0x257A +#define YUGA_PRODUCT_CEM600			0x1601 +#define YUGA_PRODUCT_CEM610			0x1602 +#define YUGA_PRODUCT_CEM500			0x1603 +#define YUGA_PRODUCT_CEM510			0x1604 +#define YUGA_PRODUCT_CEM800			0x1605 +#define YUGA_PRODUCT_CEM900			0x1606 + +#define YUGA_PRODUCT_CEU818			0x1607 +#define YUGA_PRODUCT_CEU816			0x1608 +#define YUGA_PRODUCT_CEU828			0x1609 +#define YUGA_PRODUCT_CEU826			0x160A +#define YUGA_PRODUCT_CEU518			0x160B +#define YUGA_PRODUCT_CEU516			0x160C +#define YUGA_PRODUCT_CEU528			0x160D +#define YUGA_PRODUCT_CEU526			0x160F + +#define YUGA_PRODUCT_CWM600			0x2601 +#define YUGA_PRODUCT_CWM610			0x2602 +#define YUGA_PRODUCT_CWM500			0x2603 +#define YUGA_PRODUCT_CWM510			0x2604 +#define YUGA_PRODUCT_CWM800			0x2605 +#define YUGA_PRODUCT_CWM900			0x2606 + +#define YUGA_PRODUCT_CWU718			0x2607 +#define YUGA_PRODUCT_CWU716			0x2608 +#define YUGA_PRODUCT_CWU728			0x2609 +#define YUGA_PRODUCT_CWU726			0x260A +#define YUGA_PRODUCT_CWU518			0x260B +#define YUGA_PRODUCT_CWU516			0x260C +#define YUGA_PRODUCT_CWU528			0x260D +#define YUGA_PRODUCT_CWU526			0x260F + +#define YUGA_PRODUCT_CLM600			0x2601 +#define YUGA_PRODUCT_CLM610			0x2602 +#define YUGA_PRODUCT_CLM500			0x2603 +#define YUGA_PRODUCT_CLM510			0x2604 +#define YUGA_PRODUCT_CLM800			0x2605 +#define YUGA_PRODUCT_CLM900			0x2606 + +#define YUGA_PRODUCT_CLU718			0x2607 +#define YUGA_PRODUCT_CLU716			0x2608 +#define YUGA_PRODUCT_CLU728			0x2609 +#define YUGA_PRODUCT_CLU726			0x260A +#define YUGA_PRODUCT_CLU518			0x260B +#define YUGA_PRODUCT_CLU516			0x260C +#define YUGA_PRODUCT_CLU528			0x260D +#define YUGA_PRODUCT_CLU526			0x260F +  /* some devices interfaces need special handling due to a number of reasons */  enum option_blacklist_reason {  		OPTION_BLACKLIST_NONE = 0, @@ -551,6 +603,8 @@ static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, +	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, +	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) },  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, @@ -1005,6 +1059,48 @@ static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */  	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */  	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) }, +	{ 
USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, +	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },  	{ } /* Terminating entry */  };  MODULE_DEVICE_TABLE(usb, option_ids); @@ -1134,11 +1230,13 @@ static int option_probe(struct usb_serial *serial,  		serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)  		return -ENODEV; -	/* Don't bind network interfaces on Huawei K3765 & K4505 */ +	/* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */  	if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&  		(serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 || -			serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) && -		serial->interface->cur_altsetting->desc.bInterfaceNumber == 1) +			serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 || +			serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) && +		(serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 || +			serial->interface->cur_altsetting->desc.bInterfaceNumber == 2))  		return -ENODEV;  	/* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 34adc4b42ce..6fd13068481 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -25,7 +25,6 @@  #include <linux/kthread.h>  #include <linux/sched.h>  #include <linux/kernel.h> -#include <linux/version.h>  #include <scsi/scsi.h>  #include <scsi/scsi_cmnd.h> diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h index 157485c862c..a7494bf1008 100644 --- a/drivers/uwb/uwb-internal.h +++ b/drivers/uwb/uwb-internal.h @@ -28,7 +28,6 @@  #ifndef __UWB_INTERNAL_H__  #define __UWB_INTERNAL_H__ -#include <linux/version.h>  #include <linux/kernel.h>  
#include <linux/device.h>  #include <linux/uwb.h> diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c index 05a8832bb3e..d06886a2bfb 100644 --- a/drivers/video/backlight/adp8870_bl.c +++ b/drivers/video/backlight/adp8870_bl.c @@ -1009,4 +1009,4 @@ module_exit(adp8870_exit);  MODULE_LICENSE("GPL v2");  MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");  MODULE_DESCRIPTION("ADP8870 Backlight driver"); -MODULE_ALIAS("platform:adp8870-backlight"); +MODULE_ALIAS("i2c:adp8870-backlight"); diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 80d292fb92d..7363c1b169e 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -19,7 +19,7 @@  #include <asm/backlight.h>  #endif -static const char const *backlight_types[] = { +static const char *const backlight_types[] = {  	[BACKLIGHT_RAW] = "raw",  	[BACKLIGHT_PLATFORM] = "platform",  	[BACKLIGHT_FIRMWARE] = "firmware", diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c index 9f1e389d51d..b0582917f0c 100644 --- a/drivers/video/backlight/ep93xx_bl.c +++ b/drivers/video/backlight/ep93xx_bl.c @@ -11,7 +11,7 @@   * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors.   */ - +#include <linux/module.h>  #include <linux/platform_device.h>  #include <linux/io.h>  #include <linux/fb.h> diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index b8f38ec6eb1..8b5b2a4124c 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -28,6 +28,8 @@ struct pwm_bl_data {  	unsigned int		lth_brightness;  	int			(*notify)(struct device *,  					  int brightness); +	void			(*notify_after)(struct device *, +					int brightness);  	int			(*check_fb)(struct device *, struct fb_info *);  }; @@ -55,6 +57,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl)  		pwm_config(pb->pwm, brightness, pb->period);  		pwm_enable(pb->pwm);  	} + +	if (pb->notify_after) +		pb->notify_after(pb->dev, brightness); +  	return 0;  } @@ -105,6 +111,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)  	pb->period = data->pwm_period_ns;  	pb->notify = data->notify; +	pb->notify_after = data->notify_after;  	pb->check_fb = data->check_fb;  	pb->lth_brightness = data->lth_brightness *  		(data->pwm_period_ns / data->max_brightness); @@ -172,6 +179,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev,  		pb->notify(pb->dev, 0);  	pwm_config(pb->pwm, 0, pb->period);  	pwm_disable(pb->pwm); +	if (pb->notify_after) +		pb->notify_after(pb->dev, 0);  	return 0;  } diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c index d885c770eb8..2d97752f79a 100644 --- a/drivers/video/igafb.c +++ b/drivers/video/igafb.c @@ -428,7 +428,7 @@ static int __init igafb_init(void)  	 *  	 * IGS2000 has its I/O memory mapped and we want  	 * to generate memory cycles on PCI, e.g. do ioremap(), -	 * then readb/writeb() as in Documentation/IO-mapping.txt. +	 * then readb/writeb() as in Documentation/io-mapping.txt.  	 *  	 * IGS1682 is more traditional, it responds to PCI I/O  	 * cycles, so we want to access it with inb()/outb(). 
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c index 02bf7bf7160..b5abaae38e9 100644 --- a/drivers/w1/masters/ds2490.c +++ b/drivers/w1/masters/ds2490.c @@ -1,7 +1,7 @@  /*   *	dscore.c   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify @@ -1024,5 +1024,5 @@ module_init(ds_init);  module_exit(ds_fini);  MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); +MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");  MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)"); diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c index 334d1ccf9c9..f667c26b219 100644 --- a/drivers/w1/masters/matrox_w1.c +++ b/drivers/w1/masters/matrox_w1.c @@ -1,7 +1,7 @@  /*   *	matrox_w1.c   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify @@ -39,7 +39,7 @@  #include "../w1_log.h"  MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); +MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");  MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio).");  static struct pci_device_id matrox_w1_tbl[] = { diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c index c37781899d9..7c8cdb8aed2 100644 --- a/drivers/w1/slaves/w1_ds2408.c +++ b/drivers/w1/slaves/w1_ds2408.c @@ -373,7 +373,7 @@ static int w1_f29_add_slave(struct w1_slave *sl)  static void w1_f29_remove_slave(struct w1_slave *sl)  {  	int i; -	for (i = NB_SYSFS_BIN_FILES; i <= 0; --i) +	for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)  		sysfs_remove_bin_file(&sl->dev.kobj,  			&(w1_f29_sysfs_bin_files[i]));  } diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c index cc8c02e9259..84655625c87 100644 --- a/drivers/w1/slaves/w1_smem.c +++ b/drivers/w1/slaves/w1_smem.c @@ -1,7 +1,7 @@  /*   *	w1_smem.c   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify @@ -32,7 +32,7 @@  #include "../w1_family.h"  MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); +MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");  MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");  static struct w1_family w1_smem_family_01 = { diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 402928b135d..a1ef9b5b38c 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -1,7 +1,7 @@  /*   *	w1_therm.c   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify @@ -34,7 +34,7 @@  #include "../w1_family.h"  MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); +MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");  MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");  /* Allow the strong pullup to be disabled, but default to enabled. 
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 6c136c19e98..c3749782385 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -1,7 +1,7 @@  /*   *	w1.c   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify @@ -42,7 +42,7 @@  #include "w1_netlink.h"  MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); +MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");  MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");  static int w1_timeout = 10; diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h index 1ce23fc6186..4d012ca3f32 100644 --- a/drivers/w1/w1.h +++ b/drivers/w1/w1.h @@ -1,7 +1,7 @@  /*   *	w1.h   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c index 4a099041f28..63359797c8b 100644 --- a/drivers/w1/w1_family.c +++ b/drivers/w1/w1_family.c @@ -1,7 +1,7 @@  /*   *	w1_family.c   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index 98a1ac0f469..490cda2281b 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h @@ -1,7 +1,7 @@  /*   *	w1_family.h   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index b50be3f1073..d220bce2cee 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c @@ -1,7 +1,7 @@  /*   *	w1_int.c   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h index 4274082d226..2ad7d4414be 100644 --- a/drivers/w1/w1_int.h +++ b/drivers/w1/w1_int.h @@ -1,7 +1,7 @@  /*   *	w1_int.h   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 8e8b64cfafb..765b37b62a4 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c @@ -1,7 +1,7 @@  /*   *	w1_io.c   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h index e6ab7cf08f8..9c7bd62e6bd 100644 --- a/drivers/w1/w1_log.h +++ b/drivers/w1/w1_log.h @@ -1,7 +1,7 @@  /*   *	w1_log.h   * - * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index 55aabd927c6..40788c925d1 100644 --- a/drivers/w1/w1_netlink.c +++ b/drivers/w1/w1_netlink.c @@ -1,7 +1,7 @@  /*   * w1_netlink.c   * - * Copyright (c) 2003 
Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h index 27e950f935b..b0922dc2965 100644 --- a/drivers/w1/w1_netlink.h +++ b/drivers/w1/w1_netlink.h @@ -1,7 +1,7 @@  /*   * w1_netlink.h   * - * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>   *   *   * This program is free software; you can redistribute it and/or modify diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 410fba45378..809cbda03d7 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -494,15 +494,16 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,  		asminline_call(&cmn_regs, cru_rom_addr);  	die_nmi_called = 1;  	spin_unlock_irqrestore(&rom_lock, rom_pl); + +	if (allow_kdump) +		hpwdt_stop(); +  	if (!is_icru) {  		if (cmn_regs.u1.ral == 0) { -			printk(KERN_WARNING "hpwdt: An NMI occurred, " +			panic("An NMI occurred, "  				"but unable to determine source.\n");  		}  	} - -	if (allow_kdump) -		hpwdt_stop();  	panic("An NMI occurred, please see the Integrated "  		"Management Log for details.\n"); diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c index 7d82adac1cb..102aed0efbf 100644 --- a/drivers/watchdog/lantiq_wdt.c +++ b/drivers/watchdog/lantiq_wdt.c @@ -51,16 +51,16 @@ static int ltq_wdt_ok_to_close;  static void  ltq_wdt_enable(void)  { -	ltq_wdt_timeout = ltq_wdt_timeout * +	unsigned long int timeout = ltq_wdt_timeout *  			(ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; -	if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT) -		ltq_wdt_timeout = LTQ_MAX_TIMEOUT; +	if (timeout > LTQ_MAX_TIMEOUT) +		timeout = LTQ_MAX_TIMEOUT;  	/* write the first password magic */  	ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);  	/* write the second magic plus the configuration and new timeout */  	ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | -		LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR); +		LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);  }  static void diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c index 3066a5127ca..eaca366b723 100644 --- a/drivers/watchdog/sbc_epx_c3.c +++ b/drivers/watchdog/sbc_epx_c3.c @@ -173,7 +173,7 @@ static struct notifier_block epx_c3_notifier = {  	.notifier_call = epx_c3_notify_sys,  }; -static const char banner[] __initdata = KERN_INFO PFX +static const char banner[] __initconst = KERN_INFO PFX  	"Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";  static int __init watchdog_init(void) diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c index e97b0499bd0..97b8184614a 100644 --- a/drivers/watchdog/smsc37b787_wdt.c +++ b/drivers/watchdog/smsc37b787_wdt.c @@ -40,7 +40,7 @@   *  mknod /dev/watchdog c 10 130   *   * For an example userspace keep-alive daemon, see: - *   Documentation/watchdog/watchdog.txt + *   Documentation/watchdog/wdt.txt   */  #include <linux/module.h> diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index d33520d0b4c..1199da0f98c 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -59,7 +59,7 @@ static struct watchdog_device *wdd;  static int watchdog_ping(struct watchdog_device *wddev)  { -	if (test_bit(WDOG_ACTIVE, &wdd->status)) { +	if (test_bit(WDOG_ACTIVE, 
&wddev->status)) {  		if (wddev->ops->ping)  			return wddev->ops->ping(wddev);  /* ping the watchdog */  		else @@ -81,12 +81,12 @@ static int watchdog_start(struct watchdog_device *wddev)  {  	int err; -	if (!test_bit(WDOG_ACTIVE, &wdd->status)) { +	if (!test_bit(WDOG_ACTIVE, &wddev->status)) {  		err = wddev->ops->start(wddev);  		if (err < 0)  			return err; -		set_bit(WDOG_ACTIVE, &wdd->status); +		set_bit(WDOG_ACTIVE, &wddev->status);  	}  	return 0;  } @@ -105,18 +105,18 @@ static int watchdog_stop(struct watchdog_device *wddev)  {  	int err = -EBUSY; -	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) { +	if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {  		pr_info("%s: nowayout prevents watchdog to be stopped!\n", -							wdd->info->identity); +							wddev->info->identity);  		return err;  	} -	if (test_bit(WDOG_ACTIVE, &wdd->status)) { +	if (test_bit(WDOG_ACTIVE, &wddev->status)) {  		err = wddev->ops->stop(wddev);  		if (err < 0)  			return err; -		clear_bit(WDOG_ACTIVE, &wdd->status); +		clear_bit(WDOG_ACTIVE, &wddev->status);  	}  	return 0;  } diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 5f7ff8e2fc1..8795480c235 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -137,16 +137,6 @@ config XEN_GRANT_DEV_ALLOC  	  to other domains. This can be used to implement frontend drivers  	  or as part of an inter-domain shared memory channel. -config XEN_PLATFORM_PCI -	tristate "xen platform pci device driver" -	depends on XEN_PVHVM && PCI -	default m -	help -	  Driver for the Xen PCI Platform device: it is responsible for -	  initializing xenbus and grant_table when running in a Xen HVM -	  domain. As a consequence this driver is required to run any Xen PV -	  frontend on Xen HVM. -  config SWIOTLB_XEN  	def_bool y  	depends on PCI diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 72bbb27d7a6..974fffdf22b 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -14,7 +14,7 @@ obj-$(CONFIG_XEN_GNTDEV)		+= xen-gntdev.o  obj-$(CONFIG_XEN_GRANT_DEV_ALLOC)	+= xen-gntalloc.o  obj-$(CONFIG_XENFS)			+= xenfs/  obj-$(CONFIG_XEN_SYS_HYPERVISOR)	+= sys-hypervisor.o -obj-$(CONFIG_XEN_PLATFORM_PCI)		+= xen-platform-pci.o +obj-$(CONFIG_XEN_PVHVM)			+= platform-pci.o  obj-$(CONFIG_XEN_TMEM)			+= tmem.o  obj-$(CONFIG_SWIOTLB_XEN)		+= swiotlb-xen.o  obj-$(CONFIG_XEN_DOM0)			+= pci.o @@ -23,5 +23,3 @@ obj-$(CONFIG_XEN_PCIDEV_BACKEND)	+= xen-pciback/  xen-evtchn-y				:= evtchn.o  xen-gntdev-y				:= gntdev.o  xen-gntalloc-y				:= gntalloc.o - -xen-platform-pci-y			:= platform-pci.o diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 5dfd8f8ff07..5876e1ae6c2 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -501,20 +501,24 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);   * alloc_xenballooned_pages - get pages that have been ballooned out   * @nr_pages: Number of pages to get   * @pages: pages returned + * @highmem: highmem or lowmem pages   * @return 0 on success, error otherwise   */ -int alloc_xenballooned_pages(int nr_pages, struct page** pages) +int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)  {  	int pgno = 0;  	struct page* page;  	mutex_lock(&balloon_mutex);  	while (pgno < nr_pages) { -		page = balloon_retrieve(true); -		if (page) { +		page = balloon_retrieve(highmem); +		if (page && PageHighMem(page) == highmem) {  			pages[pgno++] = page;  		} else {  			enum bp_state st; -			st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER); +			if (page) +				balloon_append(page); +			st = 
decrease_reservation(nr_pages - pgno, +					highmem ? GFP_HIGHUSER : GFP_USER);  			if (st != BP_DONE)  				goto out_undo;  		} @@ -555,17 +559,40 @@ void free_xenballooned_pages(int nr_pages, struct page** pages)  }  EXPORT_SYMBOL(free_xenballooned_pages); -static int __init balloon_init(void) +static void __init balloon_add_region(unsigned long start_pfn, +				      unsigned long pages)  {  	unsigned long pfn, extra_pfn_end;  	struct page *page; +	/* +	 * If the amount of usable memory has been limited (e.g., with +	 * the 'mem' command line parameter), don't add pages beyond +	 * this limit. +	 */ +	extra_pfn_end = min(max_pfn, start_pfn + pages); + +	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) { +		page = pfn_to_page(pfn); +		/* totalram_pages and totalhigh_pages do not +		   include the boot-time balloon extension, so +		   don't subtract from it. */ +		__balloon_append(page); +	} +} + +static int __init balloon_init(void) +{ +	int i; +  	if (!xen_domain())  		return -ENODEV;  	pr_info("xen/balloon: Initialising balloon driver.\n"); -	balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn; +	balloon_stats.current_pages = xen_pv_domain() +		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn) +		: max_pfn;  	balloon_stats.target_pages  = balloon_stats.current_pages;  	balloon_stats.balloon_low   = 0;  	balloon_stats.balloon_high  = 0; @@ -584,24 +611,13 @@ static int __init balloon_init(void)  #endif  	/* -	 * Initialise the balloon with excess memory space.  We need -	 * to make sure we don't add memory which doesn't exist or -	 * logically exist.  The E820 map can be trimmed to be smaller -	 * than the amount of physical memory due to the mem= command -	 * line parameter.  And if this is a 32-bit non-HIGHMEM kernel -	 * on a system with memory which requires highmem to access, -	 * don't try to use it. +	 * Initialize the balloon with pages from the extra memory +	 * regions (see arch/x86/xen/setup.c).  	 */ -	extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()), -			    (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size)); -	for (pfn = PFN_UP(xen_extra_mem_start); -	     pfn < extra_pfn_end; -	     pfn++) { -		page = pfn_to_page(pfn); -		/* totalram_pages and totalhigh_pages do not include the boot-time -		   balloon extension, so don't subtract from it. */ -		__balloon_append(page); -	} +	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) +		if (xen_extra_mem[i].size) +			balloon_add_region(PFN_UP(xen_extra_mem[i].start), +					   PFN_DOWN(xen_extra_mem[i].size));  	return 0;  } diff --git a/drivers/xen/events.c b/drivers/xen/events.c index da70f5c32eb..7a55b292bf3 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -54,7 +54,7 @@   * This lock protects updates to the following mapping and reference-count   * arrays. The lock does not need to be acquired to read the mapping tables.   
*/ -static DEFINE_SPINLOCK(irq_mapping_update_lock); +static DEFINE_MUTEX(irq_mapping_update_lock);  static LIST_HEAD(xen_irq_list_head); @@ -432,7 +432,8 @@ static int __must_check xen_allocate_irq_dynamic(void)  	irq = irq_alloc_desc_from(first, -1); -	xen_irq_init(irq); +	if (irq >= 0) +		xen_irq_init(irq);  	return irq;  } @@ -631,7 +632,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,  	int irq = -1;  	struct physdev_irq irq_op; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = find_irq_by_gsi(gsi);  	if (irq != -1) { @@ -684,7 +685,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,  				handle_edge_irq, name);  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -710,10 +711,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,  {  	int irq, ret; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = xen_allocate_irq_dynamic(); -	if (irq == -1) +	if (irq < 0)  		goto out;  	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, @@ -724,12 +725,12 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,  	if (ret < 0)  		goto error_irq;  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  error_irq: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	xen_free_irq(irq); -	return -1; +	return ret;  }  #endif @@ -740,7 +741,7 @@ int xen_destroy_irq(int irq)  	struct irq_info *info = info_for_irq(irq);  	int rc = -ENOENT; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	desc = irq_to_desc(irq);  	if (!desc) @@ -766,7 +767,7 @@ int xen_destroy_irq(int irq)  	xen_free_irq(irq);  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return rc;  } @@ -776,10 +777,10 @@ int xen_irq_from_pirq(unsigned pirq)  	struct irq_info *info; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	list_for_each_entry(info, &xen_irq_list_head, list) { -		if (info == NULL || info->type != IRQT_PIRQ) +		if (info->type != IRQT_PIRQ)  			continue;  		irq = info->irq;  		if (info->u.pirq.pirq == pirq) @@ -787,7 +788,7 @@ int xen_irq_from_pirq(unsigned pirq)  	}  	irq = -1;  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -802,7 +803,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)  {  	int irq; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = evtchn_to_irq[evtchn]; @@ -818,7 +819,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)  	}  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -829,7 +830,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)  	struct evtchn_bind_ipi bind_ipi;  	int evtchn, irq; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = per_cpu(ipi_to_irq, cpu)[ipi]; @@ -853,7 +854,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)  	}   out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -872,13 +873,34 @@ static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,  	return err ? 
: bind_evtchn_to_irq(bind_interdomain.local_port);  } +static int find_virq(unsigned int virq, unsigned int cpu) +{ +	struct evtchn_status status; +	int port, rc = -ENOENT; + +	memset(&status, 0, sizeof(status)); +	for (port = 0; port <= NR_EVENT_CHANNELS; port++) { +		status.dom = DOMID_SELF; +		status.port = port; +		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); +		if (rc < 0) +			continue; +		if (status.status != EVTCHNSTAT_virq) +			continue; +		if (status.u.virq == virq && status.vcpu == cpu) { +			rc = port; +			break; +		} +	} +	return rc; +}  int bind_virq_to_irq(unsigned int virq, unsigned int cpu)  {  	struct evtchn_bind_virq bind_virq; -	int evtchn, irq; +	int evtchn, irq, ret; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = per_cpu(virq_to_irq, cpu)[virq]; @@ -892,10 +914,16 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)  		bind_virq.virq = virq;  		bind_virq.vcpu = cpu; -		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, -						&bind_virq) != 0) -			BUG(); -		evtchn = bind_virq.port; +		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, +						&bind_virq); +		if (ret == 0) +			evtchn = bind_virq.port; +		else { +			if (ret == -EEXIST) +				ret = find_virq(virq, cpu); +			BUG_ON(ret < 0); +			evtchn = ret; +		}  		xen_irq_info_virq_init(cpu, irq, evtchn, virq); @@ -903,7 +931,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)  	}  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -913,7 +941,7 @@ static void unbind_from_irq(unsigned int irq)  	struct evtchn_close close;  	int evtchn = evtchn_from_irq(irq); -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	if (VALID_EVTCHN(evtchn)) {  		close.port = evtchn; @@ -943,7 +971,7 @@ static void unbind_from_irq(unsigned int irq)  	xen_free_irq(irq); -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  }  int bind_evtchn_to_irqhandler(unsigned int evtchn, @@ -1279,7 +1307,7 @@ void rebind_evtchn_irq(int evtchn, int irq)  	   will also be masked. 
*/  	disable_irq(irq); -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	/* After resume the irq<->evtchn mappings are all cleared out */  	BUG_ON(evtchn_to_irq[evtchn] != -1); @@ -1289,7 +1317,7 @@ void rebind_evtchn_irq(int evtchn, int irq)  	xen_irq_info_evtchn_init(irq, evtchn); -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	/* new event channels are always bound to cpu 0 */  	irq_set_affinity(irq, cpumask_of(0)); @@ -1670,6 +1698,7 @@ void __init xen_init_IRQ(void)  	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),  				    GFP_KERNEL); +	BUG_ON(!evtchn_to_irq);  	for (i = 0; i < NR_EVENT_CHANNELS; i++)  		evtchn_to_irq[i] = -1; diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index f914b26cf0c..880798aae2f 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -83,6 +83,7 @@ struct grant_map {  	struct ioctl_gntdev_grant_ref *grants;  	struct gnttab_map_grant_ref   *map_ops;  	struct gnttab_unmap_grant_ref *unmap_ops; +	struct gnttab_map_grant_ref   *kmap_ops;  	struct page **pages;  }; @@ -116,19 +117,22 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)  	add->grants    = kzalloc(sizeof(add->grants[0])    * count, GFP_KERNEL);  	add->map_ops   = kzalloc(sizeof(add->map_ops[0])   * count, GFP_KERNEL);  	add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL); +	add->kmap_ops  = kzalloc(sizeof(add->kmap_ops[0])  * count, GFP_KERNEL);  	add->pages     = kzalloc(sizeof(add->pages[0])     * count, GFP_KERNEL);  	if (NULL == add->grants    ||  	    NULL == add->map_ops   ||  	    NULL == add->unmap_ops || +	    NULL == add->kmap_ops  ||  	    NULL == add->pages)  		goto err; -	if (alloc_xenballooned_pages(count, add->pages)) +	if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))  		goto err;  	for (i = 0; i < count; i++) {  		add->map_ops[i].handle = -1;  		add->unmap_ops[i].handle = -1; +		add->kmap_ops[i].handle = -1;  	}  	add->index = 0; @@ -142,6 +146,7 @@ err:  	kfree(add->grants);  	kfree(add->map_ops);  	kfree(add->unmap_ops); +	kfree(add->kmap_ops);  	kfree(add);  	return NULL;  } @@ -243,10 +248,35 @@ static int map_grant_pages(struct grant_map *map)  			gnttab_set_unmap_op(&map->unmap_ops[i], addr,  				map->flags, -1 /* handle */);  		} +	} else { +		/* +		 * Setup the map_ops corresponding to the pte entries pointing +		 * to the kernel linear addresses of the struct pages. +		 * These ptes are completely different from the user ptes dealt +		 * with find_grant_ptes. +		 */ +		for (i = 0; i < map->count; i++) { +			unsigned level; +			unsigned long address = (unsigned long) +				pfn_to_kaddr(page_to_pfn(map->pages[i])); +			pte_t *ptep; +			u64 pte_maddr = 0; +			BUG_ON(PageHighMem(map->pages[i])); + +			ptep = lookup_address(address, &level); +			pte_maddr = arbitrary_virt_to_machine(ptep).maddr; +			gnttab_set_map_op(&map->kmap_ops[i], pte_maddr, +				map->flags | +				GNTMAP_host_map | +				GNTMAP_contains_pte, +				map->grants[i].ref, +				map->grants[i].domid); +		}  	}  	pr_debug("map %d+%d\n", map->index, map->count); -	err = gnttab_map_refs(map->map_ops, map->pages, map->count); +	err = gnttab_map_refs(map->map_ops, use_ptemod ? 
map->kmap_ops : NULL, +			map->pages, map->count);  	if (err)  		return err; @@ -462,13 +492,11 @@ static int gntdev_release(struct inode *inode, struct file *flip)  	pr_debug("priv %p\n", priv); -	spin_lock(&priv->lock);  	while (!list_empty(&priv->maps)) {  		map = list_entry(priv->maps.next, struct grant_map, next);  		list_del(&map->next);  		gntdev_put_map(map);  	} -	spin_unlock(&priv->lock);  	if (use_ptemod)  		mmu_notifier_unregister(&priv->mn, priv->mm); @@ -532,10 +560,11 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,  	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);  	if (map) {  		list_del(&map->next); -		gntdev_put_map(map);  		err = 0;  	}  	spin_unlock(&priv->lock); +	if (map) +		gntdev_put_map(map);  	return err;  } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 4f44b347b24..8c71ab80175 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -448,7 +448,8 @@ unsigned int gnttab_max_grant_frames(void)  EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);  int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, -		    struct page **pages, unsigned int count) +			struct gnttab_map_grant_ref *kmap_ops, +			struct page **pages, unsigned int count)  {  	int i, ret;  	pte_t *pte; @@ -488,8 +489,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,  			 */  			return -EOPNOTSUPP;  		} -		ret = m2p_add_override(mfn, pages[i], -				       map_ops[i].flags & GNTMAP_contains_pte); +		ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);  		if (ret)  			return ret;  	} diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index cef4bafc07d..66057075d6e 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c @@ -18,6 +18,7 @@   */  #include <linux/pci.h> +#include <linux/acpi.h>  #include <xen/xen.h>  #include <xen/interface/physdev.h>  #include <xen/interface/xen.h> @@ -26,26 +27,85 @@  #include <asm/xen/hypercall.h>  #include "../pci/pci.h" +static bool __read_mostly pci_seg_supported = true; +  static int xen_add_device(struct device *dev)  {  	int r;  	struct pci_dev *pci_dev = to_pci_dev(dev); +#ifdef CONFIG_PCI_IOV +	struct pci_dev *physfn = pci_dev->physfn; +#endif + +	if (pci_seg_supported) { +		struct physdev_pci_device_add add = { +			.seg = pci_domain_nr(pci_dev->bus), +			.bus = pci_dev->bus->number, +			.devfn = pci_dev->devfn +		}; +#ifdef CONFIG_ACPI +		acpi_handle handle; +#endif  #ifdef CONFIG_PCI_IOV -	if (pci_dev->is_virtfn) { +		if (pci_dev->is_virtfn) { +			add.flags = XEN_PCI_DEV_VIRTFN; +			add.physfn.bus = physfn->bus->number; +			add.physfn.devfn = physfn->devfn; +		} else +#endif +		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) +			add.flags = XEN_PCI_DEV_EXTFN; + +#ifdef CONFIG_ACPI +		handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); +		if (!handle) +			handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); +#ifdef CONFIG_PCI_IOV +		if (!handle && pci_dev->is_virtfn) +			handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge); +#endif +		if (handle) { +			acpi_status status; + +			do { +				unsigned long long pxm; + +				status = acpi_evaluate_integer(handle, "_PXM", +							       NULL, &pxm); +				if (ACPI_SUCCESS(status)) { +					add.optarr[0] = pxm; +					add.flags |= XEN_PCI_DEV_PXM; +					break; +				} +				status = acpi_get_parent(handle, &handle); +			} while (ACPI_SUCCESS(status)); +		} +#endif /* CONFIG_ACPI */ + +		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add); +		if (r != -ENOSYS) +			return r; +		pci_seg_supported = false; +	} + +	if 
(pci_domain_nr(pci_dev->bus)) +		r = -ENOSYS; +#ifdef CONFIG_PCI_IOV +	else if (pci_dev->is_virtfn) {  		struct physdev_manage_pci_ext manage_pci_ext = {  			.bus		= pci_dev->bus->number,  			.devfn		= pci_dev->devfn,  			.is_virtfn 	= 1, -			.physfn.bus	= pci_dev->physfn->bus->number, -			.physfn.devfn	= pci_dev->physfn->devfn, +			.physfn.bus	= physfn->bus->number, +			.physfn.devfn	= physfn->devfn,  		};  		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,  			&manage_pci_ext); -	} else +	}  #endif -	if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) { +	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {  		struct physdev_manage_pci_ext manage_pci_ext = {  			.bus		= pci_dev->bus->number,  			.devfn		= pci_dev->devfn, @@ -71,13 +131,27 @@ static int xen_remove_device(struct device *dev)  {  	int r;  	struct pci_dev *pci_dev = to_pci_dev(dev); -	struct physdev_manage_pci manage_pci; -	manage_pci.bus = pci_dev->bus->number; -	manage_pci.devfn = pci_dev->devfn; +	if (pci_seg_supported) { +		struct physdev_pci_device device = { +			.seg = pci_domain_nr(pci_dev->bus), +			.bus = pci_dev->bus->number, +			.devfn = pci_dev->devfn +		}; + +		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove, +					  &device); +	} else if (pci_domain_nr(pci_dev->bus)) +		r = -ENOSYS; +	else { +		struct physdev_manage_pci manage_pci = { +			.bus = pci_dev->bus->number, +			.devfn = pci_dev->devfn +		}; -	r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove, -		&manage_pci); +		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove, +					  &manage_pci); +	}  	return r;  } @@ -96,13 +170,16 @@ static int xen_pci_notifier(struct notifier_block *nb,  		r = xen_remove_device(dev);  		break;  	default: -		break; +		return NOTIFY_DONE;  	} - -	return r; +	if (r) +		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n", +			action == BUS_NOTIFY_ADD_DEVICE ? "add" : +			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?")); +	return NOTIFY_OK;  } -struct notifier_block device_nb = { +static struct notifier_block device_nb = {  	.notifier_call = xen_pci_notifier,  }; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 6e8c15a2320..c984768d98c 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -38,6 +38,7 @@  #include <xen/swiotlb-xen.h>  #include <xen/page.h>  #include <xen/xen-ops.h> +#include <xen/hvc-console.h>  /*   * Used to do a quick range check in swiotlb_tbl_unmap_single and   * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this @@ -146,8 +147,10 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)  void __init xen_swiotlb_init(int verbose)  {  	unsigned long bytes; -	int rc; +	int rc = -ENOMEM;  	unsigned long nr_tbl; +	char *m = NULL; +	unsigned int repeat = 3;  	nr_tbl = swioltb_nr_tbl();  	if (nr_tbl) @@ -156,16 +159,17 @@ void __init xen_swiotlb_init(int verbose)  		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);  		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);  	} - +retry:  	bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;  	/*  	 * Get IO TLB memory from any location.  	 */  	xen_io_tlb_start = alloc_bootmem(bytes); -	if (!xen_io_tlb_start) -		panic("Cannot allocate SWIOTLB buffer"); - +	if (!xen_io_tlb_start) { +		m = "Cannot allocate Xen-SWIOTLB buffer!\n"; +		goto error; +	}  	xen_io_tlb_end = xen_io_tlb_start + bytes;  	/*  	 * And replace that memory with pages under 4GB. 
@@ -173,17 +177,28 @@ void __init xen_swiotlb_init(int verbose)  	rc = xen_swiotlb_fixup(xen_io_tlb_start,  			       bytes,  			       xen_io_tlb_nslabs); -	if (rc) +	if (rc) { +		free_bootmem(__pa(xen_io_tlb_start), bytes); +		m = "Failed to get contiguous memory for DMA from Xen!\n"\ +		    "You either: don't have the permissions, do not have"\ +		    " enough free memory under 4GB, or the hypervisor memory"\ +		    "is too fragmented!";  		goto error; - +	}  	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);  	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);  	return;  error: -	panic("DMA(%d): Failed to exchange pages allocated for DMA with Xen! "\ -	      "We either don't have the permission or you do not have enough"\ -	      "free memory under 4GB!\n", rc); +	if (repeat--) { +		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */ +					(xen_io_tlb_nslabs >> 1)); +		printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n", +		      (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20); +		goto retry; +	} +	xen_raw_printk("%s (rc:%d)", m, rc); +	panic("%s (rc:%d)", m, rc);  }  void * @@ -194,6 +209,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,  	int order = get_order(size);  	u64 dma_mask = DMA_BIT_MASK(32);  	unsigned long vstart; +	phys_addr_t phys; +	dma_addr_t dev_addr;  	/*  	* Ignore region specifiers - the kernel's ideas of @@ -209,18 +226,26 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,  	vstart = __get_free_pages(flags, order);  	ret = (void *)vstart; +	if (!ret) +		return ret; +  	if (hwdev && hwdev->coherent_dma_mask) -		dma_mask = dma_alloc_coherent_mask(hwdev, flags); +		dma_mask = hwdev->coherent_dma_mask; -	if (ret) { +	phys = virt_to_phys(ret); +	dev_addr = xen_phys_to_bus(phys); +	if (((dev_addr + size - 1 <= dma_mask)) && +	    !range_straddles_page_boundary(phys, size)) +		*dma_handle = dev_addr; +	else {  		if (xen_create_contiguous_region(vstart, order,  						 fls64(dma_mask)) != 0) {  			free_pages(vstart, order);  			return NULL;  		} -		memset(ret, 0, size);  		*dma_handle = virt_to_machine(ret).maddr;  	} +	memset(ret, 0, size);  	return ret;  }  EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent); @@ -230,11 +255,21 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,  			  dma_addr_t dev_addr)  {  	int order = get_order(size); +	phys_addr_t phys; +	u64 dma_mask = DMA_BIT_MASK(32);  	if (dma_release_from_coherent(hwdev, order, vaddr))  		return; -	xen_destroy_contiguous_region((unsigned long)vaddr, order); +	if (hwdev && hwdev->coherent_dma_mask) +		dma_mask = hwdev->coherent_dma_mask; + +	phys = virt_to_phys(vaddr); + +	if (((dev_addr + size - 1 > dma_mask)) || +	    range_straddles_page_boundary(phys, size)) +		xen_destroy_contiguous_region((unsigned long)vaddr, order); +  	free_pages((unsigned long)vaddr, order);  }  EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent); @@ -278,9 +313,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,  	/*  	 * Ensure that the address returned is DMA'ble  	 */ -	if (!dma_capable(dev, dev_addr, size)) -		panic("map_single: bounce buffer is not DMA'ble"); - +	if (!dma_capable(dev, dev_addr, size)) { +		swiotlb_tbl_unmap_single(dev, map, size, dir); +		dev_addr = 0; +	}  	return dev_addr;  }  EXPORT_SYMBOL_GPL(xen_swiotlb_map_page); diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index a8031445d94..444345afbd5 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c @@ 
-15,7 +15,6 @@  #include "conf_space.h"  #include "conf_space_quirks.h" -#define DRV_NAME	"xen-pciback"  static int permissive;  module_param(permissive, bool, 0644); diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index da3cbdfcb5d..3daf862d739 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -15,7 +15,6 @@ struct pci_bar_info {  	int which;  }; -#define DRV_NAME	"xen-pciback"  #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))  #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) @@ -25,7 +24,7 @@ static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)  	int ret;  	ret = xen_pcibk_read_config_word(dev, offset, value, data); -	if (!atomic_read(&dev->enable_cnt)) +	if (!pci_is_enabled(dev))  		return ret;  	for (i = 0; i < PCI_ROM_RESOURCE; i++) { @@ -187,7 +186,7 @@ static inline void read_dev_bar(struct pci_dev *dev,  	bar_info->val = res[pos].start |  			(res[pos].flags & PCI_REGION_FLAG_MASK); -	bar_info->len_val = res[pos].end - res[pos].start + 1; +	bar_info->len_val = resource_size(&res[pos]);  }  static void *bar_init(struct pci_dev *dev, int offset) diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c index 921a889e65e..7476791cab4 100644 --- a/drivers/xen/xen-pciback/conf_space_quirks.c +++ b/drivers/xen/xen-pciback/conf_space_quirks.c @@ -12,7 +12,6 @@  #include "conf_space_quirks.h"  LIST_HEAD(xen_pcibk_quirks); -#define	DRV_NAME	"xen-pciback"  static inline const struct pci_device_id *  match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)  { @@ -36,7 +35,7 @@ static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)  			goto out;  	tmp_quirk = NULL;  	printk(KERN_DEBUG DRV_NAME -	       ":quirk didn't match any device xen_pciback knows about\n"); +	       ": quirk didn't match any device known\n");  out:  	return tmp_quirk;  } diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c index 1d32a9a42c0..828dddc360d 100644 --- a/drivers/xen/xen-pciback/passthrough.c +++ b/drivers/xen/xen-pciback/passthrough.c @@ -7,13 +7,13 @@  #include <linux/list.h>  #include <linux/pci.h> -#include <linux/spinlock.h> +#include <linux/mutex.h>  #include "pciback.h"  struct passthrough_dev_data {  	/* Access to dev_list must be protected by lock */  	struct list_head dev_list; -	spinlock_t lock; +	struct mutex lock;  };  static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, @@ -24,9 +24,8 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,  	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;  	struct pci_dev_entry *dev_entry;  	struct pci_dev *dev = NULL; -	unsigned long flags; -	spin_lock_irqsave(&dev_data->lock, flags); +	mutex_lock(&dev_data->lock);  	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {  		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus) @@ -37,7 +36,7 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,  		}  	} -	spin_unlock_irqrestore(&dev_data->lock, flags); +	mutex_unlock(&dev_data->lock);  	return dev;  } @@ -48,7 +47,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,  {  	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;  	struct pci_dev_entry *dev_entry; -	unsigned long flags;  	unsigned int domain, bus, devfn;  	int err; @@ -57,9 +55,9 
@@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,  		return -ENOMEM;  	dev_entry->dev = dev; -	spin_lock_irqsave(&dev_data->lock, flags); +	mutex_lock(&dev_data->lock);  	list_add_tail(&dev_entry->list, &dev_data->dev_list); -	spin_unlock_irqrestore(&dev_data->lock, flags); +	mutex_unlock(&dev_data->lock);  	/* Publish this device. */  	domain = (unsigned int)pci_domain_nr(dev->bus); @@ -76,9 +74,8 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,  	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;  	struct pci_dev_entry *dev_entry, *t;  	struct pci_dev *found_dev = NULL; -	unsigned long flags; -	spin_lock_irqsave(&dev_data->lock, flags); +	mutex_lock(&dev_data->lock);  	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {  		if (dev_entry->dev == dev) { @@ -88,7 +85,7 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,  		}  	} -	spin_unlock_irqrestore(&dev_data->lock, flags); +	mutex_unlock(&dev_data->lock);  	if (found_dev)  		pcistub_put_pci_dev(found_dev); @@ -102,7 +99,7 @@ static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)  	if (!dev_data)  		return -ENOMEM; -	spin_lock_init(&dev_data->lock); +	mutex_init(&dev_data->lock);  	INIT_LIST_HEAD(&dev_data->dev_list); @@ -116,14 +113,14 @@ static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,  {  	int err = 0;  	struct passthrough_dev_data *dev_data = pdev->pci_dev_data; -	struct pci_dev_entry *dev_entry, *e, *tmp; +	struct pci_dev_entry *dev_entry, *e;  	struct pci_dev *dev;  	int found;  	unsigned int domain, bus; -	spin_lock(&dev_data->lock); +	mutex_lock(&dev_data->lock); -	list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) { +	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {  		/* Only publish this device as a root if none of its  		 * parent bridges are exported  		 */ @@ -142,16 +139,13 @@ static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,  		bus = (unsigned int)dev_entry->dev->bus->number;  		if (!found) { -			spin_unlock(&dev_data->lock);  			err = publish_root_cb(pdev, domain, bus);  			if (err)  				break; -			spin_lock(&dev_data->lock);  		}  	} -	if (!err) -		spin_unlock(&dev_data->lock); +	mutex_unlock(&dev_data->lock);  	return err;  } @@ -182,7 +176,7 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,  	return 1;  } -struct xen_pcibk_backend xen_pcibk_passthrough_backend = { +const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {  	.name           = "passthrough",  	.init           = __xen_pcibk_init_devices,  	.free		= __xen_pcibk_release_devices, diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index aec214ac0a1..8f06e1ed028 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -21,8 +21,6 @@  #include "conf_space.h"  #include "conf_space_quirks.h" -#define DRV_NAME	"xen-pciback" -  static char *pci_devs_to_hide;  wait_queue_head_t xen_pcibk_aer_wait_queue;  /*Add sem for sync AER handling and xen_pcibk remove/reconfigue ops, @@ -222,6 +220,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)  	}  	spin_unlock_irqrestore(&pcistub_devices_lock, flags); +	if (WARN_ON(!found_psdev)) +		return;  	/*hold this lock for avoiding breaking link between  	* pcistub and xen_pcibk when AER is in processing @@ -514,12 +514,9 @@ static void kill_domain_by_device(struct pcistub_device *psdev)  	int err;  	char nodename[PCI_NODENAME_MAX]; -	if (!psdev) -		
dev_err(&psdev->dev->dev, -			"device is NULL when do AER recovery/kill_domain\n"); +	BUG_ON(!psdev);  	snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",  		psdev->pdev->xdev->otherend_id); -	nodename[strlen(nodename)] = '\0';  again:  	err = xenbus_transaction_start(&xbt); @@ -605,7 +602,7 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,  	if (test_bit(_XEN_PCIF_active,  		(unsigned long *)&psdev->pdev->sh_info->flags)) {  		dev_dbg(&psdev->dev->dev, -			"schedule pci_conf service in xen_pcibk\n"); +			"schedule pci_conf service in " DRV_NAME "\n");  		xen_pcibk_test_and_schedule_op(psdev->pdev);  	} @@ -995,8 +992,7 @@ out:  		err = count;  	return err;  } - -DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add); +static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);  static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,  				   size_t count) @@ -1015,8 +1011,7 @@ out:  		err = count;  	return err;  } - -DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove); +static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);  static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)  { @@ -1039,8 +1034,7 @@ static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)  	return count;  } - -DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL); +static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);  static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)  { @@ -1069,8 +1063,7 @@ static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)  	spin_unlock_irqrestore(&pcistub_devices_lock, flags);  	return count;  } - -DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL); +static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);  static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,  					  const char *buf, @@ -1106,7 +1099,8 @@ out:  		err = count;  	return err;  } -DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL, pcistub_irq_handler_switch); +static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL, +		   pcistub_irq_handler_switch);  static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,  				 size_t count) @@ -1170,8 +1164,8 @@ out:  	return count;  } - -DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add); +static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, +		   pcistub_quirk_add);  static ssize_t permissive_add(struct device_driver *drv, const char *buf,  			      size_t count) @@ -1236,8 +1230,8 @@ static ssize_t permissive_show(struct device_driver *drv, char *buf)  	spin_unlock_irqrestore(&pcistub_devices_lock, flags);  	return count;  } - -DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add); +static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, +		   permissive_add);  static void pcistub_exit(void)  { @@ -1374,3 +1368,4 @@ module_init(xen_pcibk_init);  module_exit(xen_pcibk_cleanup);  MODULE_LICENSE("Dual BSD/GPL"); +MODULE_ALIAS("xen-backend:pci"); diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index a0e131a8150..e9b4011c5f9 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h @@ -15,6 +15,8 @@  #include <linux/atomic.h>  #include <xen/interface/io/pciif.h> +#define DRV_NAME	"xen-pciback" +  struct pci_dev_entry {  	struct list_head list;  	struct pci_dev *dev; @@ -27,7 +29,7 @@ struct pci_dev_entry {  struct 
xen_pcibk_device {  	void *pci_dev_data; -	spinlock_t dev_lock; +	struct mutex dev_lock;  	struct xenbus_device *xdev;  	struct xenbus_watch be_watch;  	u8 be_watching; @@ -89,7 +91,7 @@ typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,   *  passthrough - BDFs are exactly like in the host.   */  struct xen_pcibk_backend { -	char *name; +	const char *name;  	int (*init)(struct xen_pcibk_device *pdev);  	void (*free)(struct xen_pcibk_device *pdev);  	int (*find)(struct pci_dev *pcidev, struct xen_pcibk_device *pdev, @@ -104,9 +106,9 @@ struct xen_pcibk_backend {  			       unsigned int devfn);  }; -extern struct xen_pcibk_backend xen_pcibk_vpci_backend; -extern struct xen_pcibk_backend xen_pcibk_passthrough_backend; -extern struct xen_pcibk_backend *xen_pcibk_backend; +extern const struct xen_pcibk_backend xen_pcibk_vpci_backend; +extern const struct xen_pcibk_backend xen_pcibk_passthrough_backend; +extern const struct xen_pcibk_backend *xen_pcibk_backend;  static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,  					struct pci_dev *dev, @@ -116,13 +118,14 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,  	if (xen_pcibk_backend && xen_pcibk_backend->add)  		return xen_pcibk_backend->add(pdev, dev, devid, publish_cb);  	return -1; -}; +} +  static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,  					     struct pci_dev *dev)  {  	if (xen_pcibk_backend && xen_pcibk_backend->free)  		return xen_pcibk_backend->release(pdev, dev); -}; +}  static inline struct pci_dev *  xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain, @@ -131,7 +134,8 @@ xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,  	if (xen_pcibk_backend && xen_pcibk_backend->get)  		return xen_pcibk_backend->get(pdev, domain, bus, devfn);  	return NULL; -}; +} +  /**  * Add for domain0 PCIE-AER handling. 
Get guest domain/bus/devfn in xen_pcibk  * before sending aer request to pcifront, so that guest could identify @@ -148,25 +152,29 @@ static inline int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,  		return xen_pcibk_backend->find(pcidev, pdev, domain, bus,  					       devfn);  	return -1; -}; +} +  static inline int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)  {  	if (xen_pcibk_backend && xen_pcibk_backend->init)  		return xen_pcibk_backend->init(pdev);  	return -1; -}; +} +  static inline int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,  					      publish_pci_root_cb cb)  {  	if (xen_pcibk_backend && xen_pcibk_backend->publish)  		return xen_pcibk_backend->publish(pdev, cb);  	return -1; -}; +} +  static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)  {  	if (xen_pcibk_backend && xen_pcibk_backend->free)  		return xen_pcibk_backend->free(pdev); -}; +} +  /* Handles events from front-end */  irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);  void xen_pcibk_do_op(struct work_struct *data); diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 8c95c3415b7..63616d7453e 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -10,7 +10,6 @@  #include <linux/sched.h>  #include "pciback.h" -#define DRV_NAME	"xen-pciback"  int verbose_request;  module_param(verbose_request, int, 0644); diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c index 4a42cfb0959..46d140baebd 100644 --- a/drivers/xen/xen-pciback/vpci.c +++ b/drivers/xen/xen-pciback/vpci.c @@ -8,16 +8,15 @@  #include <linux/list.h>  #include <linux/slab.h>  #include <linux/pci.h> -#include <linux/spinlock.h> +#include <linux/mutex.h>  #include "pciback.h"  #define PCI_SLOT_MAX 32 -#define DRV_NAME	"xen-pciback"  struct vpci_dev_data {  	/* Access to dev_list must be protected by lock */  	struct list_head dev_list[PCI_SLOT_MAX]; -	spinlock_t lock; +	struct mutex lock;  };  static inline struct list_head *list_first(struct list_head *head) @@ -33,13 +32,12 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,  	struct pci_dev_entry *entry;  	struct pci_dev *dev = NULL;  	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data; -	unsigned long flags;  	if (domain != 0 || bus != 0)  		return NULL;  	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) { -		spin_lock_irqsave(&vpci_dev->lock, flags); +		mutex_lock(&vpci_dev->lock);  		list_for_each_entry(entry,  				    &vpci_dev->dev_list[PCI_SLOT(devfn)], @@ -50,7 +48,7 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,  			}  		} -		spin_unlock_irqrestore(&vpci_dev->lock, flags); +		mutex_unlock(&vpci_dev->lock);  	}  	return dev;  } @@ -71,7 +69,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,  	int err = 0, slot, func = -1;  	struct pci_dev_entry *t, *dev_entry;  	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data; -	unsigned long flags;  	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {  		err = -EFAULT; @@ -90,7 +87,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,  	dev_entry->dev = dev; -	spin_lock_irqsave(&vpci_dev->lock, flags); +	mutex_lock(&vpci_dev->lock);  	/* Keep multi-function devices together on the virtual PCI bus */  	for (slot = 0; slot < PCI_SLOT_MAX; slot++) { @@ -129,7 +126,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,  			 "No more space on root virtual PCI bus");  unlock: -	
spin_unlock_irqrestore(&vpci_dev->lock, flags); +	mutex_unlock(&vpci_dev->lock);  	/* Publish this device. */  	if (!err) @@ -145,14 +142,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,  	int slot;  	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;  	struct pci_dev *found_dev = NULL; -	unsigned long flags; -	spin_lock_irqsave(&vpci_dev->lock, flags); +	mutex_lock(&vpci_dev->lock);  	for (slot = 0; slot < PCI_SLOT_MAX; slot++) { -		struct pci_dev_entry *e, *tmp; -		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot], -					 list) { +		struct pci_dev_entry *e; + +		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {  			if (e->dev == dev) {  				list_del(&e->list);  				found_dev = e->dev; @@ -163,7 +159,7 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,  	}  out: -	spin_unlock_irqrestore(&vpci_dev->lock, flags); +	mutex_unlock(&vpci_dev->lock);  	if (found_dev)  		pcistub_put_pci_dev(found_dev); @@ -178,7 +174,7 @@ static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)  	if (!vpci_dev)  		return -ENOMEM; -	spin_lock_init(&vpci_dev->lock); +	mutex_init(&vpci_dev->lock);  	for (slot = 0; slot < PCI_SLOT_MAX; slot++)  		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]); @@ -222,10 +218,9 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,  	struct pci_dev_entry *entry;  	struct pci_dev *dev = NULL;  	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data; -	unsigned long flags;  	int found = 0, slot; -	spin_lock_irqsave(&vpci_dev->lock, flags); +	mutex_lock(&vpci_dev->lock);  	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {  		list_for_each_entry(entry,  			    &vpci_dev->dev_list[slot], @@ -243,11 +238,11 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,  			}  		}  	} -	spin_unlock_irqrestore(&vpci_dev->lock, flags); +	mutex_unlock(&vpci_dev->lock);  	return found;  } -struct xen_pcibk_backend xen_pcibk_vpci_backend = { +const struct xen_pcibk_backend xen_pcibk_vpci_backend = {  	.name		= "vpci",  	.init		= __xen_pcibk_init_devices,  	.free		= __xen_pcibk_release_devices, diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index 978d2c6f5dc..474d52ec337 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -13,7 +13,6 @@  #include <asm/xen/pci.h>  #include "pciback.h" -#define	DRV_NAME	"xen-pciback"  #define INVALID_EVTCHN_IRQ  (-1)  struct workqueue_struct *xen_pcibk_wq; @@ -44,7 +43,7 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)  	pdev->xdev = xdev;  	dev_set_drvdata(&xdev->dev, pdev); -	spin_lock_init(&pdev->dev_lock); +	mutex_init(&pdev->dev_lock);  	pdev->sh_info = NULL;  	pdev->evtchn_irq = INVALID_EVTCHN_IRQ; @@ -62,14 +61,12 @@ out:  static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)  { -	spin_lock(&pdev->dev_lock); - +	mutex_lock(&pdev->dev_lock);  	/* Ensure the guest can't trigger our handler before removing devices */  	if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {  		unbind_from_irqhandler(pdev->evtchn_irq, pdev);  		pdev->evtchn_irq = INVALID_EVTCHN_IRQ;  	} -	spin_unlock(&pdev->dev_lock);  	/* If the driver domain started an op, make sure we complete it  	 * before releasing the shared memory */ @@ -77,13 +74,11 @@ static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)  	/* Note, the workqueue does not use spinlocks at all.*/  	flush_workqueue(xen_pcibk_wq); -	spin_lock(&pdev->dev_lock);  	if (pdev->sh_info != NULL) {  		xenbus_unmap_ring_vfree(pdev->xdev, 
pdev->sh_info);  		pdev->sh_info = NULL;  	} -	spin_unlock(&pdev->dev_lock); - +	mutex_unlock(&pdev->dev_lock);  }  static void free_pdev(struct xen_pcibk_device *pdev) @@ -120,9 +115,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,  		goto out;  	} -	spin_lock(&pdev->dev_lock);  	pdev->sh_info = vaddr; -	spin_unlock(&pdev->dev_lock);  	err = bind_interdomain_evtchn_to_irqhandler(  		pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event, @@ -132,10 +125,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,  				 "Error binding event channel to IRQ");  		goto out;  	} - -	spin_lock(&pdev->dev_lock);  	pdev->evtchn_irq = err; -	spin_unlock(&pdev->dev_lock);  	err = 0;  	dev_dbg(&pdev->xdev->dev, "Attached!\n"); @@ -150,6 +140,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)  	char *magic = NULL; +	mutex_lock(&pdev->dev_lock);  	/* Make sure we only do this setup once */  	if (xenbus_read_driver_state(pdev->xdev->nodename) !=  	    XenbusStateInitialised) @@ -176,7 +167,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)  	if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {  		xenbus_dev_fatal(pdev->xdev, -EFAULT,  				 "version mismatch (%s/%s) with pcifront - " -				 "halting xen_pcibk", +				 "halting " DRV_NAME,  				 magic, XEN_PCI_MAGIC);  		goto out;  	} @@ -194,6 +185,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)  	dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);  out: +	mutex_unlock(&pdev->dev_lock);  	kfree(magic); @@ -369,6 +361,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)  	dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n"); +	mutex_lock(&pdev->dev_lock);  	/* Make sure we only reconfigure once */  	if (xenbus_read_driver_state(pdev->xdev->nodename) !=  	    XenbusStateReconfiguring) @@ -506,6 +499,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)  	}  out: +	mutex_unlock(&pdev->dev_lock);  	return 0;  } @@ -562,6 +556,7 @@ static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)  	char dev_str[64];  	char state_str[64]; +	mutex_lock(&pdev->dev_lock);  	/* It's possible we could get the call to setup twice, so make sure  	 * we're not already connected.  	 */ @@ -642,10 +637,10 @@ static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)  				 "Error switching to initialised state!");  out: +	mutex_unlock(&pdev->dev_lock);  	if (!err)  		/* see if pcifront is already configured (if not, we'll wait) */  		xen_pcibk_attach(pdev); -  	return err;  } @@ -724,7 +719,7 @@ static struct xenbus_driver xenbus_xen_pcibk_driver = {  	.otherend_changed	= xen_pcibk_frontend_changed,  }; -struct xen_pcibk_backend *xen_pcibk_backend; +const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;  int __init xen_pcibk_xenbus_register(void)  { diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 6ea852e2516..d93c70857e0 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c @@ -68,6 +68,8 @@   */  #include <linux/kernel.h> +#include <linux/bootmem.h> +#include <linux/swap.h>  #include <linux/mm.h>  #include <linux/mman.h>  #include <linux/module.h> @@ -93,6 +95,15 @@ static unsigned int selfballoon_uphysteresis __read_mostly = 1;  /* In HZ, controls frequency of worker invocation. */  static unsigned int selfballoon_interval __read_mostly = 5; +/* + * Minimum usable RAM in MB for selfballooning target for balloon. 
+ * If non-zero, it is added to totalreserve_pages and self-ballooning + * will not balloon below the sum.  If zero, a piecewise linear function + * is calculated as a minimum and added to totalreserve_pages.  Note that + * setting this value indiscriminately may cause OOMs and crashes. + */ +static unsigned int selfballoon_min_usable_mb; +  static void selfballoon_process(struct work_struct *work);  static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process); @@ -189,20 +200,23 @@ static int __init xen_selfballooning_setup(char *s)  __setup("selfballooning", xen_selfballooning_setup);  #endif /* CONFIG_FRONTSWAP */ +#define MB2PAGES(mb)	((mb) << (20 - PAGE_SHIFT)) +  /*   * Use current balloon size, the goal (vm_committed_as), and hysteresis   * parameters to set a new target balloon size   */  static void selfballoon_process(struct work_struct *work)  { -	unsigned long cur_pages, goal_pages, tgt_pages; +	unsigned long cur_pages, goal_pages, tgt_pages, floor_pages; +	unsigned long useful_pages;  	bool reset_timer = false;  	if (xen_selfballooning_enabled) { -		cur_pages = balloon_stats.current_pages; +		cur_pages = totalram_pages;  		tgt_pages = cur_pages; /* default is no change */  		goal_pages = percpu_counter_read_positive(&vm_committed_as) + -			balloon_stats.current_pages - totalram_pages; +				totalreserve_pages;  #ifdef CONFIG_FRONTSWAP  		/* allow space for frontswap pages to be repatriated */  		if (frontswap_selfshrinking && frontswap_enabled) @@ -217,7 +231,26 @@ static void selfballoon_process(struct work_struct *work)  				((goal_pages - cur_pages) /  				  selfballoon_uphysteresis);  		/* else if cur_pages == goal_pages, no change */ -		balloon_set_new_target(tgt_pages); +		useful_pages = max_pfn - totalreserve_pages; +		if (selfballoon_min_usable_mb != 0) +			floor_pages = totalreserve_pages + +					MB2PAGES(selfballoon_min_usable_mb); +		/* piecewise linear function ending in ~3% slope */ +		else if (useful_pages < MB2PAGES(16)) +			floor_pages = max_pfn; /* not worth ballooning */ +		else if (useful_pages < MB2PAGES(64)) +			floor_pages = totalreserve_pages + MB2PAGES(16) + +					((useful_pages - MB2PAGES(16)) >> 1); +		else if (useful_pages < MB2PAGES(512)) +			floor_pages = totalreserve_pages + MB2PAGES(40) + +					((useful_pages - MB2PAGES(40)) >> 3); +		else /* useful_pages >= MB2PAGES(512) */ +			floor_pages = totalreserve_pages + MB2PAGES(99) + +					((useful_pages - MB2PAGES(99)) >> 5); +		if (tgt_pages < floor_pages) +			tgt_pages = floor_pages; +		balloon_set_new_target(tgt_pages + +			balloon_stats.current_pages - totalram_pages);  		reset_timer = true;  	}  #ifdef CONFIG_FRONTSWAP @@ -340,6 +373,31 @@ static ssize_t store_selfballoon_uphys(struct sys_device *dev,  static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,  		   show_selfballoon_uphys, store_selfballoon_uphys); +SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n", +				selfballoon_min_usable_mb); + +static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev, +					       struct sysdev_attribute *attr, +					       const char *buf, +					       size_t count) +{ +	unsigned long val; +	int err; + +	if (!capable(CAP_SYS_ADMIN)) +		return -EPERM; +	err = strict_strtoul(buf, 10, &val); +	if (err || val == 0) +		return -EINVAL; +	selfballoon_min_usable_mb = val; +	return count; +} + +static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR, +		   show_selfballoon_min_usable_mb, +		   store_selfballoon_min_usable_mb); + +  #ifdef CONFIG_FRONTSWAP  
SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking); @@ -421,6 +479,7 @@ static struct attribute *selfballoon_attrs[] = {  	&attr_selfballoon_interval.attr,  	&attr_selfballoon_downhysteresis.attr,  	&attr_selfballoon_uphysteresis.attr, +	&attr_selfballoon_min_usable_mb.attr,  #ifdef CONFIG_FRONTSWAP  	&attr_frontswap_selfshrinking.attr,  	&attr_frontswap_hysteresis.attr, diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index 090c61ee8fd..2eff7a6aaa2 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -212,7 +212,9 @@ int xb_init_comms(void)  		printk(KERN_WARNING "XENBUS response ring is not quiescent "  		       "(%08x:%08x): fixing up\n",  		       intf->rsp_cons, intf->rsp_prod); -		intf->rsp_cons = intf->rsp_prod; +		/* breaks kdump */ +		if (!reset_devices) +			intf->rsp_cons = intf->rsp_prod;  	}  	if (xenbus_irq) { diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index bd2f90c9ac8..cef9b0bf63d 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -684,64 +684,74 @@ static int __init xenbus_probe_initcall(void)  device_initcall(xenbus_probe_initcall); -static int __init xenbus_init(void) +/* Set up event channel for xenstored which is run as a local process + * (this is normally used only in dom0) + */ +static int __init xenstored_local_init(void)  {  	int err = 0;  	unsigned long page = 0; +	struct evtchn_alloc_unbound alloc_unbound; -	DPRINTK(""); +	/* Allocate Xenstore page */ +	page = get_zeroed_page(GFP_KERNEL); +	if (!page) +		goto out_err; -	err = -ENODEV; -	if (!xen_domain()) -		return err; +	xen_store_mfn = xen_start_info->store_mfn = +		pfn_to_mfn(virt_to_phys((void *)page) >> +			   PAGE_SHIFT); -	/* -	 * Domain0 doesn't have a store_evtchn or store_mfn yet. 
-	 */ -	if (xen_initial_domain()) { -		struct evtchn_alloc_unbound alloc_unbound; +	/* Next allocate a local port which xenstored can bind to */ +	alloc_unbound.dom        = DOMID_SELF; +	alloc_unbound.remote_dom = DOMID_SELF; -		/* Allocate Xenstore page */ -		page = get_zeroed_page(GFP_KERNEL); -		if (!page) -			goto out_error; +	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, +					  &alloc_unbound); +	if (err == -ENOSYS) +		goto out_err; -		xen_store_mfn = xen_start_info->store_mfn = -			pfn_to_mfn(virt_to_phys((void *)page) >> -				   PAGE_SHIFT); +	BUG_ON(err); +	xen_store_evtchn = xen_start_info->store_evtchn = +		alloc_unbound.port; -		/* Next allocate a local port which xenstored can bind to */ -		alloc_unbound.dom        = DOMID_SELF; -		alloc_unbound.remote_dom = 0; +	return 0; -		err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, -						  &alloc_unbound); -		if (err == -ENOSYS) -			goto out_error; + out_err: +	if (page != 0) +		free_page(page); +	return err; +} -		BUG_ON(err); -		xen_store_evtchn = xen_start_info->store_evtchn = -			alloc_unbound.port; +static int __init xenbus_init(void) +{ +	int err = 0; -		xen_store_interface = mfn_to_virt(xen_store_mfn); +	if (!xen_domain()) +		return -ENODEV; + +	if (xen_hvm_domain()) { +		uint64_t v = 0; +		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); +		if (err) +			goto out_error; +		xen_store_evtchn = (int)v; +		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); +		if (err) +			goto out_error; +		xen_store_mfn = (unsigned long)v; +		xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);  	} else { -		if (xen_hvm_domain()) { -			uint64_t v = 0; -			err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); -			if (err) -				goto out_error; -			xen_store_evtchn = (int)v; -			err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); +		xen_store_evtchn = xen_start_info->store_evtchn; +		xen_store_mfn = xen_start_info->store_mfn; +		if (xen_store_evtchn) +			xenstored_ready = 1; +		else { +			err = xenstored_local_init();  			if (err)  				goto out_error; -			xen_store_mfn = (unsigned long)v; -			xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); -		} else { -			xen_store_evtchn = xen_start_info->store_evtchn; -			xen_store_mfn = xen_start_info->store_mfn; -			xen_store_interface = mfn_to_virt(xen_store_mfn); -			xenstored_ready = 1;  		} +		xen_store_interface = mfn_to_virt(xen_store_mfn);  	}  	/* Initialize the interface to xenstore. 
*/ @@ -760,12 +770,7 @@ static int __init xenbus_init(void)  	proc_mkdir("xen", NULL);  #endif -	return 0; - -  out_error: -	if (page != 0) -		free_page(page); - + out_error:  	return err;  } diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c index 60adf919d78..32417b5064f 100644 --- a/drivers/xen/xenbus/xenbus_probe_backend.c +++ b/drivers/xen/xenbus/xenbus_probe_backend.c @@ -104,8 +104,6 @@ static int xenbus_uevent_backend(struct device *dev,  	xdev = to_xenbus_device(dev);  	bus = container_of(xdev->dev.bus, struct xen_bus_type, bus); -	if (xdev == NULL) -		return -ENODEV;  	if (add_uevent_var(env, "MODALIAS=xen-backend:%s", xdev->devicetype))  		return -ENOMEM; diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index ed2ba474a56..540587e18a9 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -248,10 +248,131 @@ int __xenbus_register_frontend(struct xenbus_driver *drv,  }  EXPORT_SYMBOL_GPL(__xenbus_register_frontend); +static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); +static int backend_state; + +static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, +					const char **v, unsigned int l) +{ +	xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state); +	printk(KERN_DEBUG "XENBUS: backend %s %s\n", +			v[XS_WATCH_PATH], xenbus_strstate(backend_state)); +	wake_up(&backend_state_wq); +} + +static void xenbus_reset_wait_for_backend(char *be, int expected) +{ +	long timeout; +	timeout = wait_event_interruptible_timeout(backend_state_wq, +			backend_state == expected, 5 * HZ); +	if (timeout <= 0) +		printk(KERN_INFO "XENBUS: backend %s timed out.\n", be); +} + +/* + * Reset frontend if it is in Connected or Closed state. + * Wait for backend to catch up. + * State Connected happens during kdump, Closed after kexec. 
+ */ +static void xenbus_reset_frontend(char *fe, char *be, int be_state) +{ +	struct xenbus_watch be_watch; + +	printk(KERN_DEBUG "XENBUS: backend %s %s\n", +			be, xenbus_strstate(be_state)); + +	memset(&be_watch, 0, sizeof(be_watch)); +	be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be); +	if (!be_watch.node) +		return; + +	be_watch.callback = xenbus_reset_backend_state_changed; +	backend_state = XenbusStateUnknown; + +	printk(KERN_INFO "XENBUS: triggering reconnect on %s\n", be); +	register_xenbus_watch(&be_watch); + +	/* fall through to forward backend to state XenbusStateInitialising */ +	switch (be_state) { +	case XenbusStateConnected: +		xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing); +		xenbus_reset_wait_for_backend(be, XenbusStateClosing); + +	case XenbusStateClosing: +		xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed); +		xenbus_reset_wait_for_backend(be, XenbusStateClosed); + +	case XenbusStateClosed: +		xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising); +		xenbus_reset_wait_for_backend(be, XenbusStateInitWait); +	} + +	unregister_xenbus_watch(&be_watch); +	printk(KERN_INFO "XENBUS: reconnect done on %s\n", be); +	kfree(be_watch.node); +} + +static void xenbus_check_frontend(char *class, char *dev) +{ +	int be_state, fe_state, err; +	char *backend, *frontend; + +	frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev); +	if (!frontend) +		return; + +	err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state); +	if (err != 1) +		goto out; + +	switch (fe_state) { +	case XenbusStateConnected: +	case XenbusStateClosed: +		printk(KERN_DEBUG "XENBUS: frontend %s %s\n", +				frontend, xenbus_strstate(fe_state)); +		backend = xenbus_read(XBT_NIL, frontend, "backend", NULL); +		if (!backend || IS_ERR(backend)) +			goto out; +		err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state); +		if (err == 1) +			xenbus_reset_frontend(frontend, backend, be_state); +		kfree(backend); +		break; +	default: +		break; +	} +out: +	kfree(frontend); +} + +static void xenbus_reset_state(void) +{ +	char **devclass, **dev; +	int devclass_n, dev_n; +	int i, j; + +	devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n); +	if (IS_ERR(devclass)) +		return; + +	for (i = 0; i < devclass_n; i++) { +		dev = xenbus_directory(XBT_NIL, "device", devclass[i], &dev_n); +		if (IS_ERR(dev)) +			continue; +		for (j = 0; j < dev_n; j++) +			xenbus_check_frontend(devclass[i], dev[j]); +		kfree(dev); +	} +	kfree(devclass); +} +  static int frontend_probe_and_watch(struct notifier_block *notifier,  				   unsigned long event,  				   void *data)  { +	/* reset devices in Connected or Closed state */ +	if (xen_hvm_domain()) +		xenbus_reset_state();  	/* Enumerate devices in xenstore and watch for changes. 
*/  	xenbus_probe_devices(&xenbus_frontend);  	register_xenbus_watch(&fe_watch); diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 5534690075a..b3b8f2f3ad1 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -45,6 +45,7 @@  #include <linux/module.h>  #include <linux/mutex.h>  #include <xen/xenbus.h> +#include <xen/xen.h>  #include "xenbus_comms.h"  struct xs_stored_msg { @@ -620,6 +621,15 @@ static struct xenbus_watch *find_watch(const char *token)  	return NULL;  } +static void xs_reset_watches(void) +{ +	int err; + +	err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL)); +	if (err && err != -EEXIST) +		printk(KERN_WARNING "xs_reset_watches failed: %d\n", err); +} +  /* Register callback to watch this node. */  int register_xenbus_watch(struct xenbus_watch *watch)  { @@ -638,8 +648,7 @@ int register_xenbus_watch(struct xenbus_watch *watch)  	err = xs_watch(watch->node, token); -	/* Ignore errors due to multiple registration. */ -	if ((err != 0) && (err != -EEXIST)) { +	if (err) {  		spin_lock(&watches_lock);  		list_del(&watch->list);  		spin_unlock(&watches_lock); @@ -897,5 +906,9 @@ int xs_init(void)  	if (IS_ERR(task))  		return PTR_ERR(task); +	/* shutdown watches for kexec boot */ +	if (xen_hvm_domain()) +		xs_reset_watches(); +  	return 0;  } diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c index 7ee2b6e7178..229624f867d 100644 --- a/drivers/zorro/zorro-driver.c +++ b/drivers/zorro/zorro-driver.c @@ -37,6 +37,7 @@ zorro_match_device(const struct zorro_device_id *ids,  	}  	return NULL;  } +EXPORT_SYMBOL(zorro_match_device);  static int zorro_device_probe(struct device *dev) @@ -91,6 +92,7 @@ int zorro_register_driver(struct zorro_driver *drv)  	/* register with core */  	return driver_register(&drv->driver);  } +EXPORT_SYMBOL(zorro_register_driver);      /** @@ -107,6 +109,7 @@ void zorro_unregister_driver(struct zorro_driver *drv)  {  	driver_unregister(&drv->driver);  } +EXPORT_SYMBOL(zorro_unregister_driver);      /** @@ -168,6 +171,7 @@ struct bus_type zorro_bus_type = {  	.probe	= zorro_device_probe,  	.remove	= zorro_device_remove,  }; +EXPORT_SYMBOL(zorro_bus_type);  static int __init zorro_driver_init(void) @@ -177,7 +181,3 @@ static int __init zorro_driver_init(void)  postcore_initcall(zorro_driver_init); -EXPORT_SYMBOL(zorro_match_device); -EXPORT_SYMBOL(zorro_register_driver); -EXPORT_SYMBOL(zorro_unregister_driver); -EXPORT_SYMBOL(zorro_bus_type); diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c index e0c2807b097..181fa8158a8 100644 --- a/drivers/zorro/zorro.c +++ b/drivers/zorro/zorro.c @@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)  	}  	platform_set_drvdata(pdev, bus); -	/* Register all devices */  	pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",  		 zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); +	/* First identify all devices ... */  	for (i = 0; i < zorro_num_autocon; i++) {  		z = &zorro_autocon[i];  		z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); @@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)  		dev_set_name(&z->dev, "%02x", i);  		z->dev.parent = &bus->dev;  		z->dev.bus = &zorro_bus_type; +	} + +	/* ... then register them */ +	for (i = 0; i < zorro_num_autocon; i++) { +		z = &zorro_autocon[i];  		error = device_register(&z->dev);  		if (error) {  			dev_err(&bus->dev, "Error registering device %s\n",  |