| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2013-05-01 08:47:44 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2013-05-01 08:47:44 -0700 | 
| commit | bf61c8840efe60fd8f91446860b63338fb424158 (patch) | |
| tree | 7a71832407a4f0d6346db773343f4c3ae2257b19 /drivers/net/ethernet/nvidia | |
| parent | 5846115b30f3a881e542c8bfde59a699c1c13740 (diff) | |
| parent | 0c6a61657da78098472fd0eb71cc01f2387fa1bb (diff) | |
| download | olio-linux-3.10-bf61c8840efe60fd8f91446860b63338fb424158.tar.xz, olio-linux-3.10-bf61c8840efe60fd8f91446860b63338fb424158.zip | |
Merge branch 'next' into for-linus
Prepare first set of updates for 3.10 merge window.
Diffstat (limited to 'drivers/net/ethernet/nvidia')
| -rw-r--r-- | drivers/net/ethernet/nvidia/forcedeth.c | 45 | 
1 file changed, 39 insertions, 6 deletions
```diff
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 876beceaf2d..0b8de12bcbc 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1821,6 +1821,11 @@ static int nv_alloc_rx(struct net_device *dev)
 							     skb->data,
 							     skb_tailroom(skb),
 							     PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						  np->put_rx_ctx->dma)) {
+				kfree_skb(skb);
+				goto packet_dropped;
+			}
 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
 			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
 			wmb();
@@ -1830,6 +1835,7 @@ static int nv_alloc_rx(struct net_device *dev)
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
+packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
 			np->stat_rx_dropped++;
 			u64_stats_update_end(&np->swstats_rx_syncp);
@@ -1856,6 +1862,11 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 							     skb->data,
 							     skb_tailroom(skb),
 							     PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						  np->put_rx_ctx->dma)) {
+				kfree_skb(skb);
+				goto packet_dropped;
+			}
 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
 			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
 			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
@@ -1866,6 +1877,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
+packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
 			np->stat_rx_dropped++;
 			u64_stats_update_end(&np->swstats_rx_syncp);
@@ -2217,6 +2229,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
 						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->put_tx_ctx->dma)) {
+			/* on DMA mapping error - drop the packet */
+			kfree_skb(skb);
+			u64_stats_update_begin(&np->swstats_tx_syncp);
+			np->stat_tx_dropped++;
+			u64_stats_update_end(&np->swstats_tx_syncp);
+			return NETDEV_TX_OK;
+		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2337,6 +2358,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
 						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->put_tx_ctx->dma)) {
+			/* on DMA mapping error - drop the packet */
+			kfree_skb(skb);
+			u64_stats_update_begin(&np->swstats_tx_syncp);
+			np->stat_tx_dropped++;
+			u64_stats_update_end(&np->swstats_tx_syncp);
+			return NETDEV_TX_OK;
+		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -3025,7 +3055,6 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 	/* synchronized against open : rtnl_lock() held by caller */
 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
-	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
 
 	if (netif_running(dev)) {
 		netif_tx_lock_bh(dev);
@@ -5003,6 +5032,11 @@ static int nv_loopback_test(struct net_device *dev)
 	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
 				       skb_tailroom(tx_skb),
 				       PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(np->pci_dev,
+				  test_dma_addr)) {
+		dev_kfree_skb_any(tx_skb);
+		goto out;
+	}
 	pkt_data = skb_put(tx_skb, pkt_len);
 	for (i = 0; i < pkt_len; i++)
 		pkt_data[i] = (u8)(i & 0xff);
@@ -5520,7 +5554,7 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
 #endif
 };
 
-static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 {
 	struct net_device *dev;
 	struct fe_priv *np;
@@ -5731,9 +5765,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 			"%s: set workaround bit for reversed mac addr\n",
 			__func__);
 	}
-	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-	if (!is_valid_ether_addr(dev->perm_addr)) {
+	if (!is_valid_ether_addr(dev->dev_addr)) {
 		/*
 		 * Bad mac address. At least one bios sets the mac address
 		 * to 01:23:45:67:89:ab
@@ -5995,7 +6028,7 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
 	       base + NvRegTransmitPoll);
 }
 
-static void __devexit nv_remove(struct pci_dev *pci_dev)
+static void nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
@@ -6271,7 +6304,7 @@ static struct pci_driver driver = {
 	.name		= DRV_NAME,
 	.id_table	= pci_tbl,
 	.probe		= nv_probe,
-	.remove		= __devexit_p(nv_remove),
+	.remove		= nv_remove,
 	.shutdown	= nv_shutdown,
 	.driver.pm	= NV_PM_OPS,
 };
```
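Every hunk above applies the same defensive pattern: after each `pci_map_single()` call the result is checked with `pci_dma_mapping_error()`, and on failure the skb is freed and the packet is dropped (bumping the driver's software drop counter on the TX path) instead of handing an invalid DMA address to the NIC. Below is a minimal sketch of that TX-side pattern; the function and parameter names are illustrative, not forcedeth's, but the `pci_map_single()`/`pci_dma_mapping_error()` calls and the stats/`NETDEV_TX_OK` handling mirror the diff.

```c
/*
 * Illustrative sketch (not forcedeth code) of the DMA-mapping-error
 * handling this merge adds: map the skb, verify the mapping, and drop
 * the packet on failure instead of queuing a bad DMA address.
 */
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

static netdev_tx_t example_map_and_xmit(struct sk_buff *skb,
					struct pci_dev *pdev,
					struct u64_stats_sync *tx_syncp,
					u64 *stat_tx_dropped)
{
	dma_addr_t dma = pci_map_single(pdev, skb->data, skb_headlen(skb),
					PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		/* on DMA mapping error - drop the packet */
		kfree_skb(skb);
		u64_stats_update_begin(tx_syncp);
		(*stat_tx_dropped)++;
		u64_stats_update_end(tx_syncp);
		return NETDEV_TX_OK;	/* skb consumed, nothing queued */
	}

	/* ... descriptor setup using 'dma' would continue here ... */
	return NETDEV_TX_OK;
}
```

Returning `NETDEV_TX_OK` after freeing the skb matters: the packet is accounted as dropped but the stack does not requeue it, which is the behavior the patch gives both `nv_start_xmit()` and `nv_start_xmit_optimized()`. The RX allocation paths use the same check but jump to the new `packet_dropped:` label so the existing drop accounting is reused.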