diff options
Diffstat (limited to 'drivers/net/ethernet/nvidia/forcedeth.c')
| -rw-r--r-- | drivers/net/ethernet/nvidia/forcedeth.c | 88 | 
1 file changed, 31 insertions(+), 57 deletions(-)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 1e37eb98c4e..1dca57013cb 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1682,6 +1682,7 @@ static void nv_get_hw_stats(struct net_device *dev)  		np->estats.tx_pause += readl(base + NvRegTxPause);  		np->estats.rx_pause += readl(base + NvRegRxPause);  		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); +		np->estats.rx_errors_total += np->estats.rx_drop_frame;  	}  	if (np->driver_data & DEV_HAS_STATISTICS_V3) { @@ -1706,11 +1707,14 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)  		nv_get_hw_stats(dev);  		/* copy to net_device stats */ +		dev->stats.tx_packets = np->estats.tx_packets; +		dev->stats.rx_bytes = np->estats.rx_bytes;  		dev->stats.tx_bytes = np->estats.tx_bytes;  		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;  		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;  		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;  		dev->stats.rx_over_errors = np->estats.rx_over_errors; +		dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;  		dev->stats.rx_errors = np->estats.rx_errors_total;  		dev->stats.tx_errors = np->estats.tx_errors_total;  	} @@ -2099,10 +2103,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)  	/* add fragments to entries count */  	for (i = 0; i < fragments; i++) { -		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); +		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); -		entries += (size >> NV_TX2_TSO_MAX_SHIFT) + -			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); +		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + +			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 
1 : 0);  	}  	spin_lock_irqsave(&np->lock, flags); @@ -2141,13 +2145,13 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)  	/* setup the fragments */  	for (i = 0; i < fragments; i++) {  		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; -		u32 size = skb_frag_size(frag); +		u32 frag_size = skb_frag_size(frag);  		offset = 0;  		do {  			prev_tx = put_tx;  			prev_tx_ctx = np->put_tx_ctx; -			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; +			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;  			np->put_tx_ctx->dma = skb_frag_dma_map(  							&np->pci_dev->dev,  							frag, offset, @@ -2159,12 +2163,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)  			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);  			offset += bcnt; -			size -= bcnt; +			frag_size -= bcnt;  			if (unlikely(put_tx++ == np->last_tx.orig))  				put_tx = np->first_tx.orig;  			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))  				np->put_tx_ctx = np->first_tx_ctx; -		} while (size); +		} while (frag_size);  	}  	/* set last fragment flag  */ @@ -2213,10 +2217,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,  	/* add fragments to entries count */  	for (i = 0; i < fragments; i++) { -		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); +		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); -		entries += (size >> NV_TX2_TSO_MAX_SHIFT) + -			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); +		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + +			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 
1 : 0);  	}  	spin_lock_irqsave(&np->lock, flags); @@ -2257,13 +2261,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,  	/* setup the fragments */  	for (i = 0; i < fragments; i++) {  		skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; -		u32 size = skb_frag_size(frag); +		u32 frag_size = skb_frag_size(frag);  		offset = 0;  		do {  			prev_tx = put_tx;  			prev_tx_ctx = np->put_tx_ctx; -			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; +			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;  			np->put_tx_ctx->dma = skb_frag_dma_map(  							&np->pci_dev->dev,  							frag, offset, @@ -2276,12 +2280,12 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,  			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);  			offset += bcnt; -			size -= bcnt; +			frag_size -= bcnt;  			if (unlikely(put_tx++ == np->last_tx.ex))  				put_tx = np->first_tx.ex;  			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))  				np->put_tx_ctx = np->first_tx_ctx; -		} while (size); +		} while (frag_size);  	}  	/* set last fragment flag  */ @@ -2374,16 +2378,8 @@ static int nv_tx_done(struct net_device *dev, int limit)  		if (np->desc_ver == DESC_VER_1) {  			if (flags & NV_TX_LASTPACKET) {  				if (flags & NV_TX_ERROR) { -					if (flags & NV_TX_UNDERFLOW) -						dev->stats.tx_fifo_errors++; -					if (flags & NV_TX_CARRIERLOST) -						dev->stats.tx_carrier_errors++;  					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))  						nv_legacybackoff_reseed(dev); -					dev->stats.tx_errors++; -				} else { -					dev->stats.tx_packets++; -					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;  				}  				dev_kfree_skb_any(np->get_tx_ctx->skb);  				np->get_tx_ctx->skb = NULL; @@ -2392,16 +2388,8 @@ static int nv_tx_done(struct net_device *dev, int limit)  		} else {  			if (flags & NV_TX2_LASTPACKET) {  				if (flags & NV_TX2_ERROR) { -					if (flags & NV_TX2_UNDERFLOW) -						dev->stats.tx_fifo_errors++; -	
				if (flags & NV_TX2_CARRIERLOST) -						dev->stats.tx_carrier_errors++;  					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))  						nv_legacybackoff_reseed(dev); -					dev->stats.tx_errors++; -				} else { -					dev->stats.tx_packets++; -					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;  				}  				dev_kfree_skb_any(np->get_tx_ctx->skb);  				np->get_tx_ctx->skb = NULL; @@ -2434,9 +2422,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)  		nv_unmap_txskb(np, np->get_tx_ctx);  		if (flags & NV_TX2_LASTPACKET) { -			if (!(flags & NV_TX2_ERROR)) -				dev->stats.tx_packets++; -			else { +			if (flags & NV_TX2_ERROR) {  				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {  					if (np->driver_data & DEV_HAS_GEAR_MODE)  						nv_gear_backoff_reseed(dev); @@ -2636,7 +2622,6 @@ static int nv_rx_process(struct net_device *dev, int limit)  					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {  						len = nv_getlen(dev, skb->data, len);  						if (len < 0) { -							dev->stats.rx_errors++;  							dev_kfree_skb(skb);  							goto next_pkt;  						} @@ -2650,11 +2635,6 @@ static int nv_rx_process(struct net_device *dev, int limit)  					else {  						if (flags & NV_RX_MISSEDFRAME)  							dev->stats.rx_missed_errors++; -						if (flags & NV_RX_CRCERR) -							dev->stats.rx_crc_errors++; -						if (flags & NV_RX_OVERFLOW) -							dev->stats.rx_over_errors++; -						dev->stats.rx_errors++;  						dev_kfree_skb(skb);  						goto next_pkt;  					} @@ -2670,7 +2650,6 @@ static int nv_rx_process(struct net_device *dev, int limit)  					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {  						len = nv_getlen(dev, skb->data, len);  						if (len < 0) { -							dev->stats.rx_errors++;  							dev_kfree_skb(skb);  							goto next_pkt;  						} @@ -2682,11 +2661,6 @@ static int nv_rx_process(struct net_device *dev, int limit)  					}  					/* the rest are hard errors */  					else { -						if (flags & 
NV_RX2_CRCERR) -							dev->stats.rx_crc_errors++; -						if (flags & NV_RX2_OVERFLOW) -							dev->stats.rx_over_errors++; -						dev->stats.rx_errors++;  						dev_kfree_skb(skb);  						goto next_pkt;  					} @@ -2704,7 +2678,6 @@ static int nv_rx_process(struct net_device *dev, int limit)  		skb->protocol = eth_type_trans(skb, dev);  		napi_gro_receive(&np->napi, skb);  		dev->stats.rx_packets++; -		dev->stats.rx_bytes += len;  next_pkt:  		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))  			np->get_rx.orig = np->first_rx.orig; @@ -2787,9 +2760,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)  				__vlan_hwaccel_put_tag(skb, vid);  			}  			napi_gro_receive(&np->napi, skb); -  			dev->stats.rx_packets++; -			dev->stats.rx_bytes += len;  		} else {  			dev_kfree_skb(skb);  		} @@ -2962,11 +2933,11 @@ static void nv_set_multicast(struct net_device *dev)  				struct netdev_hw_addr *ha;  				netdev_for_each_mc_addr(ha, dev) { -					unsigned char *addr = ha->addr; +					unsigned char *hw_addr = ha->addr;  					u32 a, b; -					a = le32_to_cpu(*(__le32 *) addr); -					b = le16_to_cpu(*(__le16 *) (&addr[4])); +					a = le32_to_cpu(*(__le32 *) hw_addr); +					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));  					alwaysOn[0] &= a;  					alwaysOff[0] &= ~a;  					alwaysOn[1] &= b; @@ -3398,7 +3369,8 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)  	for (i = 0;; i++) {  		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; -		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); +		writel(events, base + NvRegMSIXIrqStatus); +		netdev_dbg(dev, "tx irq events: %08x\n", events);  		if (!(events & np->irqmask))  			break; @@ -3509,7 +3481,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)  	for (i = 0;; i++) {  		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; -		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); +		writel(events, base + NvRegMSIXIrqStatus); +		netdev_dbg(dev, "rx irq events: %08x\n", 
events);  		if (!(events & np->irqmask))  			break; @@ -3553,7 +3526,8 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)  	for (i = 0;; i++) {  		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; -		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); +		writel(events, base + NvRegMSIXIrqStatus); +		netdev_dbg(dev, "irq events: %08x\n", events);  		if (!(events & np->irqmask))  			break; @@ -3617,10 +3591,10 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)  	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {  		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; -		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); +		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);  	} else {  		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; -		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); +		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);  	}  	pci_push(base);  	if (!(events & NVREG_IRQ_TIMER)) @@ -4566,7 +4540,7 @@ static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *e  	struct fe_priv *np = netdev_priv(dev);  	/* update stats */ -	nv_do_stats_poll((unsigned long)dev); +	nv_get_hw_stats(dev);  	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));  }  |