diff options
Diffstat (limited to 'drivers/net/ethernet')
 drivers/net/ethernet/broadcom/bgmac.c               |  4
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c     |  2
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c |  6
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c    | 64
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h    |  3
 drivers/net/ethernet/freescale/fec.c                | 85
 drivers/net/ethernet/freescale/fec.h                | 18
 drivers/net/ethernet/realtek/r8169.c                | 27
 drivers/net/ethernet/sfc/efx.c                      | 16
 drivers/net/ethernet/sfc/net_driver.h               |  4
 drivers/net/ethernet/sfc/rx.c                       | 25
 drivers/net/ethernet/ti/cpsw.c                      |  2
 12 files changed, 173 insertions(+), 83 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 639049d7e92..da5f4397f87 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,  			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",  				  ring->start);  		} else { +			/* Omit CRC. */ +			len -= ETH_FCS_LEN; +  			new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);  			if (new_skb) {  				skb_put(new_skb, len);  				skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,  								 new_skb->data,  								 len); +				skb_checksum_none_assert(skb);  				new_skb->protocol =  					eth_type_trans(new_skb, bgmac->net_dev);  				netif_receive_skb(new_skb); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ecac04a3687..a923bc4d5a1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)  		tsum = ~csum_fold(csum_add((__force __wsum) csum,  				  csum_partial(t_header, -fix, 0))); -	return bswab16(csum); +	return bswab16(tsum);  }  static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 9a674b14b40..edfa67adf2f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)  			cmd->lp_advertising |= ADVERTISED_2500baseX_Full;  		if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)  			cmd->lp_advertising |= ADVERTISED_10000baseT_Full; +		if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) +	
		cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;  	}  	cmd->maxtxpkt = 0; @@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)  						ADVERTISED_10000baseKR_Full))  				bp->link_params.speed_cap_mask[cfg_idx] |=  					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; + +			if (cmd->advertising & ADVERTISED_20000baseKR2_Full) +				bp->link_params.speed_cap_mask[cfg_idx] |= +					PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;  		}  	} else { /* forced speed */  		/* advertise the requested speed and duplex if supported */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 1663e0b6b5a..31c5787970d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,  					 MDIO_PMA_DEVAD,  					 MDIO_PMA_REG_8481_LED1_MASK,  					 0x0); +			if (phy->type == +			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { +				/* Disable MI_INT interrupt before setting LED4 +				 * source to constant off. +				 */ +				if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + +					   params->port*4) & +				    NIG_MASK_MI_INT) { +					params->link_flags |= +					LINK_FLAGS_INT_DISABLED; + +					bnx2x_bits_dis( +						bp, +						NIG_REG_MASK_INTERRUPT_PORT0 + +						params->port*4, +						NIG_MASK_MI_INT); +				} +				bnx2x_cl45_write(bp, phy, +						 MDIO_PMA_DEVAD, +						 MDIO_PMA_REG_8481_SIGNAL_MASK, +						 0x0); +			}  		}  		break;  	case LED_MODE_ON: @@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,  					 MDIO_PMA_DEVAD,  					 MDIO_PMA_REG_8481_LED1_MASK,  					 0x20); +			if (phy->type == +			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { +				/* Disable MI_INT interrupt before setting LED4 +				 * source to constant on. 
+				 */ +				if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + +					   params->port*4) & +				    NIG_MASK_MI_INT) { +					params->link_flags |= +					LINK_FLAGS_INT_DISABLED; + +					bnx2x_bits_dis( +						bp, +						NIG_REG_MASK_INTERRUPT_PORT0 + +						params->port*4, +						NIG_MASK_MI_INT); +				} +				bnx2x_cl45_write(bp, phy, +						 MDIO_PMA_DEVAD, +						 MDIO_PMA_REG_8481_SIGNAL_MASK, +						 0x20); +			}  		}  		break; @@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,  					 MDIO_PMA_DEVAD,  					 MDIO_PMA_REG_8481_LINK_SIGNAL,  					 val); +			if (phy->type == +			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { +				/* Restore LED4 source to external link, +				 * and re-enable interrupts. +				 */ +				bnx2x_cl45_write(bp, phy, +						 MDIO_PMA_DEVAD, +						 MDIO_PMA_REG_8481_SIGNAL_MASK, +						 0x40); +				if (params->link_flags & +				    LINK_FLAGS_INT_DISABLED) { +					bnx2x_link_int_enable(params); +					params->link_flags &= +						~LINK_FLAGS_INT_DISABLED; +				} +			}  		}  		break;  	} @@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,  			phy->media_type = ETH_PHY_KR;  			phy->flags |= FLAGS_WC_DUAL_MODE;  			phy->supported &= (SUPPORTED_20000baseKR2_Full | +					   SUPPORTED_10000baseT_Full | +					   SUPPORTED_1000baseT_Full |  					   SUPPORTED_Autoneg |  					   SUPPORTED_FIBRE |  					   SUPPORTED_Pause | @@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)  		struct bnx2x_phy *phy = ¶ms->phy[INT_PHY];  		bnx2x_set_aer_mmd(params, phy);  		if ((phy->supported & SUPPORTED_20000baseKR2_Full) && -		    (phy->speed_cap_mask & SPEED_20000)) +		    (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))  			bnx2x_check_kr2_wa(params, vars, phy);  		bnx2x_check_over_curr(params, vars);  		if (vars->rx_tx_asic_rst) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index d25c7d79787..be5c195d03d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -307,7 +307,8 @@ struct link_params {  	struct bnx2x *bp;  	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when  				req_flow_ctrl is set to AUTO */ -	u16 rsrv1; +	u16 link_flags; +#define LINK_FLAGS_INT_DISABLED		(1<<0)  	u32 lfa_base;  }; diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index fccc3bf2141..069a155d16e 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)  	struct bufdesc *bdp;  	void *bufaddr;  	unsigned short	status; -	unsigned long flags; +	unsigned int index;  	if (!fep->link) {  		/* Link is down or autonegotiation is in progress. */  		return NETDEV_TX_BUSY;  	} -	spin_lock_irqsave(&fep->hw_lock, flags);  	/* Fill in a Tx ring entry */  	bdp = fep->cur_tx; @@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)  		 * This should not happen, since ndev->tbusy should be set.  		 */  		printk("%s: tx queue full!.\n", ndev->name); -		spin_unlock_irqrestore(&fep->hw_lock, flags);  		return NETDEV_TX_BUSY;  	} @@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)  	 * 4-byte boundaries. Use bounce buffers to copy data  	 * and get it aligned. Ugh.  	 
*/ +	if (fep->bufdesc_ex) +		index = (struct bufdesc_ex *)bdp - +			(struct bufdesc_ex *)fep->tx_bd_base; +	else +		index = bdp - fep->tx_bd_base; +  	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { -		unsigned int index; -		if (fep->bufdesc_ex) -			index = (struct bufdesc_ex *)bdp - -				(struct bufdesc_ex *)fep->tx_bd_base; -		else -			index = bdp - fep->tx_bd_base;  		memcpy(fep->tx_bounce[index], skb->data, skb->len);  		bufaddr = fep->tx_bounce[index];  	} @@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)  		swap_buffer(bufaddr, skb->len);  	/* Save skb pointer */ -	fep->tx_skbuff[fep->skb_cur] = skb; - -	ndev->stats.tx_bytes += skb->len; -	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; +	fep->tx_skbuff[index] = skb;  	/* Push the data cache so the CPM does not get stale memory  	 * data. @@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)  			ebdp->cbd_esc = BD_ENET_TX_INT;  		}  	} -	/* Trigger transmission start */ -	writel(0, fep->hwp + FEC_X_DES_ACTIVE); -  	/* If this was the last BD in the ring, start at the beginning again. */  	if (status & BD_ENET_TX_WRAP)  		bdp = fep->tx_bd_base;  	else  		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); -	if (bdp == fep->dirty_tx) { -		fep->tx_full = 1; +	fep->cur_tx = bdp; + +	if (fep->cur_tx == fep->dirty_tx)  		netif_stop_queue(ndev); -	} -	fep->cur_tx = bdp; +	/* Trigger transmission start */ +	writel(0, fep->hwp + FEC_X_DES_ACTIVE);  	skb_tx_timestamp(skb); -	spin_unlock_irqrestore(&fep->hw_lock, flags); -  	return NETDEV_TX_OK;  } @@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)  		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)  			* RX_RING_SIZE,	fep->hwp + FEC_X_DES_START); -	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;  	fep->cur_rx = fep->rx_bd_base; -	/* Reset SKB transmit buffers. 
*/ -	fep->skb_cur = fep->skb_dirty = 0;  	for (i = 0; i <= TX_RING_MOD_MASK; i++) {  		if (fep->tx_skbuff[i]) {  			dev_kfree_skb_any(fep->tx_skbuff[i]); @@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)  	struct bufdesc *bdp;  	unsigned short status;  	struct	sk_buff	*skb; +	int	index = 0;  	fep = netdev_priv(ndev); -	spin_lock(&fep->hw_lock);  	bdp = fep->dirty_tx; +	/* get next bdp of dirty_tx */ +	if (bdp->cbd_sc & BD_ENET_TX_WRAP) +		bdp = fep->tx_bd_base; +	else +		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); +  	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { -		if (bdp == fep->cur_tx && fep->tx_full == 0) + +		/* current queue is empty */ +		if (bdp == fep->cur_tx)  			break; +		if (fep->bufdesc_ex) +			index = (struct bufdesc_ex *)bdp - +				(struct bufdesc_ex *)fep->tx_bd_base; +		else +			index = bdp - fep->tx_bd_base; +  		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,  				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);  		bdp->cbd_bufaddr = 0; -		skb = fep->tx_skbuff[fep->skb_dirty]; +		skb = fep->tx_skbuff[index]; +  		/* Check for errors. 
*/  		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |  				   BD_ENET_TX_RL | BD_ENET_TX_UN | @@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)  		/* Free the sk buffer associated with this last transmit */  		dev_kfree_skb_any(skb); -		fep->tx_skbuff[fep->skb_dirty] = NULL; -		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; +		fep->tx_skbuff[index] = NULL; + +		fep->dirty_tx = bdp;  		/* Update pointer to next buffer descriptor to be transmitted */  		if (status & BD_ENET_TX_WRAP) @@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)  		/* Since we have freed up a buffer, the ring is no longer full  		 */ -		if (fep->tx_full) { -			fep->tx_full = 0; +		if (fep->dirty_tx != fep->cur_tx) {  			if (netif_queue_stopped(ndev))  				netif_wake_queue(ndev);  		}  	} -	fep->dirty_tx = bdp; -	spin_unlock(&fep->hw_lock); +	return;  } @@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)  		int_events = readl(fep->hwp + FEC_IEVENT);  		writel(int_events, fep->hwp + FEC_IEVENT); -		if (int_events & FEC_ENET_RXF) { +		if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {  			ret = IRQ_HANDLED;  			/* Disable the RX interrupt */ @@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)  			}  		} -		/* Transmit OK, or non-fatal error. Update the buffer -		 * descriptors. FEC handles all errors, we just discover -		 * them as part of the transmit process. 
-		 */ -		if (int_events & FEC_ENET_TXF) { -			ret = IRQ_HANDLED; -			fec_enet_tx(ndev); -		} -  		if (int_events & FEC_ENET_MII) {  			ret = IRQ_HANDLED;  			complete(&fep->mdio_done); @@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)  	int pkts = fec_enet_rx(ndev, budget);  	struct fec_enet_private *fep = netdev_priv(ndev); +	fec_enet_tx(ndev); +  	if (pkts < budget) {  		napi_complete(napi);  		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); @@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)  	/* ...and the same for transmit */  	bdp = fep->tx_bd_base; +	fep->cur_tx = bdp;  	for (i = 0; i < TX_RING_SIZE; i++) {  		/* Initialize the BD for every fragment in the page. */ @@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)  	/* Set the last buffer to wrap */  	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);  	bdp->cbd_sc |= BD_SC_WRAP; +	fep->dirty_tx = bdp;  	fec_restart(ndev, 0); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 01579b8e37c..f5390071efd 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -97,6 +97,13 @@ struct bufdesc {  	unsigned short cbd_sc;	/* Control and status info */  	unsigned long cbd_bufaddr;	/* Buffer address */  }; +#else +struct bufdesc { +	unsigned short	cbd_sc;			/* Control and status info */ +	unsigned short	cbd_datlen;		/* Data length */ +	unsigned long	cbd_bufaddr;		/* Buffer address */ +}; +#endif  struct bufdesc_ex {  	struct bufdesc desc; @@ -107,14 +114,6 @@ struct bufdesc_ex {  	unsigned short res0[4];  }; -#else -struct bufdesc { -	unsigned short	cbd_sc;			/* Control and status info */ -	unsigned short	cbd_datlen;		/* Data length */ -	unsigned long	cbd_bufaddr;		/* Buffer address */ -}; -#endif -  /*   *	The following definitions courtesy of commproc.h, which where   *	Copyright (c) 1997 Dan Malek (dmalek@jlc.net). 
@@ -214,8 +213,6 @@ struct fec_enet_private {  	unsigned char *tx_bounce[TX_RING_SIZE];  	struct	sk_buff *tx_skbuff[TX_RING_SIZE];  	struct	sk_buff *rx_skbuff[RX_RING_SIZE]; -	ushort	skb_cur; -	ushort	skb_dirty;  	/* CPM dual port RAM relative addresses */  	dma_addr_t	bd_dma; @@ -227,7 +224,6 @@ struct fec_enet_private {  	/* The ring entries to be free()ed */  	struct bufdesc	*dirty_tx; -	uint	tx_full;  	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */  	spinlock_t hw_lock; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 8900398ba10..28fb50a1e9c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)  	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); -	rtl_tx_performance_tweak(pdev, -		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +	if (tp->dev->mtu <= ETH_DATA_LEN) { +		rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) | +					 PCI_EXP_DEVCTL_NOSNOOP_EN); +	}  }  static void rtl_hw_start_8168bef(struct rtl8169_private *tp) @@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)  	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); -	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); +	if (tp->dev->mtu <= ETH_DATA_LEN) +		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);  	rtl_disable_clock_request(pdev); @@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)  	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); -	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); +	if (tp->dev->mtu <= ETH_DATA_LEN) +		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);  	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);  } @@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)  	RTL_W8(MaxTxPacketSize, 
TxPacketMax); -	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); +	if (tp->dev->mtu <= ETH_DATA_LEN) +		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);  	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);  } @@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)  	RTL_W8(MaxTxPacketSize, TxPacketMax); -	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); +	if (tp->dev->mtu <= ETH_DATA_LEN) +		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);  	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);  } @@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)  	rtl_csi_access_enable_1(tp); -	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); +	if (tp->dev->mtu <= ETH_DATA_LEN) +		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);  	RTL_W8(MaxTxPacketSize, TxPacketMax); @@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)  	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); -	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); +	if (tp->dev->mtu <= ETH_DATA_LEN) +		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);  	RTL_W8(MaxTxPacketSize, TxPacketMax); @@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)  	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); -	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); +	if (tp->dev->mtu <= ETH_DATA_LEN) +		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);  	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);  	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index bf57b3cb16a..0bc00991d31 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, 
u32 txq_entries)  						tx_queue->txd.entries);  	} +	efx_device_detach_sync(efx);  	efx_stop_all(efx);  	efx_stop_interrupts(efx, true); @@ -832,6 +833,7 @@ out:  	efx_start_interrupts(efx, true);  	efx_start_all(efx); +	netif_device_attach(efx->net_dev);  	return rc;  rollback: @@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)  	/* Flush efx_mac_work(), refill_workqueue, monitor_work */  	efx_flush_all(efx); -	/* Stop the kernel transmit interface late, so the watchdog -	 * timer isn't ticking over the flush */ +	/* Stop the kernel transmit interface.  This is only valid if +	 * the device is stopped or detached; otherwise the watchdog +	 * may fire immediately. +	 */ +	WARN_ON(netif_running(efx->net_dev) && +		netif_device_present(efx->net_dev));  	netif_tx_disable(efx->net_dev);  	efx_stop_datapath(efx); @@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)  	if (new_mtu > EFX_MAX_MTU)  		return -EINVAL; -	efx_stop_all(efx); -  	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); +	efx_device_detach_sync(efx); +	efx_stop_all(efx); +  	mutex_lock(&efx->mac_lock);  	net_dev->mtu = new_mtu;  	efx->type->reconfigure_mac(efx);  	mutex_unlock(&efx->mac_lock);  	efx_start_all(efx); +	netif_device_attach(efx->net_dev);  	return 0;  } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 2d756c1d714..0a90abd2421 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -210,6 +210,7 @@ struct efx_tx_queue {   *	Will be %NULL if the buffer slot is currently free.   * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.   *	Will be %NULL if the buffer slot is currently free. + * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.   * @len: Buffer length, in bytes.   * @flags: Flags for buffer and packet state.   
*/ @@ -219,7 +220,8 @@ struct efx_rx_buffer {  		struct sk_buff *skb;  		struct page *page;  	} u; -	unsigned int len; +	u16 page_offset; +	u16 len;  	u16 flags;  };  #define EFX_RX_BUF_PAGE		0x0001 diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index d780a0d096b..879ff5849bb 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;  static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,  					     struct efx_rx_buffer *buf)  { -	/* Offset is always within one page, so we don't need to consider -	 * the page order. -	 */ -	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) + -		efx->type->rx_buffer_hash_size; +	return buf->page_offset + efx->type->rx_buffer_hash_size;  }  static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)  { @@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)  	struct efx_nic *efx = rx_queue->efx;  	struct efx_rx_buffer *rx_buf;  	struct page *page; +	unsigned int page_offset;  	struct efx_rx_page_state *state;  	dma_addr_t dma_addr;  	unsigned index, count; @@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)  		state->dma_addr = dma_addr;  		dma_addr += sizeof(struct efx_rx_page_state); +		page_offset = sizeof(struct efx_rx_page_state);  	split:  		index = rx_queue->added_count & rx_queue->ptr_mask;  		rx_buf = efx_rx_buffer(rx_queue, index);  		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;  		rx_buf->u.page = page; +		rx_buf->page_offset = page_offset;  		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;  		rx_buf->flags = EFX_RX_BUF_PAGE;  		++rx_queue->added_count; @@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)  			/* Use the second half of the page */  			get_page(page);  			dma_addr += (PAGE_SIZE >> 1); +			page_offset += (PAGE_SIZE >> 1);  			++count;  			goto split;  		} @@ -236,7 +236,8 
@@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)  }  static void efx_unmap_rx_buffer(struct efx_nic *efx, -				struct efx_rx_buffer *rx_buf) +				struct efx_rx_buffer *rx_buf, +				unsigned int used_len)  {  	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {  		struct efx_rx_page_state *state; @@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,  				       state->dma_addr,  				       efx_rx_buf_size(efx),  				       DMA_FROM_DEVICE); +		} else if (used_len) { +			dma_sync_single_for_cpu(&efx->pci_dev->dev, +						rx_buf->dma_addr, used_len, +						DMA_FROM_DEVICE);  		}  	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {  		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr, @@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,  static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,  			       struct efx_rx_buffer *rx_buf)  { -	efx_unmap_rx_buffer(rx_queue->efx, rx_buf); +	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);  	efx_free_rx_buffer(rx_queue->efx, rx_buf);  } @@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,  		goto out;  	} -	/* Release card resources - assumes all RX buffers consumed in-order -	 * per RX queue +	/* Release and/or sync DMA mapping - assumes all RX buffers +	 * consumed in-order per RX queue  	 */ -	efx_unmap_rx_buffer(efx, rx_buf); +	efx_unmap_rx_buffer(efx, rx_buf, len);  	/* Prefetch nice and early so data will (hopefully) be in cache by  	 * the time we look at it. 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7e93df6585e..01ffbc48698 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)  	writel(vlan, &priv->host_port_regs->port_vlan); -	for (i = 0; i < 2; i++) +	for (i = 0; i < priv->data.slaves; i++)  		slave_write(priv->slaves + i, vlan, reg);  	cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,  |