Diffstat (limited to 'drivers/net/ethernet/freescale/fec.c')
 drivers/net/ethernet/freescale/fec.c | 195 +++++++++++++++++-------------
 1 file changed, 108 insertions(+), 87 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fccc3bf2141..73195f643c9 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct bufdesc *bdp;
 	void *bufaddr;
 	unsigned short	status;
-	unsigned long flags;
+	unsigned int index;
 
 	if (!fep->link) {
 		/* Link is down or autonegotiation is in progress. */
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&fep->hw_lock, flags);
-
 	/* Fill in a Tx ring entry */
 	bdp = fep->cur_tx;
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		 * This should not happen, since ndev->tbusy should be set.
 		 */
 		printk("%s: tx queue full!.\n", ndev->name);
-		spin_unlock_irqrestore(&fep->hw_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	 * 4-byte boundaries. Use bounce buffers to copy data
 	 * and get it aligned. Ugh.
 	 */
+	if (fep->bufdesc_ex)
+		index = (struct bufdesc_ex *)bdp -
+			(struct bufdesc_ex *)fep->tx_bd_base;
+	else
+		index = bdp - fep->tx_bd_base;
+
 	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-		unsigned int index;
-		if (fep->bufdesc_ex)
-			index = (struct bufdesc_ex *)bdp -
-				(struct bufdesc_ex *)fep->tx_bd_base;
-		else
-			index = bdp - fep->tx_bd_base;
 		memcpy(fep->tx_bounce[index], skb->data, skb->len);
 		bufaddr = fep->tx_bounce[index];
 	}
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		swap_buffer(bufaddr, skb->len);
 
 	/* Save skb pointer */
-	fep->tx_skbuff[fep->skb_cur] = skb;
-
-	ndev->stats.tx_bytes += skb->len;
-	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+	fep->tx_skbuff[index] = skb;
 
 	/* Push the data cache so the CPM does not get stale memory
 	 * data.
@@ -331,29 +326,72 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}
 	}
-	/* Trigger transmission start */
-	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
 	/* If this was the last BD in the ring, start at the beginning again. */
 	if (status & BD_ENET_TX_WRAP)
 		bdp = fep->tx_bd_base;
 	else
 		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
-	if (bdp == fep->dirty_tx) {
-		fep->tx_full = 1;
+	fep->cur_tx = bdp;
+
+	if (fep->cur_tx == fep->dirty_tx)
 		netif_stop_queue(ndev);
-	}
 
-	fep->cur_tx = bdp;
+	/* Trigger transmission start */
+	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
 	skb_tx_timestamp(skb);
 
-	spin_unlock_irqrestore(&fep->hw_lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	struct bufdesc *bdp;
+	unsigned int i;
+
+	/* Initialize the receive buffer descriptors. */
+	bdp = fep->rx_bd_base;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page. */
+		if (bdp->cbd_bufaddr)
+			bdp->cbd_sc = BD_ENET_RX_EMPTY;
+		else
+			bdp->cbd_sc = 0;
+		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	fep->cur_rx = fep->rx_bd_base;
+
+	/* ...and the same for transmit */
+	bdp = fep->tx_bd_base;
+	fep->cur_tx = bdp;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page. */
+		bdp->cbd_sc = 0;
+		if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+			dev_kfree_skb_any(fep->tx_skbuff[i]);
+			fep->tx_skbuff[i] = NULL;
+		}
+		bdp->cbd_bufaddr = 0;
+		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp->cbd_sc |= BD_SC_WRAP;
+	fep->dirty_tx = bdp;
+}
+
 /* This function is called to start or restart the FEC during a link
  * change.  This only happens when switching between half and full
  * duplex.
@@ -397,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)
 	/* Set maximum receive buffer size. */
 	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
 
+	fec_enet_bd_init(ndev);
+
 	/* Set receive and transmit descriptor base. */
 	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
 	if (fep->bufdesc_ex)
@@ -406,11 +446,7 @@ fec_restart(struct net_device *ndev, int duplex)
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
 			* RX_RING_SIZE,	fep->hwp + FEC_X_DES_START);
 
-	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
-	fep->cur_rx = fep->rx_bd_base;
-
-	/* Reset SKB transmit buffers. */
-	fep->skb_cur = fep->skb_dirty = 0;
 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
 		if (fep->tx_skbuff[i]) {
 			dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +609,35 @@ fec_enet_tx(struct net_device *ndev)
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
+	int	index = 0;
 
 	fep = netdev_priv(ndev);
-	spin_lock(&fep->hw_lock);
 	bdp = fep->dirty_tx;
 
+	/* get next bdp of dirty_tx */
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+		bdp = fep->tx_bd_base;
+	else
+		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-		if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+		/* current queue is empty */
+		if (bdp == fep->cur_tx)
 			break;
 
+		if (fep->bufdesc_ex)
+			index = (struct bufdesc_ex *)bdp -
+				(struct bufdesc_ex *)fep->tx_bd_base;
+		else
+			index = bdp - fep->tx_bd_base;
+
 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = 0;
-		skb = fep->tx_skbuff[fep->skb_dirty];
+		skb = fep->tx_skbuff[index];
+
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
 				   BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +682,9 @@ fec_enet_tx(struct net_device *ndev)
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
-		fep->tx_skbuff[fep->skb_dirty] = NULL;
-		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+		fep->tx_skbuff[index] = NULL;
+
+		fep->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */
 		if (status & BD_ENET_TX_WRAP)
@@ -642,14 +694,12 @@ fec_enet_tx(struct net_device *ndev)
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
-		if (fep->tx_full) {
-			fep->tx_full = 0;
+		if (fep->dirty_tx != fep->cur_tx) {
 			if (netif_queue_stopped(ndev))
 				netif_wake_queue(ndev);
 		}
 	}
-	fep->dirty_tx = bdp;
 
-	spin_unlock(&fep->hw_lock);
+	return;
 }
 
@@ -816,7 +866,7 @@ fec_enet_interrupt(int irq, void *dev_id)
 		int_events = readl(fep->hwp + FEC_IEVENT);
 		writel(int_events, fep->hwp + FEC_IEVENT);
 
-		if (int_events & FEC_ENET_RXF) {
+		if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
 			ret = IRQ_HANDLED;
 
 			/* Disable the RX interrupt */
@@ -827,15 +877,6 @@ fec_enet_interrupt(int irq, void *dev_id)
 			}
 		}
 
-		/* Transmit OK, or non-fatal error. Update the buffer
-		 * descriptors. FEC handles all errors, we just discover
-		 * them as part of the transmit process.
-		 */
-		if (int_events & FEC_ENET_TXF) {
-			ret = IRQ_HANDLED;
-			fec_enet_tx(ndev);
-		}
-
 		if (int_events & FEC_ENET_MII) {
 			ret = IRQ_HANDLED;
 			complete(&fep->mdio_done);
@@ -851,6 +892,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 	int pkts = fec_enet_rx(ndev, budget);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
+	fec_enet_tx(ndev);
+
 	if (pkts < budget) {
 		napi_complete(napi);
 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -939,24 +982,29 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 		goto spin_unlock;
 	}
 
-	/* Duplex link change */
 	if (phy_dev->link) {
-		if (fep->full_duplex != phy_dev->duplex) {
-			fec_restart(ndev, phy_dev->duplex);
-			/* prevent unnecessary second fec_restart() below */
+		if (!fep->link) {
 			fep->link = phy_dev->link;
 			status_change = 1;
 		}
-	}
 
-	/* Link on or off change */
-	if (phy_dev->link != fep->link) {
-		fep->link = phy_dev->link;
-		if (phy_dev->link)
+		if (fep->full_duplex != phy_dev->duplex)
+			status_change = 1;
+
+		if (phy_dev->speed != fep->speed) {
+			fep->speed = phy_dev->speed;
+			status_change = 1;
+		}
+
+		/* if any of the above changed restart the FEC */
+		if (status_change)
 			fec_restart(ndev, phy_dev->duplex);
-		else
+	} else {
+		if (fep->link) {
 			fec_stop(ndev);
-		status_change = 1;
+			fep->link = phy_dev->link;
+			status_change = 1;
+		}
 	}
 
 spin_unlock:
@@ -1333,7 +1381,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 static void fec_enet_free_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
+	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
 
@@ -1357,7 +1405,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
+	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
 
@@ -1442,6 +1490,7 @@ fec_enet_close(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
 	/* Don't know what to do yet. */
+	napi_disable(&fep->napi);
 	fep->opened = 0;
 	netif_stop_queue(ndev);
 	fec_stop(ndev);
@@ -1597,8 +1646,6 @@ static int fec_enet_init(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct bufdesc *cbd_base;
-	struct bufdesc *bdp;
-	int i;
 
 	/* Allocate memory for buffer descriptors. */
 	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1608,6 +1655,7 @@ static int fec_enet_init(struct net_device *ndev)
 		return -ENOMEM;
 	}
 
+	memset(cbd_base, 0, PAGE_SIZE);
 	spin_lock_init(&fep->hw_lock);
 
 	fep->netdev = ndev;
@@ -1631,33 +1679,6 @@ static int fec_enet_init(struct net_device *ndev)
 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
 
-	/* Initialize the receive buffer descriptors. */
-	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
-
-		/* Initialize the BD for every fragment in the page. */
-		bdp->cbd_sc = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-	}
-
-	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-	bdp->cbd_sc |= BD_SC_WRAP;
-
-	/* ...and the same for transmit */
-	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++) {
-
-		/* Initialize the BD for every fragment in the page. */
-		bdp->cbd_sc = 0;
-		bdp->cbd_bufaddr = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-	}
-
-	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-	bdp->cbd_sc |= BD_SC_WRAP;
-
 	fec_restart(ndev, 0);
 
 	return 0;
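Note on the index computation: the bounce-buffer and tx_skbuff changes in this diff both derive a ring index from a descriptor pointer, and they cast before subtracting because an extended descriptor (struct bufdesc_ex) is larger than a basic one, so pointer arithmetic has to use the element type the ring was actually laid out with. Below is a minimal standalone sketch of that idea; the struct layouts are simplified stand-ins, not the real ones from fec.h.

#include <stdio.h>

/* Simplified stand-ins: the real layouts live in fec.h and carry
 * more fields, but the size difference is what matters here. */
struct bufdesc {
	unsigned short cbd_datlen;
	unsigned short cbd_sc;
	unsigned long  cbd_bufaddr;
};

struct bufdesc_ex {
	struct bufdesc desc;
	unsigned long  cbd_esc;
	unsigned long  cbd_prot;
	unsigned long  cbd_bfcr;
	unsigned long  ts;
};

/* Mirrors the pattern in the patch: cast to the ring's true element
 * type before subtracting, otherwise the index is scaled by the
 * wrong element size. */
static unsigned int bd_index(struct bufdesc *base, struct bufdesc *bdp,
			     int bufdesc_ex)
{
	if (bufdesc_ex)
		return (struct bufdesc_ex *)bdp - (struct bufdesc_ex *)base;
	return bdp - base;
}

int main(void)
{
	struct bufdesc_ex ring[8];
	struct bufdesc *base = &ring[0].desc;
	struct bufdesc *bdp = &ring[3].desc;

	/* Prints 3; subtracting without the cast would yield a wrong,
	 * larger index because the stride is sizeof(struct bufdesc_ex). */
	printf("index = %u\n", bd_index(base, bdp, 1));
	return 0;
}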
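The removal of tx_full (and of hw_lock on the hot path) also changes how ring occupancy is tracked: dirty_tx now points at the last descriptor already reclaimed, fec_enet_tx() starts cleaning one past it, and fec_enet_start_xmit() stops the queue when advancing cur_tx lands on dirty_tx, deliberately leaving one slot unused so "full" and "empty" stay distinguishable without a flag. The following is a hedged, index-based model of that convention with hypothetical names; the driver itself walks descriptor pointers, not indices.

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 4

/* cur_tx: next slot the producer will fill.
 * dirty_tx: last slot the consumer has reclaimed; cleaning resumes
 * one past it, as in fec_enet_tx() after this patch. */
static unsigned int cur_tx;
static unsigned int dirty_tx = TX_RING_SIZE - 1;

static unsigned int next_slot(unsigned int i)
{
	return (i + 1) % TX_RING_SIZE;	/* the BD_ENET_TX_WRAP case */
}

/* Full when the producer catches the consumer: one slot stays unused
 * so this state is distinct from an empty ring. */
static bool ring_full(void)
{
	return cur_tx == dirty_tx;
}

static bool xmit_one(void)
{
	if (ring_full())
		return false;		/* netif_stop_queue() in the driver */
	cur_tx = next_slot(cur_tx);
	return true;
}

static bool clean_one(void)
{
	unsigned int bdp = next_slot(dirty_tx);

	if (bdp == cur_tx)
		return false;		/* current queue is empty */
	dirty_tx = bdp;			/* dirty_tx != cur_tx again: wake */
	return true;
}

int main(void)
{
	int sent = 0;

	while (xmit_one())
		sent++;
	printf("queued %d of %d slots before full\n", sent, TX_RING_SIZE);
	clean_one();
	printf("full after one completion? %s\n", ring_full() ? "yes" : "no");
	return 0;
}

One plausible reading of why the spinlock could be dropped: after this change the transmit path only writes cur_tx and the cleanup path only writes dirty_tx, so each variable has a single writer and the two sides no longer race on shared counters the way skb_cur/skb_dirty/tx_full did.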