Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/xen-netback/common.h    |  3
-rw-r--r--   drivers/net/xen-netback/interface.c | 23
-rw-r--r--   drivers/net/xen-netback/netback.c   | 62
3 files changed, 62 insertions, 26 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3338c..9d7f1723dd8 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b7d41f8c338..b8c5193bd42 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -343,17 +343,22 @@ err:
 	return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
 	struct net_device *dev = vif->dev;
-	if (netif_carrier_ok(dev)) {
-		rtnl_lock();
-		netif_carrier_off(dev); /* discard queued packets */
-		if (netif_running(dev))
-			xenvif_down(vif);
-		rtnl_unlock();
-		xenvif_put(vif);
-	}
+
+	rtnl_lock();
+	netif_carrier_off(dev); /* discard queued packets */
+	if (netif_running(dev))
+		xenvif_down(vif);
+	rtnl_unlock();
+	xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+	if (netif_carrier_ok(vif->dev))
+		xenvif_carrier_off(vif);
 
 	atomic_dec(&vif->refcnt);
 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f2d6b78d901..c2e3336b4f9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -888,6 +888,13 @@ static void netbk_tx_err(struct xenvif *vif,
 	xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+	netdev_err(vif->dev, "fatal error; disabling device\n");
+	xenvif_carrier_off(vif);
+	xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
 				struct xen_netif_tx_request *txp,
@@ -901,19 +908,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
 	do {
 		if (frags >= work_to_do) {
-			netdev_dbg(vif->dev, "Need more frags\n");
+			netdev_err(vif->dev, "Need more frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-			netdev_dbg(vif->dev, "Too many frags\n");
+			netdev_err(vif->dev, "Too many frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 		       sizeof(*txp));
 		if (txp->size > first->size) {
-			netdev_dbg(vif->dev, "Frags galore\n");
+			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
@@ -921,8 +931,9 @@ static int netbk_count_requests(struct xenvif *vif,
 		frags++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 				 txp->offset, txp->size);
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 	} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -1095,7 +1106,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
 	do {
 		if (unlikely(work_to_do-- <= 0)) {
-			netdev_dbg(vif->dev, "Missing extra info\n");
+			netdev_err(vif->dev, "Missing extra info\n");
+			netbk_fatal_tx_err(vif);
 			return -EBADR;
 		}
 
@@ -1104,8 +1116,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			vif->tx.req_cons = ++cons;
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
+			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
 
@@ -1121,13 +1134,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
 			     struct xen_netif_extra_info *gso)
 {
 	if (!gso->u.gso.size) {
-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+		netdev_err(vif->dev, "GSO size must not be zero.\n");
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	/* Currently only TCPv4 S.O. is supported. */
 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
@@ -1264,9 +1279,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		/* Get a netif from the list with work to do. */
 		vif = poll_net_schedule_list(netbk);
+		/* This can sometimes happen because the test of
+		 * list_empty(net_schedule_list) at the top of the
+		 * loop is unlocked.  Just go back and have another
+		 * look.
+		 */
 		if (!vif)
 			continue;
 
+		if (vif->tx.sring->req_prod - vif->tx.req_cons >
+		    XEN_NETIF_TX_RING_SIZE) {
+			netdev_err(vif->dev,
+				   "Impossible number of requests. "
+				   "req_prod %d, req_cons %d, size %ld\n",
+				   vif->tx.sring->req_prod, vif->tx.req_cons,
+				   XEN_NETIF_TX_RING_SIZE);
+			netbk_fatal_tx_err(vif);
+			continue;
+		}
+
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 		if (!work_to_do) {
 			xenvif_put(vif);
 			continue;
@@ -1294,17 +1325,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			work_to_do = xen_netbk_get_extras(vif, extras,
 							  work_to_do);
 			idx = vif->tx.req_cons;
-			if (unlikely(work_to_do < 0)) {
-				netbk_tx_err(vif, &txreq, idx);
+			if (unlikely(work_to_do < 0))
 				continue;
-			}
 		}
 
 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-		if (unlikely(ret < 0)) {
-			netbk_tx_err(vif, &txreq, idx - ret);
+		if (unlikely(ret < 0))
 			continue;
-		}
+
 		idx += ret;
 
 		if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1344,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_fatal_tx_err(vif);
 			continue;
 		}
 
@@ -1348,8 +1376,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 			if (netbk_set_skb_gso(vif, skb, gso)) {
+				/* Failure in netbk_set_skb_gso is fatal. */
 				kfree_skb(skb);
-				netbk_tx_err(vif, &txreq, idx);
 				continue;
 			}
 		}
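
Note: the decisive test in the xen_netbk_tx_build_gops() hunk above is the unsigned ring-index comparison. req_prod and req_cons are free-running counters that are never masked when stored, so their difference is the number of outstanding requests even after the counters wrap, and a difference larger than the ring size can only come from a buggy or malicious frontend. Below is a minimal standalone sketch of that idea; RING_SIZE, ring_idx_t, and ring_is_sane are illustrative names invented here, not taken from the driver.

/* ring_sanity.c - standalone sketch of the producer/consumer sanity
 * check added by this patch.  Build: cc -o ring_sanity ring_sanity.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t ring_idx_t;	/* free-running index, never masked when stored */

#define RING_SIZE 256u		/* illustrative; the real size depends on PAGE_SIZE */

/* req_prod - req_cons is the count of outstanding requests.  Because both
 * counters are unsigned and wrap together, the subtraction stays correct
 * across wraparound; a result above RING_SIZE is impossible for an honest
 * frontend, so it marks the ring contents as garbage. */
static bool ring_is_sane(ring_idx_t req_prod, ring_idx_t req_cons)
{
	return req_prod - req_cons <= RING_SIZE;
}

int main(void)
{
	printf("%d\n", ring_is_sane(10, 5));			/* 1: 5 outstanding */
	printf("%d\n", ring_is_sane(3, UINT32_MAX - 1));	/* 1: 5 outstanding, across wrap */
	printf("%d\n", ring_is_sane(5000, 5));			/* 0: impossible, ring is garbage */
	return 0;
}

When the in-kernel check fires, netbk_fatal_tx_err() logs the error, takes the carrier down through xenvif_carrier_off() so the vif generates no further traffic, and drops a vif reference. This is also why the fatal paths in netbk_count_requests(), xen_netbk_get_extras(), and netbk_set_skb_gso() no longer call netbk_tx_err(): the fatal handler has already released the reference, and pushing responses onto a ring deemed garbage would be pointless.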