diff options
| author | Roland Dreier <rdreier@cisco.com> | 2007-10-09 15:47:37 -0700 | 
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-10 16:55:31 -0700 | 
| commit | bfe13f54f5028cff034e3b6247e9f433908f4f4f (patch) | |
| tree | 428853641da40769b53629e393a582a0e6956d45 /drivers/net/ibm_emac/ibm_emac_mal.c | |
| parent | 9153f66a5b8e63c61374df4e6a4cbd0056e45178 (diff) | |
| download | olio-linux-3.10-bfe13f54f5028cff034e3b6247e9f433908f4f4f.tar.xz olio-linux-3.10-bfe13f54f5028cff034e3b6247e9f433908f4f4f.zip  | |
ibm_emac: Convert to use napi_struct independent of struct net_device
Commit da3dedd9 ("[NET]: Make NAPI polling independent of struct
net_device objects.") changed the interface to NAPI polling.  Fix up
the ibm_emac driver so that it works with this new interface.  This is
actually a nice cleanup because ibm_emac is one of the drivers that
wants to have multiple NAPI structures for a single net_device.
Tested with the internal MAC of a PowerPC 440SPe SoC with an AMCC
'Yucca' evaluation board.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/ibm_emac/ibm_emac_mal.c')
| -rw-r--r-- | drivers/net/ibm_emac/ibm_emac_mal.c | 48 | 
1 file changed, 17 insertions, 31 deletions
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c index cabd9846a5e..4e49e8c4f87 100644 --- a/drivers/net/ibm_emac/ibm_emac_mal.c +++ b/drivers/net/ibm_emac/ibm_emac_mal.c @@ -207,10 +207,10 @@ static irqreturn_t mal_serr(int irq, void *dev_instance)  static inline void mal_schedule_poll(struct ibm_ocp_mal *mal)  { -	if (likely(netif_rx_schedule_prep(&mal->poll_dev))) { +	if (likely(napi_schedule_prep(&mal->napi))) {  		MAL_DBG2("%d: schedule_poll" NL, mal->def->index);  		mal_disable_eob_irq(mal); -		__netif_rx_schedule(&mal->poll_dev); +		__napi_schedule(&mal->napi);  	} else  		MAL_DBG2("%d: already in poll" NL, mal->def->index);  } @@ -273,11 +273,11 @@ static irqreturn_t mal_rxde(int irq, void *dev_instance)  	return IRQ_HANDLED;  } -static int mal_poll(struct net_device *ndev, int *budget) +static int mal_poll(struct napi_struct *napi, int budget)  { -	struct ibm_ocp_mal *mal = ndev->priv; +	struct ibm_ocp_mal *mal = container_of(napi, struct ibm_ocp_mal, napi);  	struct list_head *l; -	int rx_work_limit = min(ndev->quota, *budget), received = 0, done; +	int received = 0;  	MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget,  		 rx_work_limit); @@ -295,38 +295,34 @@ static int mal_poll(struct net_device *ndev, int *budget)  	list_for_each(l, &mal->poll_list) {  		struct mal_commac *mc =  		    list_entry(l, struct mal_commac, poll_list); -		int n = mc->ops->poll_rx(mc->dev, rx_work_limit); +		int n = mc->ops->poll_rx(mc->dev, budget);  		if (n) {  			received += n; -			rx_work_limit -= n; -			if (rx_work_limit <= 0) { -				done = 0; +			budget -= n; +			if (budget <= 0)  				goto more_work;	// XXX What if this is the last one ? 
-			}  		}  	}  	/* We need to disable IRQs to protect from RXDE IRQ here */  	local_irq_disable(); -	__netif_rx_complete(ndev); +	__napi_complete(napi);  	mal_enable_eob_irq(mal);  	local_irq_enable(); -	done = 1; -  	/* Check for "rotting" packet(s) */  	list_for_each(l, &mal->poll_list) {  		struct mal_commac *mc =  		    list_entry(l, struct mal_commac, poll_list);  		if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) {  			MAL_DBG2("%d: rotting packet" NL, mal->def->index); -			if (netif_rx_reschedule(ndev, received)) +			if (napi_reschedule(napi))  				mal_disable_eob_irq(mal);  			else  				MAL_DBG2("%d: already in poll list" NL,  					 mal->def->index); -			if (rx_work_limit > 0) +			if (budget > 0)  				goto again;  			else  				goto more_work; @@ -335,12 +331,8 @@ static int mal_poll(struct net_device *ndev, int *budget)  	}        more_work: -	ndev->quota -= received; -	*budget -= received; - -	MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget, -		 done ? 0 : 1); -	return done ? 
0 : 1; +	MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, budget, received); +	return received;  }  static void mal_reset(struct ibm_ocp_mal *mal) @@ -425,11 +417,8 @@ static int __init mal_probe(struct ocp_device *ocpdev)  	mal->def = ocpdev->def;  	INIT_LIST_HEAD(&mal->poll_list); -	set_bit(__LINK_STATE_START, &mal->poll_dev.state); -	mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT; -	mal->poll_dev.poll = mal_poll; -	mal->poll_dev.priv = mal; -	atomic_set(&mal->poll_dev.refcnt, 1); +	mal->napi.weight = CONFIG_IBM_EMAC_POLL_WEIGHT; +	mal->napi.poll = mal_poll;  	INIT_LIST_HEAD(&mal->list); @@ -520,11 +509,8 @@ static void __exit mal_remove(struct ocp_device *ocpdev)  	MAL_DBG("%d: remove" NL, mal->def->index); -	/* Syncronize with scheduled polling,  -	   stolen from net/core/dev.c:dev_close()  -	 */ -	clear_bit(__LINK_STATE_START, &mal->poll_dev.state); -	netif_poll_disable(&mal->poll_dev); +	/* Synchronize with scheduled polling */ +	napi_disable(&mal->napi);  	if (!list_empty(&mal->list)) {  		/* This is *very* bad */  |