Diffstat (limited to 'drivers/net/b44.c')
-rw-r--r--	drivers/net/b44.c	146
1 file changed, 70 insertions(+), 76 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 293f9c16e78..37617abc164 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -135,7 +135,6 @@ static void b44_init_rings(struct b44 *);
 
 static void b44_init_hw(struct b44 *, int);
 
-static int dma_desc_align_mask;
 static int dma_desc_sync_size;
 static int instance;
 
@@ -150,9 +149,8 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 						unsigned long offset,
 						enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_device(sdev, dma_base,
-					     offset & dma_desc_align_mask,
-					     dma_desc_sync_size, dir);
+	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+				   dma_desc_sync_size, dir);
 }
 
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -160,9 +158,8 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
-					  offset & dma_desc_align_mask,
-					  dma_desc_sync_size, dir);
+	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+				dma_desc_sync_size, dir);
 }
 
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -608,10 +605,10 @@ static void b44_tx(struct b44 *bp)
 
 		BUG_ON(skb == NULL);
 
-		ssb_dma_unmap_single(bp->sdev,
-				     rp->mapping,
-				     skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev,
+				 rp->mapping,
+				 skb->len,
+				 DMA_TO_DEVICE);
 		rp->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
@@ -648,29 +645,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (skb == NULL)
 		return -ENOMEM;
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data,
-				     RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+				 RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
-	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 		/* Sigh... */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping,
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
 					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
-		mapping = ssb_dma_map_single(bp->sdev, skb->data,
-					     RX_PKT_BUF_SZ,
-					     DMA_FROM_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
-			mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+					 RX_PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -745,9 +742,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 					     dest_idx * sizeof(*dest_desc),
 					     DMA_BIDIRECTIONAL);
 
-	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
-				       RX_PKT_BUF_SZ,
-				       DMA_FROM_DEVICE);
+	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+				   RX_PKT_BUF_SZ,
+				   DMA_FROM_DEVICE);
 }
 
 static int b44_rx(struct b44 *bp, int budget)
@@ -767,9 +764,9 @@ static int b44_rx(struct b44 *bp, int budget)
 		struct rx_header *rh;
 		u16 len;
 
-		ssb_dma_sync_single_for_cpu(bp->sdev, map,
-					    RX_PKT_BUF_SZ,
-					    DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+					RX_PKT_BUF_SZ,
+					DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
 		len = le16_to_cpu(rh->len);
 		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -801,8 +798,8 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 				goto drop_it;
-			ssb_dma_unmap_single(bp->sdev, map,
-					     skb_size, DMA_FROM_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, map,
+					 skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
 			skb_pull(skb, RX_PKT_OFFSET);
@@ -954,24 +951,24 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_out;
 	}
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
-	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping, len,
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 					     DMA_TO_DEVICE);
 
 		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
-		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
-					     len, DMA_TO_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping,
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+					 len, DMA_TO_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
 						     len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
@@ -1068,8 +1065,8 @@ static void b44_free_rings(struct b44 *bp)
 		if (rp->skb == NULL)
 			continue;
 
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1080,8 +1077,8 @@ static void b44_free_rings(struct b44 *bp)
 		if (rp->skb == NULL)
 			continue;
 
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+				 DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1103,14 +1100,12 @@ static void b44_init_rings(struct b44 *bp)
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_BIDIRECTIONAL);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_TO_DEVICE);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
 
 	for (i = 0; i < bp->rx_pending; i++) {
 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1130,27 +1125,23 @@ static void b44_free_consistent(struct b44 *bp)
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_BIDIRECTIONAL);
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->rx_ring, bp->rx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_TO_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->tx_ring, bp->tx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
 	}
@@ -1175,7 +1166,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		goto out_err;
 
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1187,11 +1179,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!rx_ring)
 			goto out_err;
 
-		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
-						 DMA_TABLE_BYTES,
-						 DMA_BIDIRECTIONAL);
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_BIDIRECTIONAL);
 
-		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
 			rx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(rx_ring);
 			goto out_err;
@@ -1202,7 +1194,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 
-	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
 		/* Allocation may have failed due to ssb_dma_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1214,11 +1207,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!tx_ring)
 			goto out_err;
 
-		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
-			                    DMA_TABLE_BYTES,
-			                    DMA_TO_DEVICE);
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_TO_DEVICE);
 
-		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
 			tx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(tx_ring);
 			goto out_err;
@@ -2176,12 +2169,14 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 			"Failed to powerup the bus\n");
 		goto err_out_free_dev;
 	}
-	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
-	if (err) {
+
+	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system\n");
 		goto err_out_powerdown;
 	}
+
 	err = b44_get_invariants(bp);
 	if (err) {
 		dev_err(sdev->dev,
@@ -2344,7 +2339,6 @@ static int __init b44_init(void)
 	int err;
 
 	/* Setup paramaters for syncing RX/TX DMA descriptors */
-	dma_desc_align_mask = ~(dma_desc_align_size - 1);
 	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
 
 	err = b44_pci_init();
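The conversion above is mechanical: every ssb_dma_* wrapper becomes the corresponding generic dma_* call from linux/dma-mapping.h, with the underlying struct device reached through sdev->dma_dev. The sketch below condenses the map/check/unmap pattern that b44_alloc_rx_skb() and b44_start_xmit() repeat inline; it is illustrative only, not code from the patch, and b44_map_rx_buf() is a hypothetical helper name.

#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

/*
 * Illustrative condensation of the recurring pattern in this patch.
 * ssb_dma_map_single(sdev, ...) becomes dma_map_single(sdev->dma_dev, ...),
 * and likewise for the mapping-error check and the unmap.
 */
static int b44_map_rx_buf(struct ssb_device *sdev, void *buf, size_t len,
			  dma_addr_t *mapping)
{
	*mapping = dma_map_single(sdev->dma_dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(sdev->dma_dev, *mapping))
		return -ENOMEM;

	/*
	 * The chip cannot DMA above 1GB, so a mapping that succeeded but
	 * landed outside the 30-bit window must still be unmapped before
	 * the caller falls back to a GFP_DMA bounce buffer.
	 */
	if (*mapping + len > DMA_BIT_MASK(30)) {
		dma_unmap_single(sdev->dma_dev, *mapping, len,
				 DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	return 0;
}

Two conversions are not one-for-one renames. The descriptor-sync helpers switch from the range variants to plain dma_sync_single_for_device()/dma_sync_single_for_cpu() on dma_base + offset, which is why dma_desc_align_mask can be deleted. And because dma_set_mask() covers only streaming mappings, the probe path adds an explicit dma_set_coherent_mask() call to keep the 30-bit limit applied to coherent allocations as well, where the old ssb_dma_set_mask() had set both at once.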