Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--	drivers/net/igb/igb_main.c | 164
1 file changed, 116 insertions, 48 deletions
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9b4e5895f5f..75155a27fdd 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -71,6 +71,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
@@ -94,7 +96,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
 static void igb_setup_mrqc(struct igb_adapter *);
-void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
 static int igb_sw_init(struct igb_adapter *);
@@ -111,7 +112,8 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
-static struct net_device_stats *igb_get_stats(struct net_device *);
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -986,7 +988,7 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
  * Attempt to configure interrupts using the best available
  * capabilities of the hardware and kernel.
  **/
-static void igb_set_interrupt_capability(struct igb_adapter *adapter)
+static int igb_set_interrupt_capability(struct igb_adapter *adapter)
 {
 	int err;
 	int numvecs, i;
@@ -1052,8 +1054,10 @@ msi_only:
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
-	/* Notify the stack of the (possibly) reduced Tx Queue count. */
-	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
+	/* Notify the stack of the (possibly) reduced queue counts. */
+	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+	return netif_set_real_num_rx_queues(adapter->netdev,
+					    adapter->num_rx_queues);
 }
 
 /**
@@ -1152,7 +1156,9 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int err;
 
-	igb_set_interrupt_capability(adapter);
+	err = igb_set_interrupt_capability(adapter);
+	if (err)
+		return err;
 
 	err = igb_alloc_q_vectors(adapter);
 	if (err) {
@@ -1530,7 +1536,9 @@ void igb_down(struct igb_adapter *adapter)
 	netif_carrier_off(netdev);
 
 	/* record the stats before reset*/
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);
 
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
@@ -1683,7 +1691,7 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
 	.ndo_start_xmit		= igb_xmit_frame_adv,
-	.ndo_get_stats		= igb_get_stats,
+	.ndo_get_stats64	= igb_get_stats64,
 	.ndo_set_rx_mode	= igb_set_rx_mode,
 	.ndo_set_multicast_list	= igb_set_rx_mode,
 	.ndo_set_mac_address	= igb_set_mac,
@@ -1856,8 +1864,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;
 
-	if (pci_using_dac)
+	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
+		netdev->vlan_features |= NETIF_F_HIGHDMA;
+	}
 
 	if (hw->mac.type >= e1000_82576)
 		netdev->features |= NETIF_F_SCTP_CSUM;
@@ -1888,9 +1898,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		goto err_eeprom;
 	}
 
-	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+	setup_timer(&adapter->watchdog_timer, igb_watchdog,
 	            (unsigned long) adapter);
-	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
 	            (unsigned long) adapter);
 
 	INIT_WORK(&adapter->reset_task, igb_reset_task);
@@ -2268,6 +2278,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
+	spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
 	if (hw->mac.type == e1000_82576)
 		adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
@@ -3475,7 +3486,9 @@ static void igb_watchdog_task(struct work_struct *work)
 		}
 	}
 
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -3542,6 +3555,8 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
 	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *ring;
+	unsigned int packets;
 
 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 	 * ints/sec - ITR timer value of 120 ticks.
@@ -3551,16 +3566,21 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 		goto set_itr_val;
 	}
 
-	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->rx_ring;
-		avg_wire_size = ring->total_bytes / ring->total_packets;
+	ring = q_vector->rx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = ring->total_bytes / packets;
 	}
 
-	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->tx_ring;
-		avg_wire_size = max_t(u32, avg_wire_size,
-		                      (ring->total_bytes /
-		                       ring->total_packets));
+	ring = q_vector->tx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = max_t(u32, avg_wire_size,
+			                      ring->total_bytes / packets);
 	}
 
 	/* if avg_wire_size isn't set no work was done */
@@ -3954,7 +3974,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 	}
 
 	tx_ring->buffer_info[i].skb = skb;
-	tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+	tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
 	/* multiply data chunks by size of headers */
 	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
 	tx_ring->buffer_info[i].gso_segs = gso_segs;
@@ -4069,7 +4089,11 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 
 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	tx_ring->tx_stats.restart_queue++;
+
+	u64_stats_update_begin(&tx_ring->tx_syncp2);
+	tx_ring->tx_stats.restart_queue2++;
+	u64_stats_update_end(&tx_ring->tx_syncp2);
+
 	return 0;
 }
 
@@ -4088,7 +4112,6 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	u32 tx_flags = 0;
 	u16 first;
 	u8 hdr_len = 0;
-	union skb_shared_tx *shtx = skb_tx(skb);
 
 	/* need: 1 descriptor per page,
 	 *       + 2 desc gap to keep tail from touching head,
@@ -4100,12 +4123,12 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	if (unlikely(shtx->hardware)) {
-		shtx->in_progress = 1;
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		tx_flags |= IGB_TX_FLAGS_TSTAMP;
 	}
 
-	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
+	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= IGB_TX_FLAGS_VLAN;
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
@@ -4207,16 +4230,22 @@ static void igb_reset_task(struct work_struct *work)
 }
 
 /**
- * igb_get_stats - Get System Network Statistics
+ * igb_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
  *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
 **/
-static struct net_device_stats *igb_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+						 struct rtnl_link_stats64 *stats)
 {
-	/* only return the current stats */
-	return &netdev->stats;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	memcpy(stats, &adapter->stats64, sizeof(*stats));
+	spin_unlock(&adapter->stats64_lock);
+
+	return stats;
 }
 
 /**
@@ -4298,15 +4327,17 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
  * @adapter: board private structure
 **/
-void igb_update_stats(struct igb_adapter *adapter)
+void igb_update_stats(struct igb_adapter *adapter,
+		      struct rtnl_link_stats64 *net_stats)
 {
-	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	u32 reg, mpc;
 	u16 phy_tmp;
 	int i;
 	u64 bytes, packets;
+	unsigned int start;
+	u64 _bytes, _packets;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -4324,10 +4355,17 @@ void igb_update_stats(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
 		struct igb_ring *ring = adapter->rx_ring[i];
+
 		ring->rx_stats.drops += rqdpc_tmp;
 		net_stats->rx_fifo_errors += rqdpc_tmp;
-		bytes += ring->rx_stats.bytes;
-		packets += ring->rx_stats.packets;
+
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			_bytes = ring->rx_stats.bytes;
+			_packets = ring->rx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}
 
 	net_stats->rx_bytes = bytes;
@@ -4337,8 +4375,13 @@ void igb_update_stats(struct igb_adapter *adapter)
 	packets = 0;
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = adapter->tx_ring[i];
-		bytes += ring->tx_stats.bytes;
-		packets += ring->tx_stats.packets;
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			_bytes = ring->tx_stats.bytes;
+			_packets = ring->tx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}
 	net_stats->tx_bytes = bytes;
 	net_stats->tx_packets = packets;
@@ -4660,12 +4703,13 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
 	u32 vmolr = rd32(E1000_VMOLR(vf));
 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 
-	vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
+	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
 	                    IGB_VF_FLAG_MULTI_PROMISC);
 	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
 
 	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
 		vmolr |= E1000_VMOLR_MPME;
+		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
 		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
 	} else {
 		/*
@@ -5319,7 +5363,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
 	u64 regval;
 
 	/* if skb does not support hw timestamp or TX stamp not valid exit */
-	if (likely(!buffer_info->shtx.hardware) ||
+	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
 	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
 		return;
 
@@ -5389,7 +5433,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+			u64_stats_update_begin(&tx_ring->tx_syncp);
 			tx_ring->tx_stats.restart_queue++;
+			u64_stats_update_end(&tx_ring->tx_syncp);
 		}
 	}
 
@@ -5429,9 +5476,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	}
 	tx_ring->total_bytes += total_bytes;
 	tx_ring->total_packets += total_packets;
+	u64_stats_update_begin(&tx_ring->tx_syncp);
 	tx_ring->tx_stats.bytes += total_bytes;
 	tx_ring->tx_stats.packets += total_packets;
-	return (count < tx_ring->count);
+	u64_stats_update_end(&tx_ring->tx_syncp);
+	return count < tx_ring->count;
 }
 
 /**
@@ -5456,7 +5505,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
 static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 				       u32 status_err, struct sk_buff *skb)
 {
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 
 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
 	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
@@ -5472,9 +5521,11 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 		 * packets, (aka let the stack check the crc32c)
 		 */
 		if ((skb->len == 60) &&
-		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
+		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+			u64_stats_update_begin(&ring->rx_syncp);
 			ring->rx_stats.csum_err++;
-
+			u64_stats_update_end(&ring->rx_syncp);
+		}
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -5500,7 +5551,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
 	 * values must belong to this one here and therefore we don't need to
 	 * compare any of the additional attributes stored for it.
 	 *
-	 * If nothing went wrong, then it should have a skb_shared_tx that we
+	 * If nothing went wrong, then it should have a shared tx_flags that we
 	 * can turn into a skb_shared_hwtstamps.
 	 */
 	if (staterr & E1000_RXDADV_STAT_TSIP) {
@@ -5661,8 +5712,10 @@ next_desc:
 
 	rx_ring->total_packets += total_packets;
 	rx_ring->total_bytes += total_bytes;
+	u64_stats_update_begin(&rx_ring->rx_syncp);
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
+	u64_stats_update_end(&rx_ring->rx_syncp);
 	return cleaned;
 }
 
@@ -5690,8 +5743,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
 			if (!buffer_info->page) {
 				buffer_info->page = netdev_alloc_page(netdev);
-				if (!buffer_info->page) {
+				if (unlikely(!buffer_info->page)) {
+					u64_stats_update_begin(&rx_ring->rx_syncp);
 					rx_ring->rx_stats.alloc_failed++;
+					u64_stats_update_end(&rx_ring->rx_syncp);
 					goto no_buffers;
 				}
 				buffer_info->page_offset = 0;
@@ -5706,7 +5761,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
 					      buffer_info->page_dma)) {
 				buffer_info->page_dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}
@@ -5714,8 +5771,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		skb = buffer_info->skb;
 		if (!skb) {
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
-			if (!skb) {
+			if (unlikely(!skb)) {
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 
@@ -5729,7 +5788,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
 					      buffer_info->dma)) {
 				buffer_info->dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}
@@ -6092,7 +6153,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
 
 	if (adapter->vlgrp) {
 		u16 vid;
-		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
 			if (!vlan_group_get_device(adapter->vlgrp, vid))
 				continue;
 			igb_vlan_rx_add_vid(adapter->netdev, vid);
@@ -6107,6 +6168,13 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 
 	mac->autoneg = 0;
 
+	/* Fiber NIC's only allow 1000 Gbps Full duplex */
+	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
+		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
 	switch (spddplx) {
 	case SPEED_10 + DUPLEX_HALF:
 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
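The bulk of this change converts igb from ndo_get_stats to ndo_get_stats64 and guards each ring's 64-bit byte/packet counters with a u64_stats_sync, so a reader can take a consistent snapshot without stopping the transmit and receive paths. The sketch below illustrates that pattern in isolation; it is not code from the driver, and the example_ring structure and helper names are made up for this example.

/*
 * Minimal sketch (not igb code) of the u64_stats_sync pattern used above:
 * the softirq path that owns the ring bumps its 64-bit counters inside a
 * begin/end pair, and a reader retries until it observes a consistent
 * snapshot. On 64-bit kernels the begin/end calls compile away.
 */
#include <linux/u64_stats_sync.h>

struct example_ring {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer: called from the NAPI/softirq context that owns this ring. */
static void example_ring_update(struct example_ring *ring,
				unsigned int pkts, unsigned int len)
{
	u64_stats_update_begin(&ring->syncp);
	ring->packets += pkts;
	ring->bytes += len;
	u64_stats_update_end(&ring->syncp);
}

/* Reader: e.g. from an ndo_get_stats64 implementation. */
static void example_ring_read(struct example_ring *ring,
			      u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&ring->syncp);
		*pkts = ring->packets;
		*bytes = ring->bytes;
	} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
}

The writer never blocks; the reader simply retries if an update on a 32-bit platform interleaves with the fetch, which is why the patch can report per-ring totals on demand instead of relying on the old timer-driven copy into netdev->stats.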