diff options
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 410 | 
1 file changed, 237 insertions, 173 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b3e3294cfe5..68478d6dfa2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@  /*******************************************************************************    Intel 10 Gigabit PCI Express Linux driver -  Copyright(c) 1999 - 2012 Intel Corporation. +  Copyright(c) 1999 - 2013 Intel Corporation.    This program is free software; you can redistribute it and/or modify it    under the terms and conditions of the GNU General Public License, @@ -66,7 +66,7 @@ static char ixgbe_default_device_descr[] =  #define DRV_VERSION "3.11.33-k"  const char ixgbe_driver_version[] = DRV_VERSION;  static const char ixgbe_copyright[] = -				"Copyright (c) 1999-2012 Intel Corporation."; +				"Copyright (c) 1999-2013 Intel Corporation.";  static const struct ixgbe_info *ixgbe_info_tbl[] = {  	[board_82598] = &ixgbe_82598_info, @@ -803,6 +803,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)  	/* Do the reset outside of interrupt context */  	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {  		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; +		e_warn(drv, "initiating reset due to tx timeout\n");  		ixgbe_service_event_schedule(adapter);  	}  } @@ -837,7 +838,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,  			break;  		/* prevent any other reads prior to eop_desc */ -		rmb(); +		read_barrier_depends();  		/* if DD is not set pending work has not been completed */  		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) @@ -850,9 +851,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,  		total_bytes += tx_buffer->bytecount;  		total_packets += tx_buffer->gso_segs; -		if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP)) -			ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb); -  		/* free the skb */  		dev_kfree_skb_any(tx_buffer->skb); 
@@ -1442,7 +1440,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,  	ixgbe_rx_checksum(rx_ring, rx_desc, skb); -	ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); +	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);  	if ((dev->features & NETIF_F_HW_VLAN_RX) &&  	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -2181,10 +2179,10 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)  			return;  		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { -			u32 autoneg; +			u32 speed;  			bool link_up = false; -			hw->mac.ops.check_link(hw, &autoneg, &link_up, false); +			hw->mac.ops.check_link(hw, &speed, &link_up, false);  			if (link_up)  				return; @@ -2788,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,  	/*  	 * set WTHRESH to encourage burst writeback, it should not be set -	 * higher than 1 when ITR is 0 as it could cause false TX hangs +	 * higher than 1 when: +	 * - ITR is 0 as it could cause false TX hangs +	 * - ITR is set to > 100k int/sec and BQL is enabled  	 *  	 * In order to avoid issues WTHRESH + PTHRESH should always be equal  	 * to or less than the number of on chip descriptors, which is  	 * currently 40.  	 
*/ +#if IS_ENABLED(CONFIG_BQL) +	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) +#else  	if (!ring->q_vector || (ring->q_vector->itr < 8)) +#endif  		txdctl |= (1 << 16);	/* WTHRESH = 1 */  	else  		txdctl |= (8 << 16);	/* WTHRESH = 8 */ @@ -2815,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,  		ring->atr_sample_rate = 0;  	} +	/* initialize XPS */ +	if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { +		struct ixgbe_q_vector *q_vector = ring->q_vector; + +		if (q_vector) +			netif_set_xps_queue(adapter->netdev, +					    &q_vector->affinity_mask, +					    ring->queue_index); +	} +  	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);  	/* enable queue */ @@ -3997,25 +4011,25 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)   **/  static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)  { -	u32 autoneg; -	bool negotiation, link_up = false; +	u32 speed; +	bool autoneg, link_up = false;  	u32 ret = IXGBE_ERR_LINK_SETUP;  	if (hw->mac.ops.check_link) -		ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); +		ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);  	if (ret)  		goto link_cfg_out; -	autoneg = hw->phy.autoneg_advertised; -	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) -		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, -							&negotiation); +	speed = hw->phy.autoneg_advertised; +	if ((!speed) && (hw->mac.ops.get_link_capabilities)) +		ret = hw->mac.ops.get_link_capabilities(hw, &speed, +							&autoneg);  	if (ret)  		goto link_cfg_out;  	if (hw->mac.ops.setup_link) -		ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up); +		ret = hw->mac.ops.setup_link(hw, speed, link_up);  link_cfg_out:  	return ret;  } @@ -4467,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)  {  	struct ixgbe_hw *hw = &adapter->hw;  	struct pci_dev *pdev = adapter->pdev; -	unsigned int rss; +	unsigned int rss, fdir;  	u32 fwsm;  #ifdef CONFIG_IXGBE_DCB 
 	int j; @@ -4482,38 +4496,57 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)  	hw->subsystem_vendor_id = pdev->subsystem_vendor;  	hw->subsystem_device_id = pdev->subsystem_device; -	/* Set capability flags */ +	/* Set common capability flags and settings */  	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());  	adapter->ring_feature[RING_F_RSS].limit = rss; +	adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; +	adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; +	adapter->max_q_vectors = MAX_Q_VECTORS_82599; +	adapter->atr_sample_rate = 20; +	fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); +	adapter->ring_feature[RING_F_FDIR].limit = fdir; +	adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; +#ifdef CONFIG_IXGBE_DCA +	adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; +#endif +#ifdef IXGBE_FCOE +	adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; +	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#ifdef CONFIG_IXGBE_DCB +	/* Default traffic class to use for FCoE */ +	adapter->fcoe.up = IXGBE_FCOE_DEFTC; +#endif /* CONFIG_IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + +	/* Set MAC specific capability flags and exceptions */  	switch (hw->mac.type) {  	case ixgbe_mac_82598EB: +		adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; +		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; +  		if (hw->device_id == IXGBE_DEV_ID_82598AT)  			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; +  		adapter->max_q_vectors = MAX_Q_VECTORS_82598; +		adapter->ring_feature[RING_F_FDIR].limit = 0; +		adapter->atr_sample_rate = 0; +		adapter->fdir_pballoc = 0; +#ifdef IXGBE_FCOE +		adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; +		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#ifdef CONFIG_IXGBE_DCB +		adapter->fcoe.up = 0; +#endif /* IXGBE_DCB */ +#endif /* IXGBE_FCOE */ +		break; +	case ixgbe_mac_82599EB: +		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) +			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;  		break;  	case ixgbe_mac_X540:  		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);  		if (fwsm & 
IXGBE_FWSM_TS_ENABLED)  			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; -	case ixgbe_mac_82599EB: -		adapter->max_q_vectors = MAX_Q_VECTORS_82599; -		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; -		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; -		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) -			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; -		/* Flow Director hash filters enabled */ -		adapter->atr_sample_rate = 20; -		adapter->ring_feature[RING_F_FDIR].limit = -							 IXGBE_MAX_FDIR_INDICES; -		adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; -#ifdef IXGBE_FCOE -		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; -		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; -#ifdef CONFIG_IXGBE_DCB -		/* Default traffic class to use for FCoE */ -		adapter->fcoe.up = IXGBE_FCOE_DEFTC; -#endif -#endif /* IXGBE_FCOE */  		break;  	default:  		break; @@ -4872,7 +4905,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)  	 */  	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&  	    (adapter->hw.mac.type == ixgbe_mac_82599EB) && -	    (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) +	    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))  		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");  	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@ -5535,6 +5568,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)  		break;  	} +	adapter->last_rx_ptp_check = jiffies; +  	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)  		ixgbe_ptp_start_cyclecounter(adapter); @@ -5615,6 +5650,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)  			 * to get done, so reset controller to flush Tx.  			 * (Do the reset outside of interrupt context).  			 
*/ +			e_warn(drv, "initiating reset to clear Tx work after link loss\n");  			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;  		}  	} @@ -5679,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)  	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))  		return; +	/* concurent i2c reads are not supported */ +	if (test_bit(__IXGBE_READ_I2C, &adapter->state)) +		return; +  	/* someone else is in init, wait until next service event */  	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))  		return; @@ -5739,8 +5779,8 @@ sfp_out:  static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)  {  	struct ixgbe_hw *hw = &adapter->hw; -	u32 autoneg; -	bool negotiation; +	u32 speed; +	bool autoneg = false;  	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))  		return; @@ -5751,11 +5791,11 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)  	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; -	autoneg = hw->phy.autoneg_advertised; -	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) -		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); +	speed = hw->phy.autoneg_advertised; +	if ((!speed) && (hw->mac.ops.get_link_capabilities)) +		hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);  	if (hw->mac.ops.setup_link) -		hw->mac.ops.setup_link(hw, autoneg, negotiation, true); +		hw->mac.ops.setup_link(hw, speed, true);  	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;  	adapter->link_check_timeout = jiffies; @@ -5879,7 +5919,6 @@ static void ixgbe_service_task(struct work_struct *work)  	struct ixgbe_adapter *adapter = container_of(work,  						     struct ixgbe_adapter,  						     service_task); -  	ixgbe_reset_subtask(adapter);  	ixgbe_sfp_detection_subtask(adapter);  	ixgbe_sfp_link_config_subtask(adapter); @@ -5887,7 +5926,11 @@ static void ixgbe_service_task(struct work_struct *work)  	ixgbe_watchdog_subtask(adapter);  	ixgbe_fdir_reinit_subtask(adapter);  	
ixgbe_check_hang_subtask(adapter); -	ixgbe_ptp_overflow_check(adapter); + +	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) { +		ixgbe_ptp_overflow_check(adapter); +		ixgbe_ptp_rx_hang(adapter); +	}  	ixgbe_service_event_complete(adapter);  } @@ -5900,6 +5943,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,  	u32 vlan_macip_lens, type_tucmd;  	u32 mss_l4len_idx, l4len; +	if (skb->ip_summed != CHECKSUM_PARTIAL) +		return 0; +  	if (!skb_is_gso(skb))  		return 0; @@ -5942,10 +5988,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,  	first->gso_segs = skb_shinfo(skb)->gso_segs;  	first->bytecount += (first->gso_segs - 1) * *hdr_len; -	/* mss_l4len_id: use 1 as index for TSO */ +	/* mss_l4len_id: use 0 as index for TSO */  	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;  	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; -	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;  	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */  	vlan_macip_lens = skb_network_header_len(skb); @@ -5967,12 +6012,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,  	u32 type_tucmd = 0;  	if (skb->ip_summed != CHECKSUM_PARTIAL) { -		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) { -			if (unlikely(skb->no_fcs)) -				first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS; -			if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) -				return; -		} +		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && +		    !(first->tx_flags & IXGBE_TX_FLAGS_CC)) +			return;  	} else {  		u8 l4_hdr = 0;  		switch (first->protocol) { @@ -6030,30 +6072,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,  			  type_tucmd, mss_l4len_idx);  } -static __le32 ixgbe_tx_cmd_type(u32 tx_flags) +#define IXGBE_SET_FLAG(_input, _flag, _result) \ +	((_flag <= _result) ? 
\ +	 ((u32)(_input & _flag) * (_result / _flag)) : \ +	 ((u32)(_input & _flag) / (_flag / _result))) + +static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)  {  	/* set type for advanced descriptor with frame checksum insertion */ -	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | -				      IXGBE_ADVTXD_DCMD_DEXT); +	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | +		       IXGBE_ADVTXD_DCMD_DEXT | +		       IXGBE_ADVTXD_DCMD_IFCS;  	/* set HW vlan bit if vlan is present */ -	if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN) -		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); - -	if (tx_flags & IXGBE_TX_FLAGS_TSTAMP) -		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP); +	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, +				   IXGBE_ADVTXD_DCMD_VLE);  	/* set segmentation enable bits for TSO/FSO */ -#ifdef IXGBE_FCOE -	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO)) -#else -	if (tx_flags & IXGBE_TX_FLAGS_TSO) -#endif -		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); +	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, +				   IXGBE_ADVTXD_DCMD_TSE); + +	/* set timestamp bit if present */ +	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, +				   IXGBE_ADVTXD_MAC_TSTAMP);  	/* insert frame checksum */ -	if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS)) -		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS); +	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);  	return cmd_type;  } @@ -6061,36 +6105,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)  static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,  				   u32 tx_flags, unsigned int paylen)  { -	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); +	u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;  	/* enable L4 checksum for TSO and TX checksum offload */ -	if (tx_flags & IXGBE_TX_FLAGS_CSUM) -		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); +	olinfo_status |= IXGBE_SET_FLAG(tx_flags, +					IXGBE_TX_FLAGS_CSUM, +					
IXGBE_ADVTXD_POPTS_TXSM);  	/* enble IPv4 checksum for TSO */ -	if (tx_flags & IXGBE_TX_FLAGS_IPV4) -		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); - -	/* use index 1 context for TSO/FSO/FCOE */ -#ifdef IXGBE_FCOE -	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE)) -#else -	if (tx_flags & IXGBE_TX_FLAGS_TSO) -#endif -		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); +	olinfo_status |= IXGBE_SET_FLAG(tx_flags, +					IXGBE_TX_FLAGS_IPV4, +					IXGBE_ADVTXD_POPTS_IXSM);  	/*  	 * Check Context must be set if Tx switch is enabled, which it  	 * always is for case where virtual functions are running  	 */ -#ifdef IXGBE_FCOE -	if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE)) -#else -	if (tx_flags & IXGBE_TX_FLAGS_TXSW) -#endif -		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); +	olinfo_status |= IXGBE_SET_FLAG(tx_flags, +					IXGBE_TX_FLAGS_CC, +					IXGBE_ADVTXD_CC); -	tx_desc->read.olinfo_status = olinfo_status; +	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);  }  #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ @@ -6100,22 +6135,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,  			 struct ixgbe_tx_buffer *first,  			 const u8 hdr_len)  { -	dma_addr_t dma;  	struct sk_buff *skb = first->skb;  	struct ixgbe_tx_buffer *tx_buffer;  	union ixgbe_adv_tx_desc *tx_desc; -	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; -	unsigned int data_len = skb->data_len; -	unsigned int size = skb_headlen(skb); -	unsigned int paylen = skb->len - hdr_len; +	struct skb_frag_struct *frag; +	dma_addr_t dma; +	unsigned int data_len, size;  	u32 tx_flags = first->tx_flags; -	__le32 cmd_type; +	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);  	u16 i = tx_ring->next_to_use;  	tx_desc = IXGBE_TX_DESC(tx_ring, i); -	ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen); -	cmd_type = ixgbe_tx_cmd_type(tx_flags); +	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + +	size = skb_headlen(skb); +	data_len = skb->data_len;  
#ifdef IXGBE_FCOE  	if (tx_flags & IXGBE_TX_FLAGS_FCOE) { @@ -6129,19 +6164,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,  #endif  	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); -	if (dma_mapping_error(tx_ring->dev, dma)) -		goto dma_error; -	/* record length, and DMA address */ -	dma_unmap_len_set(first, len, size); -	dma_unmap_addr_set(first, dma, dma); +	tx_buffer = first; -	tx_desc->read.buffer_addr = cpu_to_le64(dma); +	for (frag = &skb_shinfo(skb)->frags[0];; frag++) { +		if (dma_mapping_error(tx_ring->dev, dma)) +			goto dma_error; + +		/* record length, and DMA address */ +		dma_unmap_len_set(tx_buffer, len, size); +		dma_unmap_addr_set(tx_buffer, dma, dma); + +		tx_desc->read.buffer_addr = cpu_to_le64(dma); -	for (;;) {  		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {  			tx_desc->read.cmd_type_len = -				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); +				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);  			i++;  			tx_desc++; @@ -6149,18 +6187,18 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,  				tx_desc = IXGBE_TX_DESC(tx_ring, 0);  				i = 0;  			} +			tx_desc->read.olinfo_status = 0;  			dma += IXGBE_MAX_DATA_PER_TXD;  			size -= IXGBE_MAX_DATA_PER_TXD;  			tx_desc->read.buffer_addr = cpu_to_le64(dma); -			tx_desc->read.olinfo_status = 0;  		}  		if (likely(!data_len))  			break; -		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); +		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);  		i++;  		tx_desc++; @@ -6168,6 +6206,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,  			tx_desc = IXGBE_TX_DESC(tx_ring, 0);  			i = 0;  		} +		tx_desc->read.olinfo_status = 0;  #ifdef IXGBE_FCOE  		size = min_t(unsigned int, data_len, skb_frag_size(frag)); @@ -6178,22 +6217,13 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,  		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,  				       DMA_TO_DEVICE); -		if (dma_mapping_error(tx_ring->dev, dma)) -			goto dma_error;  		tx_buffer = 
&tx_ring->tx_buffer_info[i]; -		dma_unmap_len_set(tx_buffer, len, size); -		dma_unmap_addr_set(tx_buffer, dma, dma); - -		tx_desc->read.buffer_addr = cpu_to_le64(dma); -		tx_desc->read.olinfo_status = 0; - -		frag++;  	}  	/* write last descriptor with RS and EOP bits */ -	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); -	tx_desc->read.cmd_type_len = cmd_type; +	cmd_type |= size | IXGBE_TXD_CMD; +	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);  	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); @@ -6354,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)  	return __ixgbe_maybe_stop_tx(tx_ring, size);  } +#ifdef IXGBE_FCOE  static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)  { -	struct ixgbe_adapter *adapter = netdev_priv(dev); -	int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : -					       smp_processor_id(); -#ifdef IXGBE_FCOE -	__be16 protocol = vlan_get_protocol(skb); +	struct ixgbe_adapter *adapter; +	struct ixgbe_ring_feature *f; +	int txq; -	if (((protocol == htons(ETH_P_FCOE)) || -	    (protocol == htons(ETH_P_FIP))) && -	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { -		struct ixgbe_ring_feature *f; +	/* +	 * only execute the code below if protocol is FCoE +	 * or FIP and we have FCoE enabled on the adapter +	 */ +	switch (vlan_get_protocol(skb)) { +	case __constant_htons(ETH_P_FCOE): +	case __constant_htons(ETH_P_FIP): +		adapter = netdev_priv(dev); -		f = &adapter->ring_feature[RING_F_FCOE]; +		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) +			break; +	default: +		return __netdev_pick_tx(dev, skb); +	} -		while (txq >= f->indices) -			txq -= f->indices; -		txq += adapter->ring_feature[RING_F_FCOE].offset; +	f = &adapter->ring_feature[RING_F_FCOE]; -		return txq; -	} -#endif +	txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : +					   smp_processor_id(); -	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { -		while (unlikely(txq >= dev->real_num_tx_queues)) -			txq -= dev->real_num_tx_queues; -		return txq; -	} +	while (txq >= f->indices) +		txq -= f->indices; -	return skb_tx_hash(dev, skb); +	return txq + f->offset;  } +#endif  netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,  			  struct ixgbe_adapter *adapter,  			  struct ixgbe_ring *tx_ring) @@ -6446,6 +6478,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,  	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {  		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;  		tx_flags |= IXGBE_TX_FLAGS_TSTAMP; + +		/* schedule check for Tx timestamp */ +		adapter->ptp_tx_skb = skb_get(skb); +		adapter->ptp_tx_start = jiffies; +		schedule_work(&adapter->ptp_tx_work);  	}  #ifdef CONFIG_PCI_IOV @@ -6454,7 +6491,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,  	 * Tx switch had been disabled.  	 */  	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) -		tx_flags |= IXGBE_TX_FLAGS_TXSW; +		tx_flags |= IXGBE_TX_FLAGS_CC;  #endif  	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. 
*/ @@ -6785,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)  	}  } +#endif /* CONFIG_IXGBE_DCB */  /**   * ixgbe_setup_tc - configure net_device for multiple traffic classes   * @@ -6810,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)  		ixgbe_close(dev);  	ixgbe_clear_interrupt_scheme(adapter); +#ifdef CONFIG_IXGBE_DCB  	if (tc) {  		netdev_set_num_tc(dev, tc);  		ixgbe_set_prio_tc_map(adapter); @@ -6832,15 +6871,28 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)  		adapter->dcb_cfg.pfc_mode_enable = false;  	} -	ixgbe_init_interrupt_scheme(adapter);  	ixgbe_validate_rtr(adapter, tc); + +#endif /* CONFIG_IXGBE_DCB */ +	ixgbe_init_interrupt_scheme(adapter); +  	if (netif_running(dev)) -		ixgbe_open(dev); +		return ixgbe_open(dev);  	return 0;  } -#endif /* CONFIG_IXGBE_DCB */ +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) +{ +	struct net_device *netdev = adapter->netdev; + +	rtnl_lock(); +	ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); +	rtnl_unlock(); +} + +#endif  void ixgbe_do_reset(struct net_device *netdev)  {  	struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -6986,7 +7038,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],  	return err;  } -static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, +static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],  			     struct net_device *dev,  			     const unsigned char *addr)  { @@ -7063,7 +7115,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,  }  static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, -				    struct net_device *dev) +				    struct net_device *dev, +				    u32 filter_mask)  {  	struct ixgbe_adapter *adapter = netdev_priv(dev);  	u16 mode; @@ -7083,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {  	.ndo_open		= ixgbe_open,  	.ndo_stop		= ixgbe_close,  	.ndo_start_xmit		= ixgbe_xmit_frame, +#ifdef IXGBE_FCOE  	
.ndo_select_queue	= ixgbe_select_queue, +#endif  	.ndo_set_rx_mode	= ixgbe_set_rx_mode,  	.ndo_validate_addr	= eth_validate_addr,  	.ndo_set_mac_address	= ixgbe_set_mac, @@ -7195,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];  	static int cards_found;  	int i, err, pci_using_dac; +	unsigned int indices = MAX_TX_QUEUES;  	u8 part_str[IXGBE_PBANUM_LENGTH]; -	unsigned int indices = num_possible_cpus(); -	unsigned int dcb_max = 0;  #ifdef IXGBE_FCOE  	u16 device_caps;  #endif @@ -7246,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	pci_set_master(pdev);  	pci_save_state(pdev); +	if (ii->mac == ixgbe_mac_82598EB) {  #ifdef CONFIG_IXGBE_DCB -	if (ii->mac == ixgbe_mac_82598EB) -		dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, -				IXGBE_MAX_RSS_INDICES); -	else -		dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, -				IXGBE_MAX_FDIR_INDICES); +		/* 8 TC w/ 4 queues per TC */ +		indices = 4 * MAX_TRAFFIC_CLASS; +#else +		indices = IXGBE_MAX_RSS_INDICES;  #endif +	} -	if (ii->mac == ixgbe_mac_82598EB) -		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); -	else -		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); - -#ifdef IXGBE_FCOE -	indices += min_t(unsigned int, num_possible_cpus(), -			 IXGBE_MAX_FCOE_INDICES); -#endif -	indices = max_t(unsigned int, dcb_max, indices);  	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);  	if (!netdev) {  		err = -ENOMEM; @@ -7367,7 +7411,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	}  #ifdef CONFIG_PCI_IOV -	ixgbe_enable_sriov(adapter, ii); +	/* SR-IOV not supported on the 82598 */ +	if (adapter->hw.mac.type == ixgbe_mac_82598EB) +		goto skip_sriov; +	/* Mailbox */ +	ixgbe_init_mbx_params_pf(hw); +	memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); +	
ixgbe_enable_sriov(adapter); +	pci_sriov_set_totalvfs(pdev, 63); +skip_sriov:  #endif  	netdev->features = NETIF_F_SG | @@ -7411,13 +7463,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  #ifdef IXGBE_FCOE  	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { +		unsigned int fcoe_l; +  		if (hw->mac.ops.get_device_caps) {  			hw->mac.ops.get_device_caps(hw, &device_caps);  			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)  				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;  		} -		adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; + +		fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); +		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;  		netdev->features |= NETIF_F_FSO |  				    NETIF_F_FCOE_CRC; @@ -7445,9 +7501,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	}  	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); -	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); -	if (!is_valid_ether_addr(netdev->perm_addr)) { +	if (!is_valid_ether_addr(netdev->dev_addr)) {  		e_dev_err("invalid MAC address\n");  		err = -EIO;  		goto err_sw_init; @@ -7624,8 +7679,14 @@ static void ixgbe_remove(struct pci_dev *pdev)  	if (netdev->reg_state == NETREG_REGISTERED)  		unregister_netdev(netdev); -	ixgbe_disable_sriov(adapter); - +#ifdef CONFIG_PCI_IOV +	/* +	 * Only disable SR-IOV on unload if the user specified the now +	 * deprecated max_vfs module parameter. 
+	 */ +	if (max_vfs) +		ixgbe_disable_sriov(adapter); +#endif  	ixgbe_clear_interrupt_scheme(adapter);  	ixgbe_release_hw_control(adapter); @@ -7730,6 +7791,8 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,  		if (vfdev) {  			e_dev_err("Issuing VFLR to VF %d\n", vf);  			pci_write_config_dword(vfdev, 0xA8, 0x00008000); +			/* Free device reference count */ +			pci_dev_put(vfdev);  		}  		pci_cleanup_aer_uncorrect_error_status(pdev); @@ -7839,6 +7902,7 @@ static struct pci_driver ixgbe_driver = {  	.resume   = ixgbe_resume,  #endif  	.shutdown = ixgbe_shutdown, +	.sriov_configure = ixgbe_pci_sriov_configure,  	.err_handler = &ixgbe_err_handler  };  |