Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
20 files changed, 1692 insertions, 1232 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 0bdf06bc5c4..5fd5d04c26c 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -34,11 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o  ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \                ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ -              ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o +              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o  ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \                                ixgbe_dcb_82599.o ixgbe_dcb_nl.o  ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o - +ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o  ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 7af291e236b..b9623e9ea89 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -77,17 +77,18 @@  #define IXGBE_MAX_FCPAUSE		 0xFFFF  /* Supported Rx Buffer Sizes */ -#define IXGBE_RXBUFFER_512   512    /* Used for packet split */ +#define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */  #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */  /* - * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we - * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, - * this adds up to 512 bytes of extra data meaning the smallest allocation - * we could have is 1K. - * i.e. RXBUFFER_512 --> size-1024 slab + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less.   */ -#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512 +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256  #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) @@ -113,7 +114,7 @@  #define IXGBE_MAX_VFTA_ENTRIES          128  #define MAX_EMULATION_MAC_ADDRS         16  #define IXGBE_MAX_PF_MACVLANS           15 -#define VMDQ_P(p)   ((p) + adapter->num_vfs) +#define VMDQ_P(p)   ((p) + adapter->ring_feature[RING_F_VMDQ].offset)  #define IXGBE_82599_VF_DEVICE_ID        0x10ED  #define IXGBE_X540_VF_DEVICE_ID         0x1515 @@ -130,7 +131,6 @@ struct vf_data_storage {  	u16 tx_rate;  	u16 vlan_count;  	u8 spoofchk_enabled; -	struct pci_dev *vfdev;  };  struct vf_macvlans { @@ -278,10 +278,16 @@ enum ixgbe_ring_f_enum {  #define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES  #endif /* IXGBE_FCOE */  struct ixgbe_ring_feature { -	int indices; -	int mask; +	u16 limit;	/* upper limit on feature indices */ +	u16 indices;	/* current value of indices */ +	u16 mask;	/* Mask used for feature to ring mapping */ +	u16 offset;	/* offset to start of feature */  } ____cacheline_internodealigned_in_smp; +#define IXGBE_82599_VMDQ_8Q_MASK 0x78 +#define IXGBE_82599_VMDQ_4Q_MASK 0x7C +#define IXGBE_82599_VMDQ_2Q_MASK 0x7E +  /*   * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since   * this is twice the size of a half page we need to double the page order @@ -315,7 +321,7 @@ struct ixgbe_ring_container {                                ? 
8 : 1)  #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS -/* MAX_MSIX_Q_VECTORS of these are allocated, +/* MAX_Q_VECTORS of these are allocated,   * but we only use one per queue-specific vector.   */  struct ixgbe_q_vector { @@ -401,11 +407,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)  #define NON_Q_VECTORS (OTHER_VECTOR)  #define MAX_MSIX_VECTORS_82599 64 -#define MAX_MSIX_Q_VECTORS_82599 64 +#define MAX_Q_VECTORS_82599 64  #define MAX_MSIX_VECTORS_82598 18 -#define MAX_MSIX_Q_VECTORS_82598 16 +#define MAX_Q_VECTORS_82598 16 -#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599 +#define MAX_Q_VECTORS MAX_Q_VECTORS_82599  #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599  #define MIN_MSIX_Q_VECTORS 1 @@ -427,35 +433,33 @@ struct ixgbe_adapter {  	 * thus the additional *_CAPABLE flags.  	 */  	u32 flags; -#define IXGBE_FLAG_MSI_CAPABLE                  (u32)(1 << 1) -#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 2) -#define IXGBE_FLAG_MSIX_CAPABLE                 (u32)(1 << 3) -#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 4) -#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 6) -#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 7) -#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 8) -#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 9) -#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 10) -#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 11) -#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 12) -#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 13) -#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 14) -#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 16) -#define IXGBE_FLAG_RSS_CAPABLE                  (u32)(1 << 17) -#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 18) -#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20) -#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22) -#define IXGBE_FLAG_NEED_LINK_CONFIG             (u32)(1 << 23) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 24) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 25) -#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 26) -#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 27) -#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 28) -#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 29) +#define IXGBE_FLAG_MSI_CAPABLE                  (u32)(1 << 0) +#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 1) +#define IXGBE_FLAG_MSIX_CAPABLE                 (u32)(1 << 2) +#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 3) +#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 4) +#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 5) +#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 6) +#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 7) +#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 8) +#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 9) +#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 10) +#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 11) +#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 12) +#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 13) +#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 14) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 15) +#define IXGBE_FLAG_NEED_LINK_UPDATE           
  (u32)(1 << 16) +#define IXGBE_FLAG_NEED_LINK_CONFIG             (u32)(1 << 17) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 18) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 19) +#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 20) +#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 21) +#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 22) +#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 23)  	u32 flags2; -#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1) +#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1 << 0)  #define IXGBE_FLAG2_RSC_ENABLED                 (u32)(1 << 1)  #define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE         (u32)(1 << 2)  #define IXGBE_FLAG2_TEMP_SENSOR_EVENT           (u32)(1 << 3) @@ -496,7 +500,7 @@ struct ixgbe_adapter {  	u32 alloc_rx_page_failed;  	u32 alloc_rx_buff_failed; -	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; +	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];  	/* DCB parameters */  	struct ieee_pfc *ixgbe_ieee_pfc; @@ -507,8 +511,8 @@ struct ixgbe_adapter {  	u8 dcbx_cap;  	enum ixgbe_fc_mode last_lfc_mode; -	int num_msix_vectors; -	int max_msix_q_vectors;         /* true count of q_vectors for device */ +	int num_q_vectors;	/* current number of q_vectors for device */ +	int max_q_vectors;	/* true count of q_vectors for device */  	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];  	struct msix_entry *msix_entries; @@ -561,6 +565,7 @@ struct ixgbe_adapter {  	spinlock_t tmreg_lock;  	struct cyclecounter cc;  	struct timecounter tc; +	int rx_hwtstamp_filter;  	u32 base_incval;  	u32 cycle_speed;  #endif /* CONFIG_IXGBE_PTP */ @@ -686,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);  extern int ixgbe_fso(struct ixgbe_ring *tx_ring,  		     struct ixgbe_tx_buffer *first,  		     u8 *hdr_len); -extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);  extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,  			  union ixgbe_adv_rx_desc *rx_desc,  			  struct sk_buff *skb); @@ -695,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,  extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,  				 struct scatterlist *sgl, unsigned int sgc);  extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);  extern int ixgbe_fcoe_enable(struct net_device *netdev);  extern int ixgbe_fcoe_disable(struct net_device *netdev);  #ifdef CONFIG_IXGBE_DCB @@ -704,6 +710,7 @@ extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);  extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);  extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,  				  struct netdev_fcoe_hbainfo *info); +extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);  #endif /* IXGBE_FCOE */  static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) @@ -718,6 +725,7 @@ extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);  extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,  				  struct sk_buff *skb);  extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, +				  union ixgbe_adv_rx_desc *rx_desc,  				  struct sk_buff *skb);  extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,  				    struct ifreq *ifr, int cmd); diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index dee64d2703f..50fc137501d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -241,7 +241,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,  	/* Determine 1G link capabilities off of SFP+ type */  	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || -	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { +	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || +	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || +	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {  		*speed = IXGBE_LINK_SPEED_1GB_FULL;  		*negotiation = true;  		goto out; @@ -1023,6 +1025,9 @@ mac_reset_top:  		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,  		                    hw->mac.san_addr, 0, IXGBE_RAH_AV); +		/* Save the SAN MAC RAR index */ +		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; +  		/* Reserve the last RAR for the SAN MAC address */  		hw->mac.num_rar_entries--;  	} @@ -2104,6 +2109,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {  	.set_rar                = &ixgbe_set_rar_generic,  	.clear_rar              = &ixgbe_clear_rar_generic,  	.set_vmdq               = &ixgbe_set_vmdq_generic, +	.set_vmdq_san_mac	= &ixgbe_set_vmdq_san_mac_generic,  	.clear_vmdq             = &ixgbe_clear_vmdq_generic,  	.init_rx_addrs          = &ixgbe_init_rx_addrs_generic,  	.update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 77ac41feb0f..90e41db3cb6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -2848,6 +2848,31 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)  }  /** + *  This function should only be involved in the IOV mode. + *  In IOV mode, Default pool is next pool after the number of + *  VFs advertized and not 0. + *  MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] + * + *  ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + *  @hw: pointer to hardware struct + *  @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) +{ +	u32 rar = hw->mac.san_mac_rar_index; + +	if (vmdq < 32) { +		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); +		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); +	} else { +		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); +		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); +	} + +	return 0; +} + +/**   *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array   *  @hw: pointer to hardware structure   **/ @@ -3132,7 +3157,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,  }  /** - *  ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from + *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from   *  the EEPROM   *  @hw: pointer to hardware structure   *  @wwnn_prefix: the alternative WWNN prefix @@ -3200,20 +3225,22 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)  	 * PFVFSPOOF register array is size 8 with 8 bits assigned to  	 * MAC anti-spoof enables in each register array element.  	 
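	 * A worked example (illustrative; assumes pf_target_reg = pf / 8 and
	 * pf_target_shift = pf % 8 as computed earlier in this function): for
	 * pf = 10, the loop below writes pfvfspoof whole to PFVFSPOOF(0),
	 * enabling spoof checking for VFs 0-7; the masked write with
	 * pfvfspoof &= (1 << 2) - 1 then keeps only VFs 8-9 in PFVFSPOOF(1);
	 * and PFVFSPOOF(2) through PFVFSPOOF(7) are cleared because those
	 * pools belong to the PF.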
*/ -	for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) +	for (j = 0; j < pf_target_reg; j++)  		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); -	/* If not enabling anti-spoofing then done */ -	if (!enable) -		return; -  	/*  	 * The PF should be allowed to spoof so that it can support -	 * emulation mode NICs.  Reset the bit assigned to the PF +	 * emulation mode NICs.  Do not set the bits assigned to the PF  	 */ -	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg)); -	pfvfspoof ^= (1 << pf_target_shift); -	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof); +	pfvfspoof &= (1 << pf_target_shift) - 1; +	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + +	/* +	 * Remaining pools belong to the PF so they do not need to have +	 * anti-spoofing enabled. +	 */ +	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) +		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);  }  /** @@ -3325,6 +3352,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,   *  ixgbe_calculate_checksum - Calculate checksum for buffer   *  @buffer: pointer to EEPROM   *  @length: size of EEPROM to calculate a checksum for + *   *  Calculates the checksum for some buffer on a specified length.  The   *  checksum calculated is returned.   **/ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 6222fdb3d3f..d813d1188c3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -85,6 +85,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);  void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);  s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);  s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);  s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);  s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);  s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 8bfaaee5ac5..9bc17c0cb97 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -180,67 +180,83 @@ out:  void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)  { -	int i; +	struct tc_configuration *tc_config = &cfg->tc_config[0]; +	int tc; -	*pfc_en = 0; -	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) -		*pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i; +	for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { +		if (tc_config[tc].dcb_pfc != pfc_disabled) +			*pfc_en |= 1 << tc; +	}  }  void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,  			     u16 *refill)  { -	struct tc_bw_alloc *p; -	int i; +	struct tc_configuration *tc_config = &cfg->tc_config[0]; +	int tc; -	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { -		p = &cfg->tc_config[i].path[direction]; -		refill[i] = p->data_credits_refill; -	} +	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) +		refill[tc] = tc_config[tc].path[direction].data_credits_refill;  }  void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)  { -	int i; +	struct tc_configuration *tc_config = &cfg->tc_config[0]; +	int tc; -	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) -		max[i] = cfg->tc_config[i].desc_credits_max; +	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) +		max[tc] = tc_config[tc].desc_credits_max;  }  void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int 
direction,  			    u8 *bwgid)  { -	struct tc_bw_alloc *p; -	int i; +	struct tc_configuration *tc_config = &cfg->tc_config[0]; +	int tc; -	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { -		p = &cfg->tc_config[i].path[direction]; -		bwgid[i] = p->bwg_id; -	} +	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) +		bwgid[tc] = tc_config[tc].path[direction].bwg_id;  }  void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,  			    u8 *ptype)  { -	struct tc_bw_alloc *p; -	int i; +	struct tc_configuration *tc_config = &cfg->tc_config[0]; +	int tc; -	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { -		p = &cfg->tc_config[i].path[direction]; -		ptype[i] = p->prio_type; +	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) +		ptype[tc] = tc_config[tc].path[direction].prio_type; +} + +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) +{ +	struct tc_configuration *tc_config = &cfg->tc_config[0]; +	u8 prio_mask = 1 << up; +	u8 tc = cfg->num_tcs.pg_tcs; + +	/* If tc is 0 then DCB is likely not enabled or supported */ +	if (!tc) +		goto out; + +	/* +	 * Test from maximum TC to 1 and report the first match we find.  If +	 * we find no match we can assume that the TC is 0 since the TC must +	 * be set for all user priorities +	 */ +	for (tc--; tc; tc--) { +		if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) +			break;  	} +out: +	return tc;  }  void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)  { -	int i, up; -	unsigned long bitmap; +	u8 up; -	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { -		bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap; -		for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY) -			map[up] = i; -	} +	for (up = 0; up < MAX_USER_PRIORITY; up++) +		map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);  }  /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 24333b71816..1f4108ee154 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -146,6 +146,7 @@ void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);  void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);  void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);  void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);  /* DCB credits calculation */  s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 5164a21b13c..f1e002d5fa8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -151,34 +151,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)  static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)  { -	int err = 0; -	u8 prio_tc[MAX_USER_PRIORITY] = {0}; -	int i;  	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	int err = 0;  	/* Fail command if not in CEE mode */  	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))  		return 1;  	/* verify there is something to do, if not then exit */ -	if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) -		goto out; - -	if (state > 0) { -		err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs); -		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); -	} else { -		err = ixgbe_setup_tc(netdev, 0); -	} - -	if (err) +	if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))  		goto out; -	
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) -		netdev_set_prio_tc_map(netdev, i, prio_tc[i]); - +	err = ixgbe_setup_tc(netdev, +			     state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);  out: -	return err ? 1 : 0; +	return !!err;  }  static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, @@ -584,9 +571,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,  	if (err)  		goto err_out; -	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) -		netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]); -  	err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);  err_out:  	return err; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 3178f1ec371..4104ea25d81 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -154,100 +154,60 @@ static int ixgbe_get_settings(struct net_device *netdev,  {  	struct ixgbe_adapter *adapter = netdev_priv(netdev);  	struct ixgbe_hw *hw = &adapter->hw; +	ixgbe_link_speed supported_link;  	u32 link_speed = 0; +	bool autoneg;  	bool link_up; -	ecmd->supported = SUPPORTED_10000baseT_Full; -	ecmd->autoneg = AUTONEG_ENABLE; -	ecmd->transceiver = XCVR_EXTERNAL; -	if ((hw->phy.media_type == ixgbe_media_type_copper) || -	    (hw->phy.multispeed_fiber)) { -		ecmd->supported |= (SUPPORTED_1000baseT_Full | -		                    SUPPORTED_Autoneg); - -		switch (hw->mac.type) { -		case ixgbe_mac_X540: -			ecmd->supported |= SUPPORTED_100baseT_Full; -			break; -		default: -			break; -		} +	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); -		ecmd->advertising = ADVERTISED_Autoneg; -		if (hw->phy.autoneg_advertised) { -			if (hw->phy.autoneg_advertised & -			    IXGBE_LINK_SPEED_100_FULL) -				ecmd->advertising |= ADVERTISED_100baseT_Full; -			if (hw->phy.autoneg_advertised & -			    IXGBE_LINK_SPEED_10GB_FULL) -				ecmd->advertising |= ADVERTISED_10000baseT_Full; -			if (hw->phy.autoneg_advertised & -			    IXGBE_LINK_SPEED_1GB_FULL) -				ecmd->advertising |= ADVERTISED_1000baseT_Full; -		} else { -			/* -			 * Default advertised modes in case -			 * phy.autoneg_advertised isn't set. 
-			 */ -			ecmd->advertising |= (ADVERTISED_10000baseT_Full | -					      ADVERTISED_1000baseT_Full); -			if (hw->mac.type == ixgbe_mac_X540) -				ecmd->advertising |= ADVERTISED_100baseT_Full; -		} +	/* set the supported link speeds */ +	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) +		ecmd->supported |= SUPPORTED_10000baseT_Full; +	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) +		ecmd->supported |= SUPPORTED_1000baseT_Full; +	if (supported_link & IXGBE_LINK_SPEED_100_FULL) +		ecmd->supported |= SUPPORTED_100baseT_Full; -		if (hw->phy.media_type == ixgbe_media_type_copper) { -			ecmd->supported |= SUPPORTED_TP; -			ecmd->advertising |= ADVERTISED_TP; -			ecmd->port = PORT_TP; -		} else { -			ecmd->supported |= SUPPORTED_FIBRE; -			ecmd->advertising |= ADVERTISED_FIBRE; -			ecmd->port = PORT_FIBRE; -		} -	} else if (hw->phy.media_type == ixgbe_media_type_backplane) { -		/* Set as FIBRE until SERDES defined in kernel */ -		if (hw->device_id == IXGBE_DEV_ID_82598_BX) { -			ecmd->supported = (SUPPORTED_1000baseT_Full | -					   SUPPORTED_FIBRE); -			ecmd->advertising = (ADVERTISED_1000baseT_Full | -					     ADVERTISED_FIBRE); -			ecmd->port = PORT_FIBRE; -			ecmd->autoneg = AUTONEG_DISABLE; -		} else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || -			   (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { -			ecmd->supported |= (SUPPORTED_1000baseT_Full | -					    SUPPORTED_Autoneg | -					    SUPPORTED_FIBRE); -			ecmd->advertising = (ADVERTISED_10000baseT_Full | -					     ADVERTISED_1000baseT_Full | -					     ADVERTISED_Autoneg | -					     ADVERTISED_FIBRE); -			ecmd->port = PORT_FIBRE; -		} else { -			ecmd->supported |= (SUPPORTED_1000baseT_Full | -					    SUPPORTED_FIBRE); -			ecmd->advertising = (ADVERTISED_10000baseT_Full | -					     ADVERTISED_1000baseT_Full | -					     ADVERTISED_FIBRE); -			ecmd->port = PORT_FIBRE; -		} +	/* set the advertised speeds */ +	if (hw->phy.autoneg_advertised) { +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) +			ecmd->advertising |= ADVERTISED_100baseT_Full; +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +			ecmd->advertising |= ADVERTISED_10000baseT_Full; +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) +			ecmd->advertising |= ADVERTISED_1000baseT_Full;  	} else { -		ecmd->supported |= SUPPORTED_FIBRE; -		ecmd->advertising = (ADVERTISED_10000baseT_Full | -		                     ADVERTISED_FIBRE); -		ecmd->port = PORT_FIBRE; -		ecmd->autoneg = AUTONEG_DISABLE; +		/* default modes in case phy.autoneg_advertised isn't set */ +		if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) +			ecmd->advertising |= ADVERTISED_10000baseT_Full; +		if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) +			ecmd->advertising |= ADVERTISED_1000baseT_Full; +		if (supported_link & IXGBE_LINK_SPEED_100_FULL) +			ecmd->advertising |= ADVERTISED_100baseT_Full;  	} -	/* Get PHY type */ +	if (autoneg) { +		ecmd->supported |= SUPPORTED_Autoneg; +		ecmd->advertising |= ADVERTISED_Autoneg; +		ecmd->autoneg = AUTONEG_ENABLE; +	} else +		ecmd->autoneg = AUTONEG_DISABLE; + +	ecmd->transceiver = XCVR_EXTERNAL; + +	/* Determine the remaining settings based on the PHY type. 
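+	 * For example: a multispeed fiber module whose
+	 * get_link_capabilities reports IXGBE_LINK_SPEED_10GB_FULL |
+	 * IXGBE_LINK_SPEED_1GB_FULL with autoneg ends up with
+	 * 10000baseT/Full and 1000baseT/Full set above, and the SFP+
+	 * cases in this switch then contribute the SUPPORTED_FIBRE and
+	 * port details.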
*/  	switch (adapter->hw.phy.type) {  	case ixgbe_phy_tn:  	case ixgbe_phy_aq:  	case ixgbe_phy_cu_unknown: -		/* Copper 10G-BASET */ +		ecmd->supported |= SUPPORTED_TP; +		ecmd->advertising |= ADVERTISED_TP;  		ecmd->port = PORT_TP;  		break;  	case ixgbe_phy_qt: +		ecmd->supported |= SUPPORTED_FIBRE; +		ecmd->advertising |= ADVERTISED_FIBRE;  		ecmd->port = PORT_FIBRE;  		break;  	case ixgbe_phy_nl: @@ -257,42 +217,59 @@ static int ixgbe_get_settings(struct net_device *netdev,  	case ixgbe_phy_sfp_avago:  	case ixgbe_phy_sfp_intel:  	case ixgbe_phy_sfp_unknown: -		switch (adapter->hw.phy.sfp_type) {  		/* SFP+ devices, further checking needed */ +		switch (adapter->hw.phy.sfp_type) {  		case ixgbe_sfp_type_da_cu:  		case ixgbe_sfp_type_da_cu_core0:  		case ixgbe_sfp_type_da_cu_core1: +			ecmd->supported |= SUPPORTED_FIBRE; +			ecmd->advertising |= ADVERTISED_FIBRE;  			ecmd->port = PORT_DA;  			break;  		case ixgbe_sfp_type_sr:  		case ixgbe_sfp_type_lr:  		case ixgbe_sfp_type_srlr_core0:  		case ixgbe_sfp_type_srlr_core1: +			ecmd->supported |= SUPPORTED_FIBRE; +			ecmd->advertising |= ADVERTISED_FIBRE;  			ecmd->port = PORT_FIBRE;  			break;  		case ixgbe_sfp_type_not_present: +			ecmd->supported |= SUPPORTED_FIBRE; +			ecmd->advertising |= ADVERTISED_FIBRE;  			ecmd->port = PORT_NONE;  			break;  		case ixgbe_sfp_type_1g_cu_core0:  		case ixgbe_sfp_type_1g_cu_core1: +			ecmd->supported |= SUPPORTED_TP; +			ecmd->advertising |= ADVERTISED_TP;  			ecmd->port = PORT_TP; -			ecmd->supported = SUPPORTED_TP; -			ecmd->advertising = (ADVERTISED_1000baseT_Full | -			                     ADVERTISED_TP); +			break; +		case ixgbe_sfp_type_1g_sx_core0: +		case ixgbe_sfp_type_1g_sx_core1: +			ecmd->supported |= SUPPORTED_FIBRE; +			ecmd->advertising |= ADVERTISED_FIBRE; +			ecmd->port = PORT_FIBRE;  			break;  		case ixgbe_sfp_type_unknown:  		default: +			ecmd->supported |= SUPPORTED_FIBRE; +			ecmd->advertising |= ADVERTISED_FIBRE;  			ecmd->port = PORT_OTHER;  			break;  		}  		break;  	case ixgbe_phy_xaui: +		ecmd->supported |= SUPPORTED_FIBRE; +		ecmd->advertising |= ADVERTISED_FIBRE;  		ecmd->port = PORT_NONE;  		break;  	case ixgbe_phy_unknown:  	case ixgbe_phy_generic:  	case ixgbe_phy_sfp_unsupported:  	default: +		ecmd->supported |= SUPPORTED_FIBRE; +		ecmd->advertising |= ADVERTISED_FIBRE;  		ecmd->port = PORT_OTHER;  		break;  	} @@ -2113,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,  	struct ixgbe_adapter *adapter = netdev_priv(netdev);  	struct ixgbe_q_vector *q_vector;  	int i; -	int num_vectors;  	u16 tx_itr_param, rx_itr_param;  	bool need_reset = false; @@ -2149,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,  	/* check the old value and enable RSC if necessary */  	need_reset = ixgbe_update_rsc(adapter); -	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) -		num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; -	else -		num_vectors = 1; - -	for (i = 0; i < num_vectors; i++) { +	for (i = 0; i < adapter->num_q_vectors; i++) {  		q_vector = adapter->q_vector[i];  		if (q_vector->tx.count && !q_vector->rx.count)  			/* tx only */ @@ -2274,10 +2245,6 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,  {  	cmd->data = 0; -	/* if RSS is disabled then report no hashing */ -	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) -		return 0; -  	/* Report default options for RSS on ixgbe */  	switch (cmd->flow_type) {  	case TCP_V4_FLOW: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index bc07933d67d..ae73ef14fdf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -38,7 +38,7 @@  /**   * ixgbe_fcoe_clear_ddp - clear the given ddp context - * @ddp - ptr to the ixgbe_fcoe_ddp + * @ddp: ptr to the ixgbe_fcoe_ddp   *   * Returns : none   * @@ -104,10 +104,10 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)  			udelay(100);  	}  	if (ddp->sgl) -		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, +		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,  			     DMA_FROM_DEVICE);  	if (ddp->pool) { -		pci_pool_free(ddp->pool, ddp->udl, ddp->udp); +		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);  		ddp->pool = NULL;  	} @@ -134,6 +134,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,  	struct ixgbe_hw *hw;  	struct ixgbe_fcoe *fcoe;  	struct ixgbe_fcoe_ddp *ddp; +	struct ixgbe_fcoe_ddp_pool *ddp_pool;  	struct scatterlist *sg;  	unsigned int i, j, dmacount;  	unsigned int len; @@ -144,8 +145,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,  	unsigned int thislen = 0;  	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;  	dma_addr_t addr = 0; -	struct pci_pool *pool; -	unsigned int cpu;  	if (!netdev || !sgl)  		return 0; @@ -162,11 +161,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,  		return 0;  	fcoe = &adapter->fcoe; -	if (!fcoe->pool) { -		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); -		return 0; -	} -  	ddp = &fcoe->ddp[xid];  	if (ddp->sgl) {  		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", @@ -175,22 +169,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,  	}  	ixgbe_fcoe_clear_ddp(ddp); + +	if (!fcoe->ddp_pool) { +		e_warn(drv, "No ddp_pool resources allocated\n"); +		return 0; +	} + +	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); +	if (!ddp_pool->pool) { +		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); +		goto out_noddp; +	} +  	/* setup dma from scsi command sgl */ -	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); +	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);  	if (dmacount == 0) {  		e_err(drv, "xid 0x%x DMA map error\n", xid); -		return 0; +		goto out_noddp;  	}  	/* alloc the udl from per cpu ddp pool */ -	cpu = get_cpu(); -	pool = *per_cpu_ptr(fcoe->pool, cpu); -	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp); +	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);  	if (!ddp->udl) {  		e_err(drv, "failed allocated ddp context\n");  		goto out_noddp_unmap;  	} -	ddp->pool = pool; +	ddp->pool = ddp_pool->pool;  	ddp->sgl = sgl;  	ddp->sgc = sgc; @@ -201,7 +205,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,  		while (len) {  			/* max number of buffers allowed in one DDP context */  			if (j >= IXGBE_BUFFCNT_MAX) { -				*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1; +				ddp_pool->noddp++;  				goto out_noddp_free;  			} @@ -241,7 +245,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,  	 */  	if (lastsize == bufflen) {  		if (j >= IXGBE_BUFFCNT_MAX) { -			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1; +			ddp_pool->noddp_ext_buff++;  			goto out_noddp_free;  		} @@ -293,11 +297,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,  	return 1;  out_noddp_free: -	pci_pool_free(pool, ddp->udl, ddp->udp); +	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);  	ixgbe_fcoe_clear_ddp(ddp);  out_noddp_unmap: -	
pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); +	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); +out_noddp:  	put_cpu();  	return 0;  } @@ -409,7 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,  		break;  	/* unmap the sg list when FCPRSP is received */  	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): -		pci_unmap_sg(adapter->pdev, ddp->sgl, +		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,  			     ddp->sgc, DMA_FROM_DEVICE);  		ddp->err = ddp_err;  		ddp->sgl = NULL; @@ -563,44 +568,37 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,  	return 0;  } -static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) +static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)  { -	unsigned int cpu; -	struct pci_pool **pool; +	struct ixgbe_fcoe_ddp_pool *ddp_pool; -	for_each_possible_cpu(cpu) { -		pool = per_cpu_ptr(fcoe->pool, cpu); -		if (*pool) -			pci_pool_destroy(*pool); -	} -	free_percpu(fcoe->pool); -	fcoe->pool = NULL; +	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); +	if (ddp_pool->pool) +		dma_pool_destroy(ddp_pool->pool); +	ddp_pool->pool = NULL;  } -static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) +static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, +				     struct device *dev, +				     unsigned int cpu)  { -	struct ixgbe_fcoe *fcoe = &adapter->fcoe; -	unsigned int cpu; -	struct pci_pool **pool; +	struct ixgbe_fcoe_ddp_pool *ddp_pool; +	struct dma_pool *pool;  	char pool_name[32]; -	fcoe->pool = alloc_percpu(struct pci_pool *); -	if (!fcoe->pool) -		return; +	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); -	/* allocate pci pool for each cpu */ -	for_each_possible_cpu(cpu) { -		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); -		pool = per_cpu_ptr(fcoe->pool, cpu); -		*pool = pci_pool_create(pool_name, -					adapter->pdev, IXGBE_FCPTR_MAX, -					IXGBE_FCPTR_ALIGN, PAGE_SIZE); -		if (!*pool) { -			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); -			ixgbe_fcoe_ddp_pools_free(fcoe); -			return; -		} -	} +	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, +			       IXGBE_FCPTR_ALIGN, PAGE_SIZE); +	if (!pool) +		return -ENOMEM; + +	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); +	ddp_pool->pool = pool; +	ddp_pool->noddp = 0; +	ddp_pool->noddp_ext_buff = 0; + +	return 0;  }  /** @@ -613,132 +611,171 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)   */  void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)  { -	int i, fcoe_q, fcoe_i; +	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];  	struct ixgbe_hw *hw = &adapter->hw; -	struct ixgbe_fcoe *fcoe = &adapter->fcoe; -	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; -	unsigned int cpu; - -	if (!fcoe->pool) { -		spin_lock_init(&fcoe->lock); - -		ixgbe_fcoe_ddp_pools_alloc(adapter); -		if (!fcoe->pool) { -			e_err(drv, "failed to alloc percpu fcoe DDP pools\n"); -			return; -		} - -		/* Extra buffer to be shared by all DDPs for HW work around */ -		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); -		if (fcoe->extra_ddp_buffer == NULL) { -			e_err(drv, "failed to allocated extra DDP buffer\n"); -			goto out_ddp_pools; -		} +	int i, fcoe_q, fcoe_i; +	u32 etqf; -		fcoe->extra_ddp_buffer_dma = -			dma_map_single(&adapter->pdev->dev, -				       fcoe->extra_ddp_buffer, -				       IXGBE_FCBUFF_MIN, -				       DMA_FROM_DEVICE); -		if (dma_mapping_error(&adapter->pdev->dev, -				      fcoe->extra_ddp_buffer_dma)) { -			e_err(drv, "failed to map extra DDP 
buffer\n"); -			goto out_extra_ddp_buffer; -		} +	/* Minimal functionality for FCoE requires at least CRC offloads */ +	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) +		return; -		/* Alloc per cpu mem to count the ddp alloc failure number */ -		fcoe->pcpu_noddp = alloc_percpu(u64); -		if (!fcoe->pcpu_noddp) { -			e_err(drv, "failed to alloc noddp counter\n"); -			goto out_pcpu_noddp_alloc_fail; -		} +	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ +	etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +		etqf |= IXGBE_ETQF_POOL_ENABLE; +		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; +	} +	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); +	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); -		fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64); -		if (!fcoe->pcpu_noddp_ext_buff) { -			e_err(drv, "failed to alloc noddp extra buff cnt\n"); -			goto out_pcpu_noddp_extra_buff_alloc_fail; -		} +	/* leave registers un-configured if FCoE is disabled */ +	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) +		return; -		for_each_possible_cpu(cpu) { -			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0; -			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0; -		} +	/* Use one or more Rx queues for FCoE by redirection table */ +	for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { +		fcoe_i = fcoe->offset + (i % fcoe->indices); +		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; +		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; +		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);  	} +	IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); -	/* Enable L2 eth type filter for FCoE */ -	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), -			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); -	/* Enable L2 eth type filter for FIP */ -	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), -			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); -	if (adapter->ring_feature[RING_F_FCOE].indices) { -		/* Use multiple rx queues for FCoE by redirection table */ -		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { -			fcoe_i = f->mask + i % f->indices; -			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; -			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; -			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); -		} -		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); -		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); -	} else  { -		/* Use single rx queue for FCoE */ -		fcoe_i = f->mask; -		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; -		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); -		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), -				IXGBE_ETQS_QUEUE_EN | -				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); +	/* Enable L2 EtherType filter for FIP */ +	etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +		etqf |= IXGBE_ETQF_POOL_ENABLE; +		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;  	} -	/* send FIP frames to the first FCoE queue */ -	fcoe_i = f->mask; -	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; +	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); + +	/* Send FIP frames to the first FCoE queue */ +	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;  	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),  			IXGBE_ETQS_QUEUE_EN |  			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); -	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO | +	/* Configure FCoE Rx control */ +	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, +			IXGBE_FCRXCTRL_FCCRCBO |  			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); -	return; 
-out_pcpu_noddp_extra_buff_alloc_fail:
-	free_percpu(fcoe->pcpu_noddp);
-out_pcpu_noddp_alloc_fail:
-	dma_unmap_single(&adapter->pdev->dev,
-			 fcoe->extra_ddp_buffer_dma,
-			 IXGBE_FCBUFF_MIN,
-			 DMA_FROM_DEVICE);
-out_extra_ddp_buffer:
-	kfree(fcoe->extra_ddp_buffer);
-out_ddp_pools:
-	ixgbe_fcoe_ddp_pools_free(fcoe);
 }
 /**
- * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
  * @adapter: ixgbe adapter
  *
  * Cleans up outstanding ddp context resources
  *
  * Returns: none
  */
-void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 {
-	int i;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	int cpu, i;

-	if (!fcoe->pool)
+	/* do nothing if no DDP pools were allocated */
+	if (!fcoe->ddp_pool)
 		return;

 	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 		ixgbe_fcoe_ddp_put(adapter->netdev, i);
+
+	for_each_possible_cpu(cpu)
+		ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
 	dma_unmap_single(&adapter->pdev->dev,
 			 fcoe->extra_ddp_buffer_dma,
 			 IXGBE_FCBUFF_MIN,
 			 DMA_FROM_DEVICE);
-	free_percpu(fcoe->pcpu_noddp);
-	free_percpu(fcoe->pcpu_noddp_ext_buff);
 	kfree(fcoe->extra_ddp_buffer);
-	ixgbe_fcoe_ddp_pools_free(fcoe);
+
+	fcoe->extra_ddp_buffer = NULL;
+	fcoe->extra_ddp_buffer_dma = 0;
+}
+
+/**
+ * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Sets up ddp context resources
+ *
+ * Returns: 0 on success or -ENOMEM on failure
+ */
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	struct device *dev = &adapter->pdev->dev;
+	void *buffer;
+	dma_addr_t dma;
+	unsigned int cpu;
+
+	/* do nothing if no DDP pools were allocated */
+	if (!fcoe->ddp_pool)
+		return 0;
+
+	/* Extra buffer to be shared by all DDPs for HW workaround */
+	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+	if (!buffer) {
+		e_err(drv, "failed to allocate extra DDP buffer\n");
+		return -ENOMEM;
+	}
+
+	dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dma)) {
+		e_err(drv, "failed to map extra DDP buffer\n");
+		kfree(buffer);
+		return -ENOMEM;
+	}
+
+	fcoe->extra_ddp_buffer = buffer;
+	fcoe->extra_ddp_buffer_dma = dma;
+
+	/* allocate a DMA pool for each cpu */
+	for_each_possible_cpu(cpu) {
+		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
+		if (!err)
+			continue;
+
+		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+		ixgbe_free_fcoe_ddp_resources(adapter);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+		return -EINVAL;
+
+	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+
+	if (!fcoe->ddp_pool) {
+		e_err(drv, "failed to allocate percpu DDP resources\n");
+		return -ENOMEM;
+	}
+
+	adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+	return 0;
+}
+
+static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+	adapter->netdev->fcoe_ddp_xid = 0;
+
+	if (!fcoe->ddp_pool)
+		return;
+
+	free_percpu(fcoe->ddp_pool);
+	fcoe->ddp_pool = NULL;
+}
 /**
@@ -751,40 +788,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
  */
 int ixgbe_fcoe_enable(struct net_device *netdev)
 {
-	int rc = -EINVAL;
 	struct ixgbe_adapter *adapter = 
netdev_priv(netdev);  	struct ixgbe_fcoe *fcoe = &adapter->fcoe; +	atomic_inc(&fcoe->refcnt);  	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) -		goto out_enable; +		return -EINVAL; -	atomic_inc(&fcoe->refcnt);  	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) -		goto out_enable; +		return -EINVAL;  	e_info(drv, "Enabling FCoE offload features.\n");  	if (netif_running(netdev))  		netdev->netdev_ops->ndo_stop(netdev); -	ixgbe_clear_interrupt_scheme(adapter); +	/* Allocate per CPU memory to track DDP pools */ +	ixgbe_fcoe_ddp_enable(adapter); +	/* enable FCoE and notify stack */  	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; -	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; -	netdev->features |= NETIF_F_FCOE_CRC; -	netdev->features |= NETIF_F_FSO;  	netdev->features |= NETIF_F_FCOE_MTU; -	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; +	netdev_features_change(netdev); +	/* release existing queues and reallocate them */ +	ixgbe_clear_interrupt_scheme(adapter);  	ixgbe_init_interrupt_scheme(adapter); -	netdev_features_change(netdev);  	if (netif_running(netdev))  		netdev->netdev_ops->ndo_open(netdev); -	rc = 0; -out_enable: -	return rc; +	return 0;  }  /** @@ -797,41 +831,35 @@ out_enable:   */  int ixgbe_fcoe_disable(struct net_device *netdev)  { -	int rc = -EINVAL;  	struct ixgbe_adapter *adapter = netdev_priv(netdev); -	struct ixgbe_fcoe *fcoe = &adapter->fcoe; -	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) -		goto out_disable; +	if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) +		return -EINVAL;  	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) -		goto out_disable; - -	if (!atomic_dec_and_test(&fcoe->refcnt)) -		goto out_disable; +		return -EINVAL;  	e_info(drv, "Disabling FCoE offload features.\n"); -	netdev->features &= ~NETIF_F_FCOE_CRC; -	netdev->features &= ~NETIF_F_FSO; -	netdev->features &= ~NETIF_F_FCOE_MTU; -	netdev->fcoe_ddp_xid = 0; -	netdev_features_change(netdev); -  	if (netif_running(netdev))  		netdev->netdev_ops->ndo_stop(netdev); -	ixgbe_clear_interrupt_scheme(adapter); +	/* Free per CPU memory to track DDP pools */ +	ixgbe_fcoe_ddp_disable(adapter); + +	/* disable FCoE and notify stack */  	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; -	adapter->ring_feature[RING_F_FCOE].indices = 0; -	ixgbe_cleanup_fcoe(adapter); +	netdev->features &= ~NETIF_F_FCOE_MTU; + +	netdev_features_change(netdev); + +	/* release existing queues and reallocate them */ +	ixgbe_clear_interrupt_scheme(adapter);  	ixgbe_init_interrupt_scheme(adapter);  	if (netif_running(netdev))  		netdev->netdev_ops->ndo_open(netdev); -	rc = 0; -out_disable: -	return rc; +	return 0;  }  /** @@ -960,3 +988,18 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,  	return 0;  } + +/** + * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to + * @adapter - pointer to the device adapter structure + * + * Return : TC that FCoE is mapped to + */ +u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_IXGBE_DCB +	return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); +#else +	return 0; +#endif +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 1dbed17c810..bf724da9937 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -62,19 +62,24 @@ struct ixgbe_fcoe_ddp {  	struct scatterlist *sgl;  	dma_addr_t udp;  	u64 *udl; -	struct pci_pool *pool; +	struct dma_pool *pool; +}; + +/* per cpu variables */ +struct ixgbe_fcoe_ddp_pool { +	struct 
dma_pool *pool; +	u64 noddp; +	u64 noddp_ext_buff;  };  struct ixgbe_fcoe { -	struct pci_pool **pool; +	struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;  	atomic_t refcnt;  	spinlock_t lock;  	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; -	unsigned char *extra_ddp_buffer; +	void *extra_ddp_buffer;  	dma_addr_t extra_ddp_buffer_dma;  	unsigned long mode; -	u64 __percpu *pcpu_noddp; -	u64 __percpu *pcpu_noddp_ext_buff;  #ifdef CONFIG_IXGBE_DCB  	u8 up;  #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index c377706e81a..17ecbcedd54 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -28,28 +28,83 @@  #include "ixgbe.h"  #include "ixgbe_sriov.h" +#ifdef CONFIG_IXGBE_DCB  /** - * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV   * @adapter: board private structure to initialize   * - * Cache the descriptor ring offsets for RSS to the assigned rings. + * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq.   *   **/ -static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)  { +#ifdef IXGBE_FCOE +	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ +	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];  	int i; +	u16 reg_idx; +	u8 tcs = netdev_get_num_tc(adapter->netdev); -	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) +	/* verify we have DCB queueing enabled before proceeding */ +	if (tcs <= 1)  		return false; -	for (i = 0; i < adapter->num_rx_queues; i++) -		adapter->rx_ring[i]->reg_idx = i; -	for (i = 0; i < adapter->num_tx_queues; i++) -		adapter->tx_ring[i]->reg_idx = i; +	/* verify we have VMDq enabled before proceeding */ +	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) +		return false; + +	/* start at VMDq register offset for SR-IOV enabled setups */ +	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); +	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +		/* If we are greater than indices move to next pool */ +		if ((reg_idx & ~vmdq->mask) >= tcs) +			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); +		adapter->rx_ring[i]->reg_idx = reg_idx; +	} + +	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); +	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { +		/* If we are greater than indices move to next pool */ +		if ((reg_idx & ~vmdq->mask) >= tcs) +			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); +		adapter->tx_ring[i]->reg_idx = reg_idx; +	} + +#ifdef IXGBE_FCOE +	/* nothing to do if FCoE is disabled */ +	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) +		return true; + +	/* The work is already done if the FCoE ring is shared */ +	if (fcoe->offset < tcs) +		return true; + +	/* The FCoE rings exist separately, we need to move their reg_idx */ +	if (fcoe->indices) { +		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); +		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); + +		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; +		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { +			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; +			adapter->rx_ring[i]->reg_idx = reg_idx; +			reg_idx++; +		} + +		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; +		for (i = 
fcoe->offset; i < adapter->num_tx_queues; i++) { +			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; +			adapter->tx_ring[i]->reg_idx = reg_idx; +			reg_idx++; +		} +	} +#endif /* IXGBE_FCOE */  	return true;  } -#ifdef CONFIG_IXGBE_DCB  /* ixgbe_get_first_reg_idx - Return first register index associated with ring */  static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, @@ -64,42 +119,37 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,  	switch (hw->mac.type) {  	case ixgbe_mac_82598EB: -		*tx = tc << 2; -		*rx = tc << 3; +		/* TxQs/TC: 4	RxQs/TC: 8 */ +		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */ +		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */  		break;  	case ixgbe_mac_82599EB:  	case ixgbe_mac_X540:  		if (num_tcs > 4) { -			if (tc < 3) { -				*tx = tc << 5; -				*rx = tc << 4; -			} else if (tc <  5) { -				*tx = ((tc + 2) << 4); -				*rx = tc << 4; -			} else if (tc < num_tcs) { -				*tx = ((tc + 8) << 3); -				*rx = tc << 4; -			} +			/* +			 * TCs    : TC0/1 TC2/3 TC4-7 +			 * TxQs/TC:    32    16     8 +			 * RxQs/TC:    16    16    16 +			 */ +			*rx = tc << 4; +			if (tc < 3) +				*tx = tc << 5;		/*   0,  32,  64 */ +			else if (tc < 5) +				*tx = (tc + 2) << 4;	/*  80,  96 */ +			else +				*tx = (tc + 8) << 3;	/* 104, 112, 120 */  		} else { -			*rx =  tc << 5; -			switch (tc) { -			case 0: -				*tx =  0; -				break; -			case 1: -				*tx = 64; -				break; -			case 2: -				*tx = 96; -				break; -			case 3: -				*tx = 112; -				break; -			default: -				break; -			} +			/* +			 * TCs    : TC0 TC1 TC2/3 +			 * TxQs/TC:  64  32    16 +			 * RxQs/TC:  32  32    32 +			 */ +			*rx = tc << 5; +			if (tc < 2) +				*tx = tc << 6;		/*  0,  64 */ +			else +				*tx = (tc + 4) << 4;	/* 96, 112 */  		} -		break;  	default:  		break;  	} @@ -112,106 +162,115 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,   * Cache the descriptor ring offsets for DCB to the assigned rings.   *   **/ -static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) +static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)  {  	struct net_device *dev = adapter->netdev; -	int i, j, k; +	unsigned int tx_idx, rx_idx; +	int tc, offset, rss_i, i;  	u8 num_tcs = netdev_get_num_tc(dev); -	if (!num_tcs) +	/* verify we have DCB queueing enabled before proceeding */ +	if (num_tcs <= 1)  		return false; -	for (i = 0, k = 0; i < num_tcs; i++) { -		unsigned int tx_s, rx_s; -		u16 count = dev->tc_to_txq[i].count; +	rss_i = adapter->ring_feature[RING_F_RSS].indices; -		ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); -		for (j = 0; j < count; j++, k++) { -			adapter->tx_ring[k]->reg_idx = tx_s + j; -			adapter->rx_ring[k]->reg_idx = rx_s + j; -			adapter->tx_ring[k]->dcb_tc = i; -			adapter->rx_ring[k]->dcb_tc = i; +	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { +		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); +		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { +			adapter->tx_ring[offset + i]->reg_idx = tx_idx; +			adapter->rx_ring[offset + i]->reg_idx = rx_idx; +			adapter->tx_ring[offset + i]->dcb_tc = tc; +			adapter->rx_ring[offset + i]->dcb_tc = tc;  		}  	}  	return true;  } -#endif +#endif  /** - * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov   * @adapter: board private structure to initialize   * - * Cache the descriptor ring offsets for Flow Director to the assigned rings. 
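/*
 * Worked example of the VMDq mask arithmetic used in the ring-caching
 * functions above and below: with vmdq->mask = IXGBE_82599_VMDQ_4Q_MASK
 * (0x7C),
 *
 *   __ALIGN_MASK(1, ~vmdq->mask) = (1 + ~0x7C) & 0x7C = 4
 *
 * i.e. four queues per pool, so the pool at vmdq->offset = 2 starts at
 * reg_idx = 2 * 4 = 8.  (reg_idx & ~vmdq->mask) extracts the queue index
 * inside a pool (0..3 here), and __ALIGN_MASK(reg_idx, ~vmdq->mask)
 * rounds reg_idx up to the next pool boundary once that index reaches
 * the per-pool queue count.
 */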
+ * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used.   * - **/ -static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) + */ +static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)  { +#ifdef IXGBE_FCOE +	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ +	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; +	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];  	int i; -	bool ret = false; +	u16 reg_idx; -	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && -	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { -		for (i = 0; i < adapter->num_rx_queues; i++) -			adapter->rx_ring[i]->reg_idx = i; -		for (i = 0; i < adapter->num_tx_queues; i++) -			adapter->tx_ring[i]->reg_idx = i; -		ret = true; +	/* only proceed if VMDq is enabled */ +	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) +		return false; + +	/* start at VMDq register offset for SR-IOV enabled setups */ +	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); +	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +#ifdef IXGBE_FCOE +		/* Allow first FCoE queue to be mapped as RSS */ +		if (fcoe->offset && (i > fcoe->offset)) +			break; +#endif +		/* If we are greater than indices move to next pool */ +		if ((reg_idx & ~vmdq->mask) >= rss->indices) +			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); +		adapter->rx_ring[i]->reg_idx = reg_idx;  	} -	return ret; -} +#ifdef IXGBE_FCOE +	/* FCoE uses a linear block of queues so just assigning 1:1 */ +	for (; i < adapter->num_rx_queues; i++, reg_idx++) +		adapter->rx_ring[i]->reg_idx = reg_idx; +#endif +	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); +	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {  #ifdef IXGBE_FCOE -/** - * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for FCoE mode to the assigned rings. 
- * - */ -static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) -{ -	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; -	int i; -	u8 fcoe_rx_i = 0, fcoe_tx_i = 0; +		/* Allow first FCoE queue to be mapped as RSS */ +		if (fcoe->offset && (i > fcoe->offset)) +			break; +#endif +		/* If we are greater than indices move to next pool */ +		if ((reg_idx & rss->mask) >= rss->indices) +			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); +		adapter->tx_ring[i]->reg_idx = reg_idx; +	} -	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) -		return false; +#ifdef IXGBE_FCOE +	/* FCoE uses a linear block of queues so just assigning 1:1 */ +	for (; i < adapter->num_tx_queues; i++, reg_idx++) +		adapter->tx_ring[i]->reg_idx = reg_idx; -	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { -		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) -			ixgbe_cache_ring_fdir(adapter); -		else -			ixgbe_cache_ring_rss(adapter); +#endif -		fcoe_rx_i = f->mask; -		fcoe_tx_i = f->mask; -	} -	for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { -		adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; -		adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; -	}  	return true;  } -#endif /* IXGBE_FCOE */  /** - * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS   * @adapter: board private structure to initialize   * - * SR-IOV doesn't use any descriptor rings but changes the default if - * no other mapping is used. + * Cache the descriptor ring offsets for RSS to the assigned rings.   * - */ -static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) + **/ +static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)  { -	adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; -	adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; -	if (adapter->num_vfs) -		return true; -	else -		return false; +	int i; + +	for (i = 0; i < adapter->num_rx_queues; i++) +		adapter->rx_ring[i]->reg_idx = i; +	for (i = 0; i < adapter->num_tx_queues; i++) +		adapter->tx_ring[i]->reg_idx = i; + +	return true;  }  /** @@ -231,186 +290,384 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)  	adapter->rx_ring[0]->reg_idx = 0;  	adapter->tx_ring[0]->reg_idx = 0; -	if (ixgbe_cache_ring_sriov(adapter)) -		return; -  #ifdef CONFIG_IXGBE_DCB -	if (ixgbe_cache_ring_dcb(adapter)) +	if (ixgbe_cache_ring_dcb_sriov(adapter))  		return; -#endif -#ifdef IXGBE_FCOE -	if (ixgbe_cache_ring_fcoe(adapter)) +	if (ixgbe_cache_ring_dcb(adapter))  		return; -#endif /* IXGBE_FCOE */ -	if (ixgbe_cache_ring_fdir(adapter)) +#endif +	if (ixgbe_cache_ring_sriov(adapter))  		return; -	if (ixgbe_cache_ring_rss(adapter)) -		return; +	ixgbe_cache_ring_rss(adapter);  } -/** - * ixgbe_set_sriov_queues: Allocate queues for IOV use - * @adapter: board private structure to initialize - * - * IOV doesn't actually use anything, so just NAK the - * request for now and let the other queue routines - * figure out what to do. - */ -static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) -{ -	return false; -} +#define IXGBE_RSS_16Q_MASK	0xF +#define IXGBE_RSS_8Q_MASK	0x7 +#define IXGBE_RSS_4Q_MASK	0x3 +#define IXGBE_RSS_2Q_MASK	0x1 +#define IXGBE_RSS_DISABLED_MASK	0x0 +#ifdef CONFIG_IXGBE_DCB  /** - * ixgbe_set_rss_queues: Allocate queues for RSS + * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB   * @adapter: board private structure to initialize   * - * This is our "base" multiqueue mode.  
- * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
+ * and VM pools where appropriate.  Also assign queues based on DCB
+ * priorities and map accordingly.
  *
  **/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
 {
-	bool ret = false;
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
+	int i;
+	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+	u16 vmdq_m = 0;
+#ifdef IXGBE_FCOE
+	u16 fcoe_i = 0;
+#endif
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
 
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		f->mask = 0xF;
-		adapter->num_rx_queues = f->indices;
-		adapter->num_tx_queues = f->indices;
-		ret = true;
+	/* verify we have DCB queueing enabled before proceeding */
+	if (tcs <= 1)
+		return false;
+
+	/* verify we have VMDq enabled before proceeding */
+	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+		return false;
+
+	/* Add starting offset to total pool count */
+	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
+
+	/* 16 pools w/ 8 TC per pool */
+	if (tcs > 4) {
+		vmdq_i = min_t(u16, vmdq_i, 16);
+		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
+	/* 32 pools w/ 4 TC per pool */
+	} else {
+		vmdq_i = min_t(u16, vmdq_i, 32);
+		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
 	}
-	return ret;
-}
+#ifdef IXGBE_FCOE
+	/* queues in the remaining pools are available for FCoE */
+	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
 
-/**
- * ixgbe_set_fdir_queues: Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session.  This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
-	bool ret = false;
-	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+#endif
+	/* remove the starting offset from the pool count */
+	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
 
-	f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
-	f_fdir->mask = 0;
+	/* save features for later use */
+	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
 
 	/*
-	 * Use RSS in addition to Flow Director to ensure the best
-	 * distribution of flows across cores, even when an FDIR flow
-	 * isn't matched.
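To make the pool split above concrete: the 82599 exposes 128 hardware queues, so 8-TC mode fits at most 16 pools of 8 queues and 4-TC mode at most 32 pools of 4, and whatever pools the VMDq request leaves unused become FCoE queues. A small sketch of that bookkeeping (the 128-queue total and the masks are device facts; the requested pool counts are hypothetical inputs):

#include <stdio.h>

#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

/* sketch: queues left for FCoE once vmdq_i pools are claimed */
static unsigned int fcoe_left(unsigned int vmdq_i, unsigned int vmdq_m)
{
	unsigned int qpp = __ALIGN_MASK(1, ~vmdq_m);	/* queues per pool */

	return 128 / qpp - vmdq_i;
}

int main(void)
{
	/* 8 TCs: mask 0x78, 16 pools max -> 128/8 - 16 = 0 queues spare */
	printf("8 TC, 16 pools: %u FCoE queues\n", fcoe_left(16, 0x78));
	/* 4 TCs: mask 0x7C, say 8 pools -> 128/4 - 8 = 24 queues spare */
	printf("4 TC,  8 pools: %u FCoE queues\n", fcoe_left(8, 0x7C));
	return 0;
}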
+	 * We do not support DCB, VMDq, and RSS all simultaneously
+	 * so we will disable RSS since it is the lowest priority
 	 */
-	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-		adapter->num_tx_queues = f_fdir->indices;
-		adapter->num_rx_queues = f_fdir->indices;
-		ret = true;
-	} else {
-		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+	adapter->ring_feature[RING_F_RSS].indices = 1;
+	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
+
+	/* disable ATR as it is not supported when VMDq is enabled */
+	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+	adapter->num_rx_pools = vmdq_i;
+	adapter->num_rx_queues_per_pool = tcs;
+
+	adapter->num_tx_queues = vmdq_i * tcs;
+	adapter->num_rx_queues = vmdq_i * tcs;
+
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+		struct ixgbe_ring_feature *fcoe;
+
+		fcoe = &adapter->ring_feature[RING_F_FCOE];
+
+		/* limit ourselves based on feature limits */
+		fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
+		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
+
+		if (fcoe_i) {
+			/* alloc queues for FCoE separately */
+			fcoe->indices = fcoe_i;
+			fcoe->offset = vmdq_i * tcs;
+
+			/* add queues to adapter */
+			adapter->num_tx_queues += fcoe_i;
+			adapter->num_rx_queues += fcoe_i;
+		} else if (tcs > 1) {
+			/* use queue belonging to FCoE TC */
+			fcoe->indices = 1;
+			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
+		} else {
+			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+
+			fcoe->indices = 0;
+			fcoe->offset = 0;
+		}
 	}
-	return ret;
+
+#endif /* IXGBE_FCOE */
+	/* configure TC to queue mapping */
+	for (i = 0; i < tcs; i++)
+		netdev_set_tc_queue(adapter->netdev, i, 1, i);
+
+	return true;
 }
 
+static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+	struct net_device *dev = adapter->netdev;
+	struct ixgbe_ring_feature *f;
+	int rss_i, rss_m, i;
+	int tcs;
+
+	/* Map queue offset and counts onto allocated tx queues */
+	tcs = netdev_get_num_tc(dev);
+
+	/* verify we have DCB queueing enabled before proceeding */
+	if (tcs <= 1)
+		return false;
+
+	/* determine the upper limit for our current DCB mode */
+	rss_i = dev->num_tx_queues / tcs;
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		/* 8 TC w/ 4 queues per TC */
+		rss_i = min_t(u16, rss_i, 4);
+		rss_m = IXGBE_RSS_4Q_MASK;
+	} else if (tcs > 4) {
+		/* 8 TC w/ 8 queues per TC */
+		rss_i = min_t(u16, rss_i, 8);
+		rss_m = IXGBE_RSS_8Q_MASK;
+	} else {
+		/* 4 TC w/ 16 queues per TC */
+		rss_i = min_t(u16, rss_i, 16);
+		rss_m = IXGBE_RSS_16Q_MASK;
+	}
+
+	/* set RSS mask and indices */
+	f = &adapter->ring_feature[RING_F_RSS];
+	rss_i = min_t(int, rss_i, f->limit);
+	f->indices = rss_i;
+	f->mask = rss_m;
+
+	/* disable ATR as it is not supported when multiple TCs are enabled */
+	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
 #ifdef IXGBE_FCOE
+	/* FCoE enabled queues require special configuration indexed
+	 * by feature specific indices and offset. Here we map FCoE
+	 * indices onto the DCB queue pairs allowing FCoE to own
+	 * configuration later.
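The per-TC RSS width picked in ixgbe_set_dcb_queues() follows the hardware queue layouts: the 82598 runs 8 TCs with 4 queues each, while 82599/X540 run either 8 TCs of 8 queues or 4 TCs of 16. A sketch of just that decision, before the dev->num_tx_queues / tcs and f->limit caps are applied (the is_82598 flag is a stand-in for the hw->mac.type test):

#include <stdio.h>

/* sketch of the per-TC RSS upper limit chosen above */
static int dcb_rss_width(int tcs, int is_82598)
{
	if (is_82598)
		return 4;	/* 8 TC w/ 4 queues per TC */
	if (tcs > 4)
		return 8;	/* 8 TC w/ 8 queues per TC */
	return 16;		/* 4 TC w/ 16 queues per TC */
}

int main(void)
{
	printf("82599, 8 TCs: up to %d RSS queues per TC\n", dcb_rss_width(8, 0));
	printf("82599, 4 TCs: up to %d RSS queues per TC\n", dcb_rss_width(4, 0));
	printf("82598, 8 TCs: up to %d RSS queues per TC\n", dcb_rss_width(8, 1));
	return 0;
}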
+	 */
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+		u8 tc = ixgbe_fcoe_get_tc(adapter);
+
+		f = &adapter->ring_feature[RING_F_FCOE];
+		f->indices = min_t(u16, rss_i, f->limit);
+		f->offset = rss_i * tc;
+	}
+
+#endif /* IXGBE_FCOE */
+	for (i = 0; i < tcs; i++)
+		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
+
+	adapter->num_tx_queues = rss_i * tcs;
+	adapter->num_rx_queues = rss_i * tcs;
+
+	return true;
+}
+
+#endif
 /**
- * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
+ * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
  * @adapter: board private structure to initialize
  *
- * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
+ * and VM pools where appropriate.  If RSS is available, then also try to
+ * enable RSS and map accordingly.
  *
  **/
-static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 {
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+	u16 vmdq_m = 0;
+	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
+	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
+#ifdef IXGBE_FCOE
+	u16 fcoe_i = 0;
+#endif
 
-	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+	/* only proceed if SR-IOV is enabled */
+	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 		return false;
 
-	f->indices = min_t(int, num_online_cpus(), f->indices);
+	/* Add starting offset to total pool count */
+	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 
-	adapter->num_rx_queues = 1;
-	adapter->num_tx_queues = 1;
+	/* double check we are limited to maximum pools */
+	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
 
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		e_info(probe, "FCoE enabled with RSS\n");
-		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-			ixgbe_set_fdir_queues(adapter);
-		else
-			ixgbe_set_rss_queues(adapter);
+	/* 64 pool mode with 2 queues per pool */
+	if ((vmdq_i > 32) || (rss_i < 4)) {
+		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
+		rss_m = IXGBE_RSS_2Q_MASK;
+		rss_i = min_t(u16, rss_i, 2);
+	/* 32 pool mode with 4 queues per pool */
+	} else {
+		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
+		rss_m = IXGBE_RSS_4Q_MASK;
+		rss_i = 4;
 	}
 
-	/* adding FCoE rx rings to the end */
-	f->mask = adapter->num_rx_queues;
-	adapter->num_rx_queues += f->indices;
-	adapter->num_tx_queues += f->indices;
+#ifdef IXGBE_FCOE
+	/* queues in the remaining pools are available for FCoE */
+	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
+
+#endif
+	/* remove the starting offset from the pool count */
+	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
+
+	/* save features for later use */
+	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+	/* limit RSS based on user input and save for later use */
+	adapter->ring_feature[RING_F_RSS].indices = rss_i;
+	adapter->ring_feature[RING_F_RSS].mask = rss_m;
+
+	adapter->num_rx_pools = vmdq_i;
+	adapter->num_rx_queues_per_pool = rss_i;
+
+	adapter->num_rx_queues = vmdq_i * rss_i;
+	adapter->num_tx_queues = vmdq_i * rss_i;
+
+	/* disable ATR as it is not supported when VMDq is enabled */
+	
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE +	/* +	 * FCoE can use rings from adjacent buffers to allow RSS +	 * like behavior.  To account for this we need to add the +	 * FCoE indices to the total ring count. +	 */ +	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { +		struct ixgbe_ring_feature *fcoe; + +		fcoe = &adapter->ring_feature[RING_F_FCOE]; + +		/* limit ourselves based on feature limits */ +		fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + +		if (vmdq_i > 1 && fcoe_i) { +			/* reserve no more than number of CPUs */ +			fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); + +			/* alloc queues for FCoE separately */ +			fcoe->indices = fcoe_i; +			fcoe->offset = vmdq_i * rss_i; +		} else { +			/* merge FCoE queues with RSS queues */ +			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); + +			/* limit indices to rss_i if MSI-X is disabled */ +			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) +				fcoe_i = rss_i; + +			/* attempt to reserve some queues for just FCoE */ +			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); +			fcoe->offset = fcoe_i - fcoe->indices; + +			fcoe_i -= rss_i; +		} + +		/* add queues to adapter */ +		adapter->num_tx_queues += fcoe_i; +		adapter->num_rx_queues += fcoe_i; +	} + +#endif  	return true;  } -#endif /* IXGBE_FCOE */ - -/* Artificial max queue cap per traffic class in DCB mode */ -#define DCB_QUEUE_CAP 8 -#ifdef CONFIG_IXGBE_DCB -static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +/** + * ixgbe_set_rss_queues - Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)  { -	int per_tc_q, q, i, offset = 0; -	struct net_device *dev = adapter->netdev; -	int tcs = netdev_get_num_tc(dev); +	struct ixgbe_ring_feature *f; +	u16 rss_i; -	if (!tcs) -		return false; +	/* set mask for 16 queue limit of RSS */ +	f = &adapter->ring_feature[RING_F_RSS]; +	rss_i = f->limit; -	/* Map queue offset and counts onto allocated tx queues */ -	per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP); -	q = min_t(int, num_online_cpus(), per_tc_q); +	f->indices = rss_i; +	f->mask = IXGBE_RSS_16Q_MASK; -	for (i = 0; i < tcs; i++) { -		netdev_set_tc_queue(dev, i, q, offset); -		offset += q; -	} +	/* disable ATR by default, it will be configured below */ +	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +	/* +	 * Use Flow Director in addition to RSS to ensure the best +	 * distribution of flows across cores, even when an FDIR flow +	 * isn't matched. +	 */ +	if (rss_i > 1 && adapter->atr_sample_rate) { +		f = &adapter->ring_feature[RING_F_FDIR]; + +		f->indices = min_t(u16, num_online_cpus(), f->limit); +		rss_i = max_t(u16, rss_i, f->indices); -	adapter->num_tx_queues = q * tcs; -	adapter->num_rx_queues = q * tcs; +		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) +			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; +	}  #ifdef IXGBE_FCOE -	/* FCoE enabled queues require special configuration indexed -	 * by feature specific indices and mask. Here we map FCoE -	 * indices onto the DCB queue pairs allowing FCoE to own -	 * configuration later. +	/* +	 * FCoE can exist on the same rings as standard network traffic +	 * however it is preferred to avoid that if possible.  
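The pool-mode choice in ixgbe_set_sriov_queues() above boils down to a single test: more than 32 pools, or an RSS request below 4 queues, forces the 64-pool/2-queue layout, while everything else gets 32 pools of 4. A sketch with hypothetical pool and RSS requests:

#include <stdio.h>

/*
 * sketch of the SR-IOV layout decision above; the masks named in the
 * output are IXGBE_82599_VMDQ_2Q_MASK (0x7E) and ..._4Q_MASK (0x7C)
 */
static void sriov_layout(unsigned int vmdq_i, unsigned int rss_i)
{
	if (vmdq_i > 32 || rss_i < 4)
		printf("%2u pools, rss %u -> 2Q mode (mask 0x7E), rss_i <= 2\n",
		       vmdq_i, rss_i);
	else
		printf("%2u pools, rss %u -> 4Q mode (mask 0x7C), rss_i = 4\n",
		       vmdq_i, rss_i);
}

int main(void)
{
	sriov_layout(40, 4);	/* too many pools for 4Q mode */
	sriov_layout(16, 4);	/* fits the 32 pool / 4 queue layout */
	sriov_layout(16, 2);	/* user limited RSS below 4 */
	return 0;
}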
In order +	 * to get the best performance we allocate as many FCoE queues +	 * as we can and we place them at the end of the ring array to +	 * avoid sharing queues with standard RSS on systems with 24 or +	 * more CPUs.  	 */  	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { -		u8 prio_tc[MAX_USER_PRIORITY] = {0}; -		int tc; -		struct ixgbe_ring_feature *f = -					&adapter->ring_feature[RING_F_FCOE]; +		struct net_device *dev = adapter->netdev; +		u16 fcoe_i; + +		f = &adapter->ring_feature[RING_F_FCOE]; + +		/* merge FCoE queues with RSS queues */ +		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); +		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); -		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); -		tc = prio_tc[adapter->fcoe.up]; -		f->indices = dev->tc_to_txq[tc].count; -		f->mask = dev->tc_to_txq[tc].offset; +		/* limit indices to rss_i if MSI-X is disabled */ +		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) +			fcoe_i = rss_i; + +		/* attempt to reserve some queues for just FCoE */ +		f->indices = min_t(u16, fcoe_i, f->limit); +		f->offset = fcoe_i - f->indices; +		rss_i = max_t(u16, fcoe_i, rss_i);  	} -#endif + +#endif /* IXGBE_FCOE */ +	adapter->num_rx_queues = rss_i; +	adapter->num_tx_queues = rss_i;  	return true;  } -#endif  /** - * ixgbe_set_num_queues: Allocate queues for device, feature dependent + * ixgbe_set_num_queues - Allocate queues for device, feature dependent   * @adapter: board private structure to initialize   *   * This is the top level queue allocation routine.  The order here is very @@ -420,7 +677,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)   * fallthrough conditions.   *   **/ -static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)  {  	/* Start with base case */  	adapter->num_rx_queues = 1; @@ -428,38 +685,18 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)  	adapter->num_rx_pools = adapter->num_rx_queues;  	adapter->num_rx_queues_per_pool = 1; -	if (ixgbe_set_sriov_queues(adapter)) -		goto done; -  #ifdef CONFIG_IXGBE_DCB +	if (ixgbe_set_dcb_sriov_queues(adapter)) +		return; +  	if (ixgbe_set_dcb_queues(adapter)) -		goto done; +		return;  #endif -#ifdef IXGBE_FCOE -	if (ixgbe_set_fcoe_queues(adapter)) -		goto done; - -#endif /* IXGBE_FCOE */ -	if (ixgbe_set_fdir_queues(adapter)) -		goto done; - -	if (ixgbe_set_rss_queues(adapter)) -		goto done; - -	/* fallback to base case */ -	adapter->num_rx_queues = 1; -	adapter->num_tx_queues = 1; - -done: -	if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) || -	    (adapter->netdev->reg_state == NETREG_UNREGISTERING)) -		return 0; +	if (ixgbe_set_sriov_queues(adapter)) +		return; -	/* Notify the stack of the (possibly) reduced queue counts. */ -	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); -	return netif_set_real_num_rx_queues(adapter->netdev, -					    adapter->num_rx_queues); +	ixgbe_set_rss_queues(adapter);  }  static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, @@ -507,8 +744,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,  		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of  		 * vectors we were allocated.  		 
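The FCoE reservation above is plain offset arithmetic: grow the ring count toward the CPU count, give FCoE up to its limit of rings at the tail, and leave the head to RSS. A sketch with made-up counts (16 RSS rings, an FCoE limit of 8, 24 online CPUs, MSI-X assumed enabled):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int rss_i = 16, fcoe_limit = 8, cpus = 24;
	unsigned int fcoe_i, indices, offset;

	fcoe_i = min_u(fcoe_limit + rss_i, cpus);	/* total rings wanted */
	indices = min_u(fcoe_i, fcoe_limit);		/* rings just for FCoE */
	offset = fcoe_i - indices;			/* FCoE block starts here */

	/* with these inputs the FCoE rings sit entirely past the RSS rings */
	printf("FCoE owns rings %u..%u, RSS keeps 0..%u\n",
	       offset, offset + indices - 1, offset - 1);
	return 0;
}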
*/ -		adapter->num_msix_vectors = min(vectors, -				   adapter->max_msix_q_vectors + NON_Q_VECTORS); +		vectors -= NON_Q_VECTORS; +		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);  	}  } @@ -632,8 +869,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,  		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {  			struct ixgbe_ring_feature *f;  			f = &adapter->ring_feature[RING_F_FCOE]; -			if ((rxr_idx >= f->mask) && -			    (rxr_idx < f->mask + f->indices)) +			if ((rxr_idx >= f->offset) && +			    (rxr_idx < f->offset + f->indices))  				set_bit(__IXGBE_RX_FCOE, &ring->state);  		} @@ -695,7 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)   **/  static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)  { -	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; +	int q_vectors = adapter->num_q_vectors;  	int rxr_remaining = adapter->num_rx_queues;  	int txr_remaining = adapter->num_tx_queues;  	int rxr_idx = 0, txr_idx = 0, v_idx = 0; @@ -739,10 +976,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)  	return 0;  err_out: -	while (v_idx) { -		v_idx--; +	adapter->num_tx_queues = 0; +	adapter->num_rx_queues = 0; +	adapter->num_q_vectors = 0; + +	while (v_idx--)  		ixgbe_free_q_vector(adapter, v_idx); -	}  	return -ENOMEM;  } @@ -757,14 +996,13 @@ err_out:   **/  static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)  { -	int v_idx, q_vectors; +	int v_idx = adapter->num_q_vectors; -	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) -		q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; -	else -		q_vectors = 1; +	adapter->num_tx_queues = 0; +	adapter->num_rx_queues = 0; +	adapter->num_q_vectors = 0; -	for (v_idx = 0; v_idx < q_vectors; v_idx++) +	while (v_idx--)  		ixgbe_free_q_vector(adapter, v_idx);  } @@ -788,11 +1026,10 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)   * Attempt to configure the interrupts using the best available   * capabilities of the hardware and the kernel.   **/ -static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)  {  	struct ixgbe_hw *hw = &adapter->hw; -	int err = 0; -	int vector, v_budget; +	int vector, v_budget, err;  	/*  	 * It's easy to be greedy for MSI-X vectors, but it really @@ -825,38 +1062,41 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)  		ixgbe_acquire_msix_vectors(adapter, v_budget);  		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) -			goto out; +			return;  	} -	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; -	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; -	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { -		e_err(probe, -		      "ATR is not supported while multiple " -		      "queues are disabled.  
Disabling Flow Director\n"); +	/* disable DCB if number of TCs exceeds 1 */ +	if (netdev_get_num_tc(adapter->netdev) > 1) { +		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n"); +		netdev_reset_tc(adapter->netdev); + +		if (adapter->hw.mac.type == ixgbe_mac_82598EB) +			adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + +		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; +		adapter->temp_dcb_cfg.pfc_mode_enable = false; +		adapter->dcb_cfg.pfc_mode_enable = false;  	} -	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; -	adapter->atr_sample_rate = 0; -	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) -		ixgbe_disable_sriov(adapter); +	adapter->dcb_cfg.num_tcs.pg_tcs = 1; +	adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + +	/* disable SR-IOV */ +	ixgbe_disable_sriov(adapter); -	err = ixgbe_set_num_queues(adapter); -	if (err) -		return err; +	/* disable RSS */ +	adapter->ring_feature[RING_F_RSS].limit = 1; + +	ixgbe_set_num_queues(adapter); +	adapter->num_q_vectors = 1;  	err = pci_enable_msi(adapter->pdev); -	if (!err) { -		adapter->flags |= IXGBE_FLAG_MSI_ENABLED; -	} else { +	if (err) {  		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,  			     "Unable to allocate MSI interrupt, "  			     "falling back to legacy.  Error: %d\n", err); -		/* reset err */ -		err = 0; +		return;  	} - -out: -	return err; +	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;  }  /** @@ -874,15 +1114,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)  	int err;  	/* Number of supported queues */ -	err = ixgbe_set_num_queues(adapter); -	if (err) -		return err; +	ixgbe_set_num_queues(adapter); -	err = ixgbe_set_interrupt_capability(adapter); -	if (err) { -		e_dev_err("Unable to setup interrupt capabilities\n"); -		goto err_set_interrupt; -	} +	/* Set interrupt mode */ +	ixgbe_set_interrupt_capability(adapter);  	err = ixgbe_alloc_q_vectors(adapter);  	if (err) { @@ -902,7 +1137,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)  err_alloc_q_vectors:  	ixgbe_reset_interrupt_capability(adapter); -err_set_interrupt:  	return err;  } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e242104ab47..3b6784cf134 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -516,7 +516,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)  			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);  } -/* +/**   * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors   * @adapter: pointer to adapter struct   * @direction: 0 for Rx, 1 for Tx, -1 for other causes @@ -790,12 +790,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,  		total_packets += tx_buffer->gso_segs;  #ifdef CONFIG_IXGBE_PTP -		if (unlikely(tx_buffer->tx_flags & -			     IXGBE_TX_FLAGS_TSTAMP)) -			ixgbe_ptp_tx_hwtstamp(q_vector, -					      tx_buffer->skb); - +		if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP)) +			ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);  #endif +  		/* free the skb */  		dev_kfree_skb_any(tx_buffer->skb); @@ -995,7 +993,6 @@ out_no_update:  static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)  { -	int num_q_vectors;  	int i;  	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) @@ -1004,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)  	/* always use CB2 mode, difference is masked in the CB driver */  	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); -	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) -		
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; -	else -		num_q_vectors = 1; - -	for (i = 0; i < num_q_vectors; i++) { +	for (i = 0; i < adapter->num_q_vectors; i++) {  		adapter->q_vector[i]->cpu = -1;  		ixgbe_update_dca(adapter->q_vector[i]);  	} @@ -1399,8 +1391,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,  	ixgbe_rx_checksum(rx_ring, rx_desc, skb);  #ifdef CONFIG_IXGBE_PTP -	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)) -		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); +	ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);  #endif  	if ((dev->features & NETIF_F_HW_VLAN_RX) && @@ -1526,8 +1517,8 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,  	 * 60 bytes if the skb->len is less than 60 for skb_pad.  	 */  	pull_len = skb_frag_size(frag); -	if (pull_len > 256) -		pull_len = ixgbe_get_headlen(va, pull_len); +	if (pull_len > IXGBE_RX_HDR_SIZE) +		pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);  	/* align pull length to size of long to optimize memcpy performance */  	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); @@ -1834,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,  static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)  {  	struct ixgbe_q_vector *q_vector; -	int q_vectors, v_idx; +	int v_idx;  	u32 mask; -	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; -  	/* Populate MSIX to EITR Select */  	if (adapter->num_vfs > 32) {  		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; @@ -1849,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)  	 * Populate the IVAR table and set the ITR values to the  	 * corresponding register.  	 */ -	for (v_idx = 0; v_idx < q_vectors; v_idx++) { +	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {  		struct ixgbe_ring *ring;  		q_vector = adapter->q_vector[v_idx]; @@ -2413,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)  static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)  {  	struct net_device *netdev = adapter->netdev; -	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;  	int vector, err;  	int ri = 0, ti = 0; -	for (vector = 0; vector < q_vectors; vector++) { +	for (vector = 0; vector < adapter->num_q_vectors; vector++) {  		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];  		struct msix_entry *entry = &adapter->msix_entries[vector]; @@ -2572,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)  static void ixgbe_free_irq(struct ixgbe_adapter *adapter)  { -	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { -		int i, q_vectors; +	int vector; -		q_vectors = adapter->num_msix_vectors; -		i = q_vectors - 1; -		free_irq(adapter->msix_entries[i].vector, adapter); -		i--; +	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { +		free_irq(adapter->pdev->irq, adapter); +		return; +	} -		for (; i >= 0; i--) { -			/* free only the irqs that were actually requested */ -			if (!adapter->q_vector[i]->rx.ring && -			    !adapter->q_vector[i]->tx.ring) -				continue; +	for (vector = 0; vector < adapter->num_q_vectors; vector++) { +		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; +		struct msix_entry *entry = &adapter->msix_entries[vector]; -			/* clear the affinity_mask in the IRQ descriptor */ -			irq_set_affinity_hint(adapter->msix_entries[i].vector, -					      NULL); +		/* free only the irqs that were actually requested */ +		if (!q_vector->rx.ring && !q_vector->tx.ring) +			continue; -			
free_irq(adapter->msix_entries[i].vector, -				 adapter->q_vector[i]); -		} -	} else { -		free_irq(adapter->pdev->irq, adapter); +		/* clear the affinity_mask in the IRQ descriptor */ +		irq_set_affinity_hint(entry->vector, NULL); + +		free_irq(entry->vector, q_vector);  	} + +	free_irq(adapter->msix_entries[vector++].vector, adapter);  }  /** @@ -2619,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)  	}  	IXGBE_WRITE_FLUSH(&adapter->hw);  	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { -		int i; -		for (i = 0; i < adapter->num_msix_vectors; i++) -			synchronize_irq(adapter->msix_entries[i].vector); +		int vector; + +		for (vector = 0; vector < adapter->num_q_vectors; vector++) +			synchronize_irq(adapter->msix_entries[vector].vector); + +		synchronize_irq(adapter->msix_entries[vector++].vector);  	} else {  		synchronize_irq(adapter->pdev->irq);  	} @@ -2699,8 +2688,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,  		   32;		/* PTHRESH = 32 */  	/* reinitialize flowdirector state */ -	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && -	    adapter->atr_sample_rate) { +	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {  		ring->atr_sample_rate = adapter->atr_sample_rate;  		ring->atr_count = 0;  		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); @@ -2730,8 +2718,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,  static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)  {  	struct ixgbe_hw *hw = &adapter->hw; -	u32 rttdcs; -	u32 reg; +	u32 rttdcs, mtqc;  	u8 tcs = netdev_get_num_tc(adapter->netdev);  	if (hw->mac.type == ixgbe_mac_82598EB) @@ -2743,28 +2730,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)  	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);  	/* set transmit pool layout */ -	switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { -	case (IXGBE_FLAG_SRIOV_ENABLED): -		IXGBE_WRITE_REG(hw, IXGBE_MTQC, -				(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); -		break; -	default: -		if (!tcs) -			reg = IXGBE_MTQC_64Q_1PB; -		else if (tcs <= 4) -			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +		mtqc = IXGBE_MTQC_VT_ENA; +		if (tcs > 4) +			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; +		else if (tcs > 1) +			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; +		else if (adapter->ring_feature[RING_F_RSS].indices == 4) +			mtqc |= IXGBE_MTQC_32VF;  		else -			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; +			mtqc |= IXGBE_MTQC_64VF; +	} else { +		if (tcs > 4) +			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; +		else if (tcs > 1) +			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; +		else +			mtqc = IXGBE_MTQC_64Q_1PB; +	} -		IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); +	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); -		/* Enable Security TX Buffer IFG for multiple pb */ -		if (tcs) { -			reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); -			reg |= IXGBE_SECTX_DCB; -			IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); -		} -		break; +	/* Enable Security TX Buffer IFG for multiple pb */ +	if (tcs) { +		u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); +		sectx |= IXGBE_SECTX_DCB; +		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);  	}  	/* re-enable the arbiter */ @@ -2858,40 +2849,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)  static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,  				   struct ixgbe_ring *rx_ring)  { +	struct ixgbe_hw *hw = &adapter->hw;  	u32 srrctl;  	u8 reg_idx = rx_ring->reg_idx; -	switch (adapter->hw.mac.type) { -	case 
ixgbe_mac_82598EB: { -		struct ixgbe_ring_feature *feature = adapter->ring_feature; -		const int mask = feature[RING_F_RSS].mask; -		reg_idx = reg_idx & mask; -	} -		break; -	case ixgbe_mac_82599EB: -	case ixgbe_mac_X540: -	default: -		break; -	} - -	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); +	if (hw->mac.type == ixgbe_mac_82598EB) { +		u16 mask = adapter->ring_feature[RING_F_RSS].mask; -	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; -	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; -	if (adapter->num_vfs) -		srrctl |= IXGBE_SRRCTL_DROP_EN; +		/* +		 * if VMDq is not active we must program one srrctl register +		 * per RSS queue since we have enabled RDRXCTL.MVMEN +		 */ +		reg_idx &= mask; +	} -	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & -		  IXGBE_SRRCTL_BSIZEHDR_MASK; +	/* configure header buffer length, needed for RSC */ +	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; +	/* configure the packet buffer length */  #if PAGE_SIZE > IXGBE_MAX_RXBUFFER  	srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;  #else  	srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;  #endif + +	/* configure descriptor type */  	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; -	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); +	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);  }  static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) @@ -2903,11 +2888,15 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)  	u32 mrqc = 0, reta = 0;  	u32 rxcsum;  	int i, j; -	u8 tcs = netdev_get_num_tc(adapter->netdev); -	int maxq = adapter->ring_feature[RING_F_RSS].indices; +	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; -	if (tcs) -		maxq = min(maxq, adapter->num_tx_queues / tcs); +	/* +	 * Program table for at least 2 queues w/ SR-IOV so that VFs can +	 * make full use of any rings they may have.  We will use the +	 * PSRTYPE register to control how many rings we use within the PF. +	 */ +	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) +		rss_i = 2;  	/* Fill out hash function seeds */  	for (i = 0; i < 10; i++) @@ -2915,7 +2904,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)  	/* Fill out redirection table */  	for (i = 0, j = 0; i < 128; i++, j++) { -		if (j == maxq) +		if (j == rss_i)  			j = 0;  		/* reta = 4-byte sliding window of  		 * 0x00..(indices-1)(indices-1)00..etc. 
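The redirection-table fill sketched in the comment above cycles queue indices 0..rss_i-1 while packing four 8-bit entries into each 32-bit RETA register. A simplified user-space version (the entry encoding is reduced to the bare index and the register write is replaced by a printf; rss_i = 3 makes the sliding window visible):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	unsigned int rss_i = 3;
	uint32_t reta = 0;

	for (int i = 0, j = 0; i < 128; i++, j++) {
		if (j == rss_i)
			j = 0;
		reta = (reta << 8) | j;		/* slide the next entry in */
		if ((i & 3) == 3 && i < 16)	/* show the first 4 registers */
			printf("RETA[%d] = 0x%08" PRIX32 "\n", i >> 2, reta);
	}
	return 0;
}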
*/ @@ -2929,35 +2918,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)  	rxcsum |= IXGBE_RXCSUM_PCSD;  	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); -	if (adapter->hw.mac.type == ixgbe_mac_82598EB && -	    (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { -		mrqc = IXGBE_MRQC_RSSEN; +	if (adapter->hw.mac.type == ixgbe_mac_82598EB) { +		if (adapter->ring_feature[RING_F_RSS].mask) +			mrqc = IXGBE_MRQC_RSSEN;  	} else { -		int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED -					     | IXGBE_FLAG_SRIOV_ENABLED); +		u8 tcs = netdev_get_num_tc(adapter->netdev); -		switch (mask) { -		case (IXGBE_FLAG_RSS_ENABLED): -			if (!tcs) -				mrqc = IXGBE_MRQC_RSSEN; -			else if (tcs <= 4) -				mrqc = IXGBE_MRQC_RTRSS4TCEN; +		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +			if (tcs > 4) +				mrqc = IXGBE_MRQC_VMDQRT8TCEN;	/* 8 TCs */ +			else if (tcs > 1) +				mrqc = IXGBE_MRQC_VMDQRT4TCEN;	/* 4 TCs */ +			else if (adapter->ring_feature[RING_F_RSS].indices == 4) +				mrqc = IXGBE_MRQC_VMDQRSS32EN;  			else +				mrqc = IXGBE_MRQC_VMDQRSS64EN; +		} else { +			if (tcs > 4)  				mrqc = IXGBE_MRQC_RTRSS8TCEN; -			break; -		case (IXGBE_FLAG_SRIOV_ENABLED): -			mrqc = IXGBE_MRQC_VMDQEN; -			break; -		default: -			break; +			else if (tcs > 1) +				mrqc = IXGBE_MRQC_RTRSS4TCEN; +			else +				mrqc = IXGBE_MRQC_RSSEN;  		}  	}  	/* Perform hash on these packet types */ -	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 -	      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP -	      | IXGBE_MRQC_RSS_FIELD_IPV6 -	      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; +	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | +		IXGBE_MRQC_RSS_FIELD_IPV4_TCP | +		IXGBE_MRQC_RSS_FIELD_IPV6 | +		IXGBE_MRQC_RSS_FIELD_IPV6_TCP;  	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)  		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; @@ -3108,6 +3098,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,  static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)  {  	struct ixgbe_hw *hw = &adapter->hw; +	int rss_i = adapter->ring_feature[RING_F_RSS].indices;  	int p;  	/* PSRTYPE must be initialized in non 82598 adapters */ @@ -3120,58 +3111,69 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)  	if (hw->mac.type == ixgbe_mac_82598EB)  		return; -	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) -		psrtype |= (adapter->num_rx_queues_per_pool << 29); +	if (rss_i > 3) +		psrtype |= 2 << 29; +	else if (rss_i > 1) +		psrtype |= 1 << 29;  	for (p = 0; p < adapter->num_rx_pools; p++) -		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), +		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),  				psrtype);  }  static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)  {  	struct ixgbe_hw *hw = &adapter->hw; -	u32 gcr_ext; -	u32 vt_reg_bits;  	u32 reg_offset, vf_shift; -	u32 vmdctl; +	u32 gcr_ext, vmdctl;  	int i;  	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))  		return;  	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); -	vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; -	vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); -	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); +	vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; +	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; +	vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; +	vmdctl |= IXGBE_VT_CTL_REPLEN; +	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); -	vf_shift = adapter->num_vfs % 32; -	reg_offset = (adapter->num_vfs >= 32) ? 1 : 0; +	vf_shift = VMDQ_P(0) % 32; +	reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0;  	/* Enable only the PF's pool for Tx/Rx */ -	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); -	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); -	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); -	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); +	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); +	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); +	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); +	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);  	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);  	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */ -	hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); +	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));  	/*  	 * Set up VF register offsets for selected VT Mode,  	 * i.e. 32 or 64 VFs for SR-IOV  	 */ -	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); -	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; -	gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; +	switch (adapter->ring_feature[RING_F_VMDQ].mask) { +	case IXGBE_82599_VMDQ_8Q_MASK: +		gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; +		break; +	case IXGBE_82599_VMDQ_4Q_MASK: +		gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; +		break; +	default: +		gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; +		break; +	} +  	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);  	/* enable Tx loopback for VF/PF communication */  	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); +  	/* Enable MAC Anti-Spoofing */ -	hw->mac.ops.set_mac_anti_spoofing(hw, -					   (adapter->num_vfs != 0), +	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),  					  adapter->num_vfs);  	/* For VFs that have spoof checking turned off */  	for (i = 0; i < adapter->num_vfs; i++) { @@ -3307,10 +3309,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)  {  	struct ixgbe_adapter *adapter = netdev_priv(netdev);  	struct ixgbe_hw *hw = &adapter->hw; -	int pool_ndx = adapter->num_vfs;  	/* add VID to filter table */ -	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); +	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);  	set_bit(vid, adapter->active_vlans);  	return 0; @@ -3320,10 +3321,9 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)  {  	struct ixgbe_adapter *adapter = netdev_priv(netdev);  	struct ixgbe_hw *hw = &adapter->hw; -	int pool_ndx = adapter->num_vfs;  	/* remove VID from filter table */ -	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); +	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);  	clear_bit(vid, adapter->active_vlans);  	return 0; @@ -3441,15 +3441,18 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)  {  	struct ixgbe_adapter *adapter = netdev_priv(netdev);  	struct ixgbe_hw *hw = &adapter->hw; -	unsigned int vfn = adapter->num_vfs; -	unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; +	unsigned int rar_entries = hw->mac.num_rar_entries - 1;  	int count = 0; +	/* In SR-IOV mode significantly less RAR entries are available */ +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) +		rar_entries = IXGBE_MAX_PF_MACVLANS - 1; +  	/* return ENOMEM indicating insufficient memory for addresses */  	if (netdev_uc_count(netdev) > rar_entries)  		return -ENOMEM; -	if (!netdev_uc_empty(netdev) && rar_entries) { +	if (!netdev_uc_empty(netdev)) {  		struct netdev_hw_addr *ha;  		/* return error if we do not support writing to RAR table */  		if (!hw->mac.ops.set_rar) @@ -3459,7 +3462,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)  			if 
(!rar_entries)  				break;  			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, -					    vfn, IXGBE_RAH_AV); +					    VMDQ_P(0), IXGBE_RAH_AV);  			count++;  		}  	} @@ -3533,12 +3536,14 @@ void ixgbe_set_rx_mode(struct net_device *netdev)  		vmolr |= IXGBE_VMOLR_ROPE;  	} -	if (adapter->num_vfs) { +	if (adapter->num_vfs)  		ixgbe_restore_vf_multicasts(adapter); -		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & + +	if (hw->mac.type != ixgbe_mac_82598EB) { +		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &  			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |  			   IXGBE_VMOLR_ROPE); -		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); +		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);  	}  	/* This is useful for sniffing bad packets. */ @@ -3564,37 +3569,21 @@ void ixgbe_set_rx_mode(struct net_device *netdev)  static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)  {  	int q_idx; -	struct ixgbe_q_vector *q_vector; -	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - -	/* legacy and MSI only use one vector */ -	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) -		q_vectors = 1; -	for (q_idx = 0; q_idx < q_vectors; q_idx++) { -		q_vector = adapter->q_vector[q_idx]; -		napi_enable(&q_vector->napi); -	} +	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) +		napi_enable(&adapter->q_vector[q_idx]->napi);  }  static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)  {  	int q_idx; -	struct ixgbe_q_vector *q_vector; -	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; -	/* legacy and MSI only use one vector */ -	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) -		q_vectors = 1; - -	for (q_idx = 0; q_idx < q_vectors; q_idx++) { -		q_vector = adapter->q_vector[q_idx]; -		napi_disable(&q_vector->napi); -	} +	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) +		napi_disable(&adapter->q_vector[q_idx]->napi);  }  #ifdef CONFIG_IXGBE_DCB -/* +/**   * ixgbe_configure_dcb - Configure DCB hardware   * @adapter: ixgbe adapter struct   * @@ -3641,19 +3630,16 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)  	/* Enable RSS Hash per TC */  	if (hw->mac.type != ixgbe_mac_82598EB) { -		int i; -		u32 reg = 0; - -		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { -			u8 msb = 0; -			u8 cnt = adapter->netdev->tc_to_txq[i].count; +		u32 msb = 0; +		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; -			while (cnt >>= 1) -				msb++; - -			reg |= msb << IXGBE_RQTC_SHIFT_TC(i); +		while (rss_i) { +			msb++; +			rss_i >>= 1;  		} -		IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); + +		/* write msb to all 8 TCs in one write */ +		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);  	}  }  #endif @@ -3661,11 +3647,11 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)  /* Additional bittime to account for IXGBE framing */  #define IXGBE_ETH_FRAMING 20 -/* +/**   * ixgbe_hpbthresh - calculate high water mark for flow control   *   * @adapter: board private structure to calculate for - * @pb - packet buffer to calculate + * @pb: packet buffer to calculate   */  static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)  { @@ -3679,18 +3665,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)  #ifdef IXGBE_FCOE  	/* FCoE traffic class uses FCOE jumbo frames */ -	if (dev->features & NETIF_F_FCOE_MTU) { -		int fcoe_pb = 0; - -#ifdef CONFIG_IXGBE_DCB -		fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up); +	if ((dev->features & NETIF_F_FCOE_MTU) && +	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && +	    (pb 
== ixgbe_fcoe_get_tc(adapter)))
+		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
 #endif
 
-		if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
-			tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-	}
-#endif
-
 	/* Calculate delay value for device */
 	switch (hw->mac.type) {
 	case ixgbe_mac_X540:
@@ -3725,11 +3705,11 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
 	return marker;
 }
 
-/*
+/**
  * ixgbe_lpbthresh - calculate low water mark for flow control
  *
  * @adapter: board private structure to calculate for
- * @pb - packet buffer to calculate
+ * @pb: packet buffer to calculate
 */
 static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
 {
@@ -3830,12 +3810,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 	ixgbe_set_rx_mode(adapter->netdev);
 	ixgbe_restore_vlan(adapter);
 
-#ifdef IXGBE_FCOE
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-		ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
@@ -3865,6 +3839,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 
 	ixgbe_configure_virtualization(adapter);
 
+#ifdef IXGBE_FCOE
+	/* configure FCoE L2 filters, redirection table, and Rx control */
+	ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
 	ixgbe_configure_tx(adapter);
 	ixgbe_configure_rx(adapter);
 }
@@ -3973,7 +3952,18 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 		gpie &= ~IXGBE_GPIE_VTMODE_MASK;
-		gpie |= IXGBE_GPIE_VTMODE_64;
+
+		switch (adapter->ring_feature[RING_F_VMDQ].mask) {
+		case IXGBE_82599_VMDQ_8Q_MASK:
+			gpie |= IXGBE_GPIE_VTMODE_16;
+			break;
+		case IXGBE_82599_VMDQ_4Q_MASK:
+			gpie |= IXGBE_GPIE_VTMODE_32;
+			break;
+		default:
+			gpie |= IXGBE_GPIE_VTMODE_64;
+			break;
+		}
 	}
 
 	/* Enable Thermal over heat sensor interrupt */
@@ -4131,8 +4121,11 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 
 	/* reprogram the RAR[0] in case user changed it.
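The GPIE VT-mode switch here and the GCR_EXT switch in ixgbe_configure_virtualization() derive the same pool layout from the stored VMDq mask. A sketch of the mapping (mask constants as defined in ixgbe.h):

#include <stdio.h>

/* sketch: 8 queues/pool -> 16 pools, 4 -> 32, default (2) -> 64 */
static const char *vt_mode(unsigned int vmdq_mask)
{
	switch (vmdq_mask) {
	case 0x78:		/* IXGBE_82599_VMDQ_8Q_MASK */
		return "VT mode 16";
	case 0x7C:		/* IXGBE_82599_VMDQ_4Q_MASK */
		return "VT mode 32";
	default:		/* 2 queues per pool */
		return "VT mode 64";
	}
}

int main(void)
{
	printf("mask 0x78 -> %s\n", vt_mode(0x78));
	printf("mask 0x7C -> %s\n", vt_mode(0x7C));
	printf("mask 0x7E -> %s\n", vt_mode(0x7E));
	return 0;
}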
*/ -	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, -			    IXGBE_RAH_AV); +	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); + +	/* update SAN MAC vmdq pool selection */ +	if (hw->mac.san_mac_rar_index) +		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));  }  /** @@ -4413,32 +4406,29 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)  	/* Set capability flags */  	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); -	adapter->ring_feature[RING_F_RSS].indices = rss; -	adapter->flags |= IXGBE_FLAG_RSS_ENABLED; +	adapter->ring_feature[RING_F_RSS].limit = rss;  	switch (hw->mac.type) {  	case ixgbe_mac_82598EB:  		if (hw->device_id == IXGBE_DEV_ID_82598AT)  			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; -		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; +		adapter->max_q_vectors = MAX_Q_VECTORS_82598;  		break;  	case ixgbe_mac_X540:  		adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;  	case ixgbe_mac_82599EB: -		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; +		adapter->max_q_vectors = MAX_Q_VECTORS_82599;  		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;  		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;  		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)  			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;  		/* Flow Director hash filters enabled */ -		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;  		adapter->atr_sample_rate = 20; -		adapter->ring_feature[RING_F_FDIR].indices = +		adapter->ring_feature[RING_F_FDIR].limit =  							 IXGBE_MAX_FDIR_INDICES;  		adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;  #ifdef IXGBE_FCOE  		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;  		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; -		adapter->ring_feature[RING_F_FCOE].indices = 0;  #ifdef CONFIG_IXGBE_DCB  		/* Default traffic class to use for FCoE */  		adapter->fcoe.up = IXGBE_FCOE_DEFTC; @@ -4449,6 +4439,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)  		break;  	} +#ifdef IXGBE_FCOE +	/* FCoE support exists, always init the FCoE lock */ +	spin_lock_init(&adapter->fcoe.lock); + +#endif  	/* n-tuple support exists, always init our spinlock */  	spin_lock_init(&adapter->fdir_perfect_lock); @@ -4497,6 +4492,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)  	hw->fc.send_xon = true;  	hw->fc.disable_fc_autoneg = false; +#ifdef CONFIG_PCI_IOV +	/* assign number of SR-IOV VFs */ +	if (hw->mac.type != ixgbe_mac_82598EB) +		adapter->num_vfs = (max_vfs > 63) ? 
0 : max_vfs; + +#endif  	/* enable itr by default in dynamic mode */  	adapter->rx_itr_setting = 1;  	adapter->tx_itr_setting = 1; @@ -4588,10 +4589,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)  		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);  		if (!err)  			continue; +  		e_err(probe, "Allocation for Tx Queue %u failed\n", i); -		break; +		goto err_setup_tx;  	} +	return 0; +err_setup_tx: +	/* rewind the index freeing the rings as we go */ +	while (i--) +		ixgbe_free_tx_resources(adapter->tx_ring[i]);  	return err;  } @@ -4666,10 +4673,20 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)  		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);  		if (!err)  			continue; +  		e_err(probe, "Allocation for Rx Queue %u failed\n", i); -		break; +		goto err_setup_rx;  	} +#ifdef IXGBE_FCOE +	err = ixgbe_setup_fcoe_ddp_resources(adapter); +	if (!err) +#endif +		return 0; +err_setup_rx: +	/* rewind the index freeing the rings as we go */ +	while (i--) +		ixgbe_free_rx_resources(adapter->rx_ring[i]);  	return err;  } @@ -4744,6 +4761,10 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)  {  	int i; +#ifdef IXGBE_FCOE +	ixgbe_free_fcoe_ddp_resources(adapter); + +#endif  	for (i = 0; i < adapter->num_rx_queues; i++)  		if (adapter->rx_ring[i]->desc)  			ixgbe_free_rx_resources(adapter->rx_ring[i]); @@ -4825,15 +4846,31 @@ static int ixgbe_open(struct net_device *netdev)  	if (err)  		goto err_req_irq; +	/* Notify the stack of the actual queue counts. */ +	err = netif_set_real_num_tx_queues(netdev, +					   adapter->num_rx_pools > 1 ? 1 : +					   adapter->num_tx_queues); +	if (err) +		goto err_set_queues; + + +	err = netif_set_real_num_rx_queues(netdev, +					   adapter->num_rx_pools > 1 ? 
1 :
+					   adapter->num_rx_queues);
+	if (err)
+		goto err_set_queues;
+
 	ixgbe_up_complete(adapter);
 
 	return 0;
 
+err_set_queues:
+	ixgbe_free_irq(adapter);
 err_req_irq:
-err_setup_rx:
 	ixgbe_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
 	ixgbe_free_all_tx_resources(adapter);
+err_setup_tx:
 	ixgbe_reset(adapter);
 
 	return err;
@@ -4891,23 +4928,19 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 	pci_wake_from_d3(pdev, false);
 
-	rtnl_lock();
-	err = ixgbe_init_interrupt_scheme(adapter);
-	rtnl_unlock();
-	if (err) {
-		e_dev_err("Cannot initialize interrupts for device\n");
-		return err;
-	}
-
 	ixgbe_reset(adapter);
 
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 
-	if (netif_running(netdev)) {
+	rtnl_lock();
+	err = ixgbe_init_interrupt_scheme(adapter);
+	if (!err && netif_running(netdev))
 		err = ixgbe_open(netdev);
-		if (err)
-			return err;
-	}
+
+	rtnl_unlock();
+
+	if (err)
+		return err;
 
 	netif_device_attach(netdev);
@@ -5043,11 +5076,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
 	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
 	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifdef IXGBE_FCOE
-	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-	unsigned int cpu;
-	u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
 
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5178,17 +5206,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
 		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 		/* Add up per cpu counters for total ddp alloc fail */
-		if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+		if (adapter->fcoe.ddp_pool) {
+			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+			struct ixgbe_fcoe_ddp_pool *ddp_pool;
+			unsigned int cpu;
+			u64 noddp = 0, noddp_ext_buff = 0;
 			for_each_possible_cpu(cpu) {
-				fcoe_noddp_counts_sum +=
-					*per_cpu_ptr(fcoe->pcpu_noddp, cpu);
-				fcoe_noddp_ext_buff_counts_sum +=
-					*per_cpu_ptr(fcoe->
-						pcpu_noddp_ext_buff, cpu);
+				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+				noddp += ddp_pool->noddp;
+				noddp_ext_buff += ddp_pool->noddp_ext_buff;
 			}
+			hwstats->fcoe_noddp = noddp;
+			hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
 		}
-		hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
-		hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
 #endif /* IXGBE_FCOE */
 		break;
 	default:
@@ -5246,7 +5276,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
 **/
 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
 {
@@ -5282,7 +5312,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
  *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  
Secondly it sets the @@ -5316,7 +5346,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)  			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));  	} else {  		/* get one bit for every active tx/rx interrupt vector */ -		for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { +		for (i = 0; i < adapter->num_q_vectors; i++) {  			struct ixgbe_q_vector *qv = adapter->q_vector[i];  			if (qv->rx.ring || qv->tx.ring)  				eics |= ((u64)1 << i); @@ -5330,8 +5360,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)  /**   * ixgbe_watchdog_update_link - update the link status - * @adapter - pointer to the device adapter structure - * @link_speed - pointer to a u32 to store the link_speed + * @adapter: pointer to the device adapter structure + * @link_speed: pointer to a u32 to store the link_speed   **/  static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)  { @@ -5374,7 +5404,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)  /**   * ixgbe_watchdog_link_is_up - update netif_carrier status and   *                             print link up message - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure   **/  static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)  { @@ -5429,12 +5459,15 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)  	netif_carrier_on(netdev);  	ixgbe_check_vf_rate_limit(adapter); + +	/* ping all the active vfs to let them know link has changed */ +	ixgbe_ping_all_vfs(adapter);  }  /**   * ixgbe_watchdog_link_is_down - update netif_carrier status and   *                               print link down message - * @adapter - pointer to the adapter structure + * @adapter: pointer to the adapter structure   **/  static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)  { @@ -5458,11 +5491,14 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)  	e_info(drv, "NIC Link is Down\n");  	netif_carrier_off(netdev); + +	/* ping all the active vfs to let them know link has changed */ +	ixgbe_ping_all_vfs(adapter);  }  /**   * ixgbe_watchdog_flush_tx - flush queues on link down - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure   **/  static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)  { @@ -5511,7 +5547,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)  /**   * ixgbe_watchdog_subtask - check and bring link up - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure   **/  static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)  { @@ -5535,7 +5571,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)  /**   * ixgbe_sfp_detection_subtask - poll for SFP+ cable - * @adapter - the ixgbe adapter structure + * @adapter: the ixgbe adapter structure   **/  static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)  { @@ -5602,7 +5638,7 @@ sfp_out:  /**   * ixgbe_sfp_link_config_subtask - set up link SFP after module install - * @adapter - the ixgbe adapter structure + * @adapter: the ixgbe adapter structure   **/  static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)  { @@ -6233,8 +6269,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)  	if (((protocol == htons(ETH_P_FCOE)) ||  	    (protocol == htons(ETH_P_FIP))) &&  	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { -	
	txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-		txq += adapter->ring_feature[RING_F_FCOE].mask;
+		struct ixgbe_ring_feature *f;
+
+		f = &adapter->ring_feature[RING_F_FCOE];
+
+		while (txq >= f->indices)
+			txq -= f->indices;
+		txq += adapter->ring_feature[RING_F_FCOE].offset;
+
 		return txq;
 	}
 #endif
@@ -6348,7 +6390,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #ifdef IXGBE_FCOE
 	/* setup tx offload for FCoE */
 	if ((protocol == __constant_htons(ETH_P_FCOE)) &&
-	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
 		tso = ixgbe_fso(tx_ring, first, &hdr_len);
 		if (tso < 0)
 			goto out_drop;
@@ -6389,17 +6431,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *tx_ring;
 
-	if (skb->len <= 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
 	/*
 	 * The minimum packet size for olinfo paylen is 17 so pad the skb
 	 * in order to meet this minimum size requirement.
 	 */
-	if (skb->len < 17) {
-		if (skb_padto(skb, 17))
+	if (unlikely(skb->len < 17)) {
+		if (skb_pad(skb, 17 - skb->len))
 			return NETDEV_TX_OK;
 		skb->len = 17;
 	}
@@ -6427,8 +6464,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
-			    IXGBE_RAH_AV);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
 
 	return 0;
 }
@@ -6485,12 +6521,15 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
 {
 	int err = 0;
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ixgbe_mac_info *mac = &adapter->hw.mac;
+	struct ixgbe_hw *hw = &adapter->hw;
 
-	if (is_valid_ether_addr(mac->san_addr)) {
+	if (is_valid_ether_addr(hw->mac.san_addr)) {
 		rtnl_lock();
-		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+		err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
 		rtnl_unlock();
+
+		/* update SAN MAC vmdq pool selection */
+		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 	}
 	return err;
 }
@@ -6533,11 +6572,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
 	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-		for (i = 0; i < num_q_vectors; i++) {
-			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-			ixgbe_msix_clean_rings(0, q_vector);
-		}
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
 	} else {
 		ixgbe_intr(adapter->pdev->irq, netdev);
 	}
@@ -6594,8 +6630,9 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 }
 
 #ifdef CONFIG_IXGBE_DCB
-/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
- * #adapter: pointer to ixgbe_adapter
+/**
+ * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
  * @tc: number of traffic classes currently enabled
 *
 * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
@@ -6630,8 +6667,33 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
 	return;
 }
 
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
- * classes.
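The reworked FCoE queue selection above drops the old power-of-two AND mask in favor of repeated subtraction, so f->indices no longer needs to be a power of two; the feature offset then shifts the result into the FCoE block of rings. A sketch with hypothetical ring numbers (6 FCoE rings starting at ring 16):

#include <stdio.h>

/* sketch: fold a hashed queue into the FCoE ring range, as above */
static unsigned int fcoe_txq(unsigned int txq, unsigned int indices,
			     unsigned int offset)
{
	while (txq >= indices)	/* works for non power-of-two counts */
		txq -= indices;
	return txq + offset;	/* shift into the FCoE block */
}

int main(void)
{
	for (unsigned int hash = 14; hash < 18; hash++)
		printf("hash %u -> ring %u\n", hash, fcoe_txq(hash, 6, 16));
	return 0;
}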
+/** + * ixgbe_set_prio_tc_map - Configure netdev prio tc map + * @adapter: Pointer to adapter struct + * + * Populate the netdev user priority to tc map + */ +static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) +{ +	struct net_device *dev = adapter->netdev; +	struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; +	struct ieee_ets *ets = adapter->ixgbe_ieee_ets; +	u8 prio; + +	for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { +		u8 tc = 0; + +		if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) +			tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); +		else if (ets) +			tc = ets->prio_tc[prio]; + +		netdev_set_prio_tc_map(dev, prio, tc); +	} +} + +/** + * ixgbe_setup_tc - configure net_device for multiple traffic classes   *   * @netdev: net device to configure   * @tc: number of traffic classes to enable @@ -6641,17 +6703,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)  	struct ixgbe_adapter *adapter = netdev_priv(dev);  	struct ixgbe_hw *hw = &adapter->hw; -	/* Multiple traffic classes requires multiple queues */ -	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { -		e_err(drv, "Enable failed, needs MSI-X\n"); -		return -EINVAL; -	} - -	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { -		e_err(drv, "Enable failed, SR-IOV enabled\n"); -		return -EINVAL; -	} -  	/* Hardware supports up to 8 traffic classes */  	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||  	    (hw->mac.type == ixgbe_mac_82598EB && @@ -6668,8 +6719,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)  	if (tc) {  		netdev_set_num_tc(dev, tc); +		ixgbe_set_prio_tc_map(adapter); +  		adapter->flags |= IXGBE_FLAG_DCB_ENABLED; -		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;  		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {  			adapter->last_lfc_mode = adapter->hw.fc.requested_mode; @@ -6677,11 +6729,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)  		}  	} else {  		netdev_reset_tc(dev); +  		if (adapter->hw.mac.type == ixgbe_mac_82598EB)  			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;  		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; -		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;  		adapter->temp_dcb_cfg.pfc_mode_enable = false;  		adapter->dcb_cfg.pfc_mode_enable = false; @@ -6711,10 +6763,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,  {  	struct ixgbe_adapter *adapter = netdev_priv(netdev); -	/* return error if RXHASH is being enabled when RSS is not supported */ -	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) -		features &= ~NETIF_F_RXHASH; -  	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */  	if (!(features & NETIF_F_RXCSUM))  		features &= ~NETIF_F_LRO; @@ -6754,20 +6802,40 @@ static int ixgbe_set_features(struct net_device *netdev,  	 * Check if Flow Director n-tuple support was enabled or disabled.  If  	 * the state changed, we need to reset.  	 
*/ -	if (!(features & NETIF_F_NTUPLE)) { -		if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { -			/* turn off Flow Director, set ATR and reset */ -			if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && -			    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) -				adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; -			need_reset = true; -		} -		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; -	} else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { +	switch (features & NETIF_F_NTUPLE) { +	case NETIF_F_NTUPLE:  		/* turn off ATR, enable perfect filters and reset */ +		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) +			need_reset = true; +  		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;  		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; -		need_reset = true; +		break; +	default: +		/* turn off perfect filters, enable ATR and reset */ +		if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) +			need_reset = true; + +		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + +		/* We cannot enable ATR if SR-IOV is enabled */ +		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) +			break; + +		/* We cannot enable ATR if we have 2 or more traffic classes */ +		if (netdev_get_num_tc(netdev) > 1) +			break; + +		/* We cannot enable ATR if RSS is disabled */ +		if (adapter->ring_feature[RING_F_RSS].limit <= 1) +			break; + +		/* A sample rate of 0 indicates ATR disabled */ +		if (!adapter->atr_sample_rate) +			break; + +		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; +		break;  	}  	if (features & NETIF_F_HW_VLAN_RX) @@ -6791,7 +6859,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,  			     u16 flags)  {  	struct ixgbe_adapter *adapter = netdev_priv(dev); -	int err = -EOPNOTSUPP; +	int err; + +	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) +		return -EOPNOTSUPP;  	if (ndm->ndm_state & NUD_PERMANENT) {  		pr_info("%s: FDB only supports static addresses\n", @@ -6799,13 +6870,17 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,  		return -EINVAL;  	} -	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { -		if (is_unicast_ether_addr(addr)) +	if (is_unicast_ether_addr(addr)) { +		u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; + +		if (netdev_uc_count(dev) < rar_uc_entries)  			err = dev_uc_add_excl(dev, addr); -		else if (is_multicast_ether_addr(addr)) -			err = dev_mc_add_excl(dev, addr);  		else -			err = -EINVAL; +			err = -ENOMEM; +	} else if (is_multicast_ether_addr(addr)) { +		err = dev_mc_add_excl(dev, addr); +	} else { +		err = -EINVAL;  	}  	/* Only return duplicate errors if NLM_F_EXCL is set */ @@ -6894,26 +6969,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {  	.ndo_fdb_dump		= ixgbe_ndo_fdb_dump,  }; -static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, -				     const struct ixgbe_info *ii) -{ -#ifdef CONFIG_PCI_IOV -	struct ixgbe_hw *hw = &adapter->hw; - -	if (hw->mac.type == ixgbe_mac_82598EB) -		return; - -	/* The 82599 supports up to 64 VFs per physical function -	 * but this implementation limits allocation to 63 so that -	 * basic networking resources are still available to the -	 * physical function.  If the user requests greater thn -	 * 63 VFs then it is an error - reset to default of zero. -	 */ -	adapter->num_vfs = (max_vfs > 63) ? 
0 : max_vfs; -	ixgbe_enable_sriov(adapter, ii); -#endif /* CONFIG_PCI_IOV */ -} -  /**   * ixgbe_wol_supported - Check whether device supports WoL   * @hw: hw specific details @@ -6940,6 +6995,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,  			if (hw->bus.func != 0)  				break;  		case IXGBE_SUBDEV_ID_82599_SFP: +		case IXGBE_SUBDEV_ID_82599_RNDC:  			is_wol_supported = 1;  			break;  		} @@ -6987,6 +7043,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,  	int i, err, pci_using_dac;  	u8 part_str[IXGBE_PBANUM_LENGTH];  	unsigned int indices = num_possible_cpus(); +	unsigned int dcb_max = 0;  #ifdef IXGBE_FCOE  	u16 device_caps;  #endif @@ -7036,7 +7093,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,  	pci_save_state(pdev);  #ifdef CONFIG_IXGBE_DCB -	indices *= MAX_TRAFFIC_CLASS; +	if (ii->mac == ixgbe_mac_82598EB) +		dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, +				IXGBE_MAX_RSS_INDICES); +	else +		dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, +				IXGBE_MAX_FDIR_INDICES);  #endif  	if (ii->mac == ixgbe_mac_82598EB) @@ -7048,6 +7110,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,  	indices += min_t(unsigned int, num_possible_cpus(),  			 IXGBE_MAX_FCOE_INDICES);  #endif +	indices = max_t(unsigned int, dcb_max, indices);  	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);  	if (!netdev) {  		err = -ENOMEM; @@ -7154,8 +7217,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,  		goto err_sw_init;  	} -	ixgbe_probe_vf(adapter, ii); +#ifdef CONFIG_PCI_IOV +	ixgbe_enable_sriov(adapter, ii); +#endif  	netdev->features = NETIF_F_SG |  			   NETIF_F_IP_CSUM |  			   NETIF_F_IPV6_CSUM | @@ -7191,10 +7256,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,  	netdev->priv_flags |= IFF_UNICAST_FLT;  	netdev->priv_flags |= IFF_SUPP_NOFCS; -	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) -		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | -				    IXGBE_FLAG_DCB_ENABLED); -  #ifdef CONFIG_IXGBE_DCB  	netdev->dcbnl_ops = &dcbnl_ops;  #endif @@ -7206,11 +7267,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,  			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)  				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;  		} -	} -	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { -		netdev->vlan_features |= NETIF_F_FCOE_CRC; -		netdev->vlan_features |= NETIF_F_FSO; -		netdev->vlan_features |= NETIF_F_FCOE_MTU; + +		adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; + +		netdev->features |= NETIF_F_FSO | +				    NETIF_F_FCOE_CRC; + +		netdev->vlan_features |= NETIF_F_FSO | +					 NETIF_F_FCOE_CRC | +					 NETIF_F_FCOE_MTU;  	}  #endif /* IXGBE_FCOE */  	if (pci_using_dac) { @@ -7249,11 +7314,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,  	if (err)  		goto err_sw_init; -	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { -		netdev->hw_features &= ~NETIF_F_RXHASH; -		netdev->features &= ~NETIF_F_RXHASH; -	} -  	/* WOL not supported for all devices */  	adapter->wol = 0;  	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); @@ -7364,8 +7424,7 @@ err_register:  	ixgbe_release_hw_control(adapter);  	ixgbe_clear_interrupt_scheme(adapter);  err_sw_init: -	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) -		ixgbe_disable_sriov(adapter); +	ixgbe_disable_sriov(adapter);  	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;  	iounmap(hw->hw_addr);  err_ioremap: @@ -7412,25 +7471,13 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)  	ixgbe_sysfs_exit(adapter);  #endif /* 
CONFIG_IXGBE_HWMON */ -#ifdef IXGBE_FCOE -	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) -		ixgbe_cleanup_fcoe(adapter); - -#endif /* IXGBE_FCOE */ -  	/* remove the added san mac */  	ixgbe_del_sanmac_netdev(netdev);  	if (netdev->reg_state == NETREG_REGISTERED)  		unregister_netdev(netdev); -	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { -		if (!(ixgbe_check_vf_assignment(adapter))) -			ixgbe_disable_sriov(adapter); -		else -			e_dev_warn("Unloading driver while VFs are assigned " -				   "- VFs will not be deallocated\n"); -	} +	ixgbe_disable_sriov(adapter);  	ixgbe_clear_interrupt_scheme(adapter); @@ -7521,11 +7568,11 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,  		}  		/* Find the pci device of the offending VF */ -		vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); +		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);  		while (vfdev) {  			if (vfdev->devfn == (req_id & 0xFF))  				break; -			vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, +			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,  					       device_id, vfdev);  		}  		/* diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 24117709d6a..71659edf81a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -907,6 +907,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)  		  * 8   SFP_act_lmt_DA_CORE1 - 82599-specific  		  * 9   SFP_1g_cu_CORE0 - 82599-specific  		  * 10  SFP_1g_cu_CORE1 - 82599-specific +		  * 11  SFP_1g_sx_CORE0 - 82599-specific +		  * 12  SFP_1g_sx_CORE1 - 82599-specific  		  */  		if (hw->mac.type == ixgbe_mac_82598EB) {  			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) @@ -957,6 +959,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)  				else  					hw->phy.sfp_type =  						ixgbe_sfp_type_1g_cu_core1; +			} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { +				if (hw->bus.lan_id == 0) +					hw->phy.sfp_type = +						ixgbe_sfp_type_1g_sx_core0; +				else +					hw->phy.sfp_type = +						ixgbe_sfp_type_1g_sx_core1;  			} else {  				hw->phy.sfp_type = ixgbe_sfp_type_unknown;  			} @@ -1049,7 +1058,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)  		/* Verify supported 1G SFP modules */  		if (comp_codes_10g == 0 &&  		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || -		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { +		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || +		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || +		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {  			hw->phy.type = ixgbe_phy_sfp_unsupported;  			status = IXGBE_ERR_SFP_NOT_SUPPORTED;  			goto out; @@ -1064,7 +1075,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)  		hw->mac.ops.get_device_caps(hw, &enforce_sfp);  		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&  		    !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || -		      (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { +		      (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) || +		      (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) || +		      (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {  			/* Make sure we're a supported PHY type */  			if (hw->phy.type == ixgbe_phy_sfp_intel) {  				status = 0; @@ -1128,10 +1141,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,  	 * SR modules  	 */  	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || -	    sfp_type == 
ixgbe_sfp_type_1g_cu_core0) +	    sfp_type == ixgbe_sfp_type_1g_cu_core0 || +	    sfp_type == ixgbe_sfp_type_1g_sx_core0)  		sfp_type = ixgbe_sfp_type_srlr_core0;  	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || -	         sfp_type == ixgbe_sfp_type_1g_cu_core1) +		 sfp_type == ixgbe_sfp_type_1g_cu_core1 || +		 sfp_type == ixgbe_sfp_type_1g_sx_core1)  		sfp_type = ixgbe_sfp_type_srlr_core1;  	/* Read offset to PHY init contents */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index dcebd128bec..3456d561714 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -26,6 +26,7 @@  *******************************************************************************/  #include "ixgbe.h"  #include <linux/export.h> +#include <linux/ptp_classify.h>  /*   * The 82599 and the X540 do not have true 64bit nanosecond scale @@ -100,9 +101,13 @@  #define NSECS_PER_SEC 1000000000ULL  #endif +static struct sock_filter ptp_filter[] = { +	PTP_FILTER +}; +  /**   * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) - * @cc - the cyclecounter structure + * @cc: the cyclecounter structure   *   * this function reads the cyclecounter registers and is called by the   * cyclecounter structure used to construct a ns counter from the @@ -123,8 +128,8 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)  /**   * ixgbe_ptp_adjfreq - * @ptp - the ptp clock structure - * @ppb - parts per billion adjustment from base + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base   *   * adjust the frequency of the ptp cycle counter by the   * indicated ppb from the base frequency. @@ -170,8 +175,8 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)  /**   * ixgbe_ptp_adjtime - * @ptp - the ptp clock structure - * @delta - offset to adjust the cycle counter by + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by   *   * adjust the timer by resetting the timecounter structure.   */ @@ -198,8 +203,8 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)  /**   * ixgbe_ptp_gettime - * @ptp - the ptp clock structure - * @ts - timespec structure to hold the current time value + * @ptp: the ptp clock structure + * @ts: timespec structure to hold the current time value   *   * read the timecounter and return the correct value on ns,   * after converting it into a struct timespec. @@ -224,8 +229,8 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)  /**   * ixgbe_ptp_settime - * @ptp - the ptp clock structure - * @ts - the timespec containing the new time for the cycle counter + * @ptp: the ptp clock structure + * @ts: the timespec containing the new time for the cycle counter   *   * reset the timecounter to use a new base value instead of the kernel   * wall timer value. @@ -251,9 +256,9 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,  /**   * ixgbe_ptp_enable - * @ptp - the ptp clock structure - * @rq - the requested feature to change - * @on - whether to enable or disable the feature + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature   *   * enable (or disable) ancillary features of the phc subsystem.   
* our driver only supports the PPS feature on the X540 @@ -289,8 +294,8 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,  /**   * ixgbe_ptp_check_pps_event - * @adapter - the private adapter structure - * @eicr - the interrupt cause register value + * @adapter: the private adapter structure + * @eicr: the interrupt cause register value   *   * This function is called by the interrupt routine when checking for   * interrupts. It will check and handle a pps event. @@ -307,20 +312,21 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)  	    !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))  		return; -	switch (hw->mac.type) { -	case ixgbe_mac_X540: -		if (eicr & IXGBE_EICR_TIMESYNC) +	if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) { +		switch (hw->mac.type) { +		case ixgbe_mac_X540:  			ptp_clock_event(adapter->ptp_clock, &event); -		break; -	default: -		break; +			break; +		default: +			break; +		}  	}  }  /**   * ixgbe_ptp_enable_sdp - * @hw - the hardware private structure - * @shift - the clock shift for calculating nanoseconds + * @hw: the hardware private structure + * @shift: the clock shift for calculating nanoseconds   *   * this function enables the clock out feature on the sdp0 for the   * X540 device. It will create a 1second periodic output that can be @@ -393,7 +399,7 @@ static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)  /**   * ixgbe_ptp_disable_sdp - * @hw - the private hardware structure + * @hw: the private hardware structure   *   * this function disables the auxiliary SDP clock out feature   */ @@ -425,6 +431,68 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)  }  /** + * ixgbe_ptp_match - determine if this skb matches a ptp packet + * @skb: pointer to the skb + * @hwtstamp: pointer to the hwtstamp_config to check + * + * Determine whether the skb should have been timestamped, assuming the + * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet + * should have a timestamp waiting in the registers, and 0 otherwise. + * + * V1 packets have to check the version type to determine whether they are + * correct. However, we can't directly access the data because it might be + * fragmented in the SKB, in paged memory. In order to work around this, we + * use skb_copy_bits which will properly copy the data whether it is in the + * paged memory fragments or not. We have to copy the IP header as well as the + * message type. 
+ */ +static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter) +{ +	struct iphdr iph; +	u8 msgtype; +	unsigned int type, offset; + +	if (rx_filter == HWTSTAMP_FILTER_NONE) +		return 0; + +	type = sk_run_filter(skb, ptp_filter); + +	if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT)) +		return type & PTP_CLASS_V2; + +	/* For the remaining cases actually check message type */ +	switch (type) { +	case PTP_CLASS_V1_IPV4: +		skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph)); +		offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL; +		break; +	case PTP_CLASS_V1_IPV6: +		offset = OFF_PTP6 + OFF_PTP_CONTROL; +		break; +	default: +		/* other cases invalid or handled above */ +		return 0; +	} + +	/* Make sure our buffer is long enough */ +	if (skb->len < offset) +		return 0; + +	skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype)); + +	switch (rx_filter) { +	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: +		return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG); +	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: +		return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG); +	default: +		return 0; +	} +} + +/**   * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp   * @q_vector: structure containing interrupt and ring information   * @skb: particular skb to send timestamp with   *   * if the timestamp is valid, we convert it into the timecounter ns @@ -473,6 +541,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,  /**   * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp   * @q_vector: structure containing interrupt and ring information + * @rx_desc: the rx descriptor   * @skb: particular skb to send timestamp with   *   * if the timestamp is valid, we convert it into the timecounter ns @@ -480,6 +549,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,   * is passed up the network stack   */  void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, +			   union ixgbe_adv_rx_desc *rx_desc,  			   struct sk_buff *skb)  {  	struct ixgbe_adapter *adapter; @@ -497,21 +567,33 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,  	hw = &adapter->hw;  	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + +	/* Check if we have a valid timestamp and make sure the skb should +	 * have been timestamped */ +	if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) || +		   !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter))) +		return; + +	/* +	 * Always read the registers, in order to clear a possible fault +	 * because of stagnant RX timestamp values for a packet that never +	 * reached the queue. +	 */  	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);  	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;  	/* -	 * If this bit is set, then the RX registers contain the time stamp. No -	 * other packet will be time stamped until we read these registers, so -	 * read the registers to make them available again. Because only one -	 * packet can be time stamped at a time, we know that the register -	 * values must belong to this one here and therefore we don't need to -	 * compare any of the additional attributes stored for it. +	 * If the timestamp bit is set in the packet's descriptor, we know the +	 * timestamp belongs to this packet. No other packet can be +	 * timestamped until the registers for timestamping have been read. +	 * Therefore only one packet with this bit can be in the queue at a +	 * time, and the rx timestamp values that were in the registers belong +	 * to this packet.
*  	 * If nothing went wrong, then it should have a skb_shared_tx that we  	 * can turn into a skb_shared_hwtstamps.  	 */ -	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) +	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))  		return;  	spin_lock_irqsave(&adapter->tmreg_lock, flags); @@ -539,6 +621,11 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,   * type has to be specified. Matching the kind of event packet is   * not supported, with the exception of "all V2 events regardless of   * level 2 or 4". + + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyway.   */  int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,  			     struct ifreq *ifr, int cmd) @@ -582,41 +669,30 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,  		tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;  		is_l4 = true;  		break; +	case HWTSTAMP_FILTER_PTP_V2_EVENT: +	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: +	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:  	case HWTSTAMP_FILTER_PTP_V2_SYNC:  	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:  	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: -		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2; -		tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG; -		is_l2 = true; -		is_l4 = true; -		config.rx_filter = HWTSTAMP_FILTER_SOME; -		break;  	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:  	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:  	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: -		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2; -		tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG; -		is_l2 = true; -		is_l4 = true; -		config.rx_filter = HWTSTAMP_FILTER_SOME; -		break; -	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: -	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: -	case HWTSTAMP_FILTER_PTP_V2_EVENT:  		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; -		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;  		is_l2 = true;  		is_l4 = true; +		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;  		break;  	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:  	case HWTSTAMP_FILTER_ALL:  	default:  		/* -		 * register RXMTRL must be set, therefore it is not -		 * possible to time stamp both V1 Sync and Delay_Req messages -		 * and hardware does not support timestamping all packets -		 * => return error +		 * register RXMTRL must be set in order to do V1 packets, +		 * therefore it is not possible to time stamp both V1 Sync and +		 * Delay_Req messages and hardware does not support +		 * timestamping all packets => return error  		 */ +		config.rx_filter = HWTSTAMP_FILTER_NONE;  		return -ERANGE;  	} @@ -626,6 +702,9 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,  		return 0;  	} +	/* Store filter value for later use */ +	adapter->rx_hwtstamp_filter = config.rx_filter; +  	/* define ethertype filter for timestamped packets */  	if (is_l2)  		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), @@ -690,7 +769,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,  /**   * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw - * @adapter - pointer to the adapter structure + * @adapter: pointer to the adapter structure   *   * this function initializes the timecounter and cyclecounter   * structures for use in generating a ns counter from the arbitrary @@ -826,7 +905,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)  /**   * ixgbe_ptp_init - * @adapter - the ixgbe private adapter structure + * @adapter: the ixgbe
private adapter structure   *   * This function performs the required steps for enabling ptp   * support. If ptp support has already been loaded it simply calls the @@ -870,6 +949,10 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)  		return;  	} +	/* initialize the ptp filter */ +	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) +		e_dev_warn("ptp_filter_init failed\n"); +  	spin_lock_init(&adapter->tmreg_lock);  	ixgbe_ptp_start_cyclecounter(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 2d971d18696..4fea8716ab6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -44,50 +44,15 @@  #include "ixgbe_sriov.h"  #ifdef CONFIG_PCI_IOV -static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter) -{ -	struct pci_dev *pdev = adapter->pdev; -	struct pci_dev *pvfdev; -	u16 vf_devfn = 0; -	int device_id; -	int vfs_found = 0; - -	switch (adapter->hw.mac.type) { -	case ixgbe_mac_82599EB: -		device_id = IXGBE_DEV_ID_82599_VF; -		break; -	case ixgbe_mac_X540: -		device_id = IXGBE_DEV_ID_X540_VF; -		break; -	default: -		device_id = 0; -		break; -	} - -	vf_devfn = pdev->devfn + 0x80; -	pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); -	while (pvfdev) { -		if (pvfdev->devfn == vf_devfn && -		    (pvfdev->bus->number >= pdev->bus->number)) -			vfs_found++; -		vf_devfn += 2; -		pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, -					device_id, pvfdev); -	} - -	return vfs_found; -} -  void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,  			 const struct ixgbe_info *ii)  {  	struct ixgbe_hw *hw = &adapter->hw; -	int err = 0;  	int num_vf_macvlans, i;  	struct vf_macvlans *mv_list;  	int pre_existing_vfs = 0; -	pre_existing_vfs = ixgbe_find_enabled_vfs(adapter); +	pre_existing_vfs = pci_num_vf(adapter->pdev);  	if (!pre_existing_vfs && !adapter->num_vfs)  		return; @@ -106,16 +71,33 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,  			 "enabled for this device - Please reload all "  			 "VF drivers to avoid spoofed packet errors\n");  	} else { +		int err; +		/* +		 * The 82599 supports up to 64 VFs per physical function +		 * but this implementation limits allocation to 63 so that +		 * basic networking resources are still available to the +		 * physical function.  If the user requests more than +		 * 63 VFs the allocation is capped at 63.
+		 */ +		adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63); +  		err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); +		if (err) { +			e_err(probe, "Failed to enable PCI sriov: %d\n", err); +			adapter->num_vfs = 0; +			return; +		}  	} -	if (err) { -		e_err(probe, "Failed to enable PCI sriov: %d\n", err); -		goto err_novfs; -	} -	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; +	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;  e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); +	/* Enable VMDq flag so device will be set in VM mode */ +	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; +	if (!adapter->ring_feature[RING_F_VMDQ].limit) +		adapter->ring_feature[RING_F_VMDQ].limit = 1; +	adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; +  	num_vf_macvlans = hw->mac.num_rar_entries -  	(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); @@ -146,12 +128,39 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,  		 * and memory allocated set up the mailbox parameters  		 */  		ixgbe_init_mbx_params_pf(hw); -		memcpy(&hw->mbx.ops, ii->mbx_ops, -		       sizeof(hw->mbx.ops)); +		memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + +		/* limit traffic classes based on VFs enabled */ +		if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && +		    (adapter->num_vfs < 16)) { +			adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; +			adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; +		} else if (adapter->num_vfs < 32) { +			adapter->dcb_cfg.num_tcs.pg_tcs = 4; +			adapter->dcb_cfg.num_tcs.pfc_tcs = 4; +		} else { +			adapter->dcb_cfg.num_tcs.pg_tcs = 1; +			adapter->dcb_cfg.num_tcs.pfc_tcs = 1; +		} + +		/* We do not support RSS w/ SR-IOV */ +		adapter->ring_feature[RING_F_RSS].limit = 1;  		/* Disable RSC when in SR-IOV mode */  		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |  				     IXGBE_FLAG2_RSC_ENABLED); + +#ifdef IXGBE_FCOE +		/* +		 * When SR-IOV is enabled 82599 cannot support jumbo frames +		 * so we must disable FCoE because we cannot support FCoE MTU.
+		 */ +		if (adapter->hw.mac.type == ixgbe_mac_82599EB) +			adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED | +					    IXGBE_FLAG_FCOE_CAPABLE); +#endif + +		/* enable spoof checking for all VFs */  for (i = 0; i < adapter->num_vfs; i++)  			adapter->vfinfo[i].spoofchk_enabled = true;  		return; @@ -160,31 +169,80 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,  	/* Oh oh */  	e_err(probe, "Unable to allocate memory for VF Data Storage - "  	      "SRIOV disabled\n"); -	pci_disable_sriov(adapter->pdev); +	ixgbe_disable_sriov(adapter); +} -err_novfs: -	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; -	adapter->num_vfs = 0; +static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter) +{ +	struct pci_dev *pdev = adapter->pdev; +	struct pci_dev *vfdev; +	int dev_id; + +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82599EB: +		dev_id = IXGBE_DEV_ID_82599_VF; +		break; +	case ixgbe_mac_X540: +		dev_id = IXGBE_DEV_ID_X540_VF; +		break; +	default: +		return false; +	} + +	/* loop through all the VFs to see if we own any that are assigned */ +	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); +	while (vfdev) { +		/* if we don't own it we don't care */ +		if (vfdev->is_virtfn && vfdev->physfn == pdev) { +			/* if it is assigned we cannot release it */ +			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) +				return true; +		} + +		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev); +	} + +	return false;  } -#endif /* #ifdef CONFIG_PCI_IOV */ +#endif /* #ifdef CONFIG_PCI_IOV */  void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)  {  	struct ixgbe_hw *hw = &adapter->hw; -	u32 gcr;  	u32 gpie;  	u32 vmdctl; -	int i; + +	/* set num VFs to 0 to prevent access to vfinfo */ +	adapter->num_vfs = 0; + +	/* free VF control structures */ +	kfree(adapter->vfinfo); +	adapter->vfinfo = NULL; + +	/* free macvlan list */ +	kfree(adapter->mv_list); +	adapter->mv_list = NULL; + +	/* if SR-IOV is already disabled then there is nothing to do */ +	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) +		return;  #ifdef CONFIG_PCI_IOV +	/* +	 * If our VFs are assigned we cannot shut down SR-IOV +	 * without causing issues, so just leave the hardware +	 * available but disabled +	 */ +	if (ixgbe_vfs_are_assigned(adapter)) { +		e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); +		return; +	}  	/* disable iov and allow time for transactions to clear */  	pci_disable_sriov(adapter->pdev);  #endif  	/* turn off device IOV mode */ -	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); -	gcr &= ~(IXGBE_GCR_EXT_SRIOV); -	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr); +	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);  	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);  	gpie &= ~IXGBE_GPIE_VTMODE_MASK;  	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); @@ -195,19 +253,14 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)  	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);  	IXGBE_WRITE_FLUSH(hw); +	/* Disable VMDq flag so device will not be set in VM mode */ +	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) +		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; +	adapter->ring_feature[RING_F_VMDQ].offset = 0; +  	/* take a breather then clean up driver data */  	msleep(100); -	/* Release reference to VF devices */ -	for (i = 0; i < adapter->num_vfs; i++) { -		if (adapter->vfinfo[i].vfdev) -			pci_dev_put(adapter->vfinfo[i].vfdev); -	} -	kfree(adapter->vfinfo); -	kfree(adapter->mv_list); -	adapter->vfinfo = NULL; - -	adapter->num_vfs = 0;  	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; } @@ -441,33 +494,16 @@ static
int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,  	return 0;  } -int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter) -{ -#ifdef CONFIG_PCI_IOV -	int i; -	for (i = 0; i < adapter->num_vfs; i++) { -		if (adapter->vfinfo[i].vfdev->dev_flags & -				PCI_DEV_FLAGS_ASSIGNED) -			return true; -	} -#endif -	return false; -} -  int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)  {  	unsigned char vf_mac_addr[6];  	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);  	unsigned int vfn = (event_mask & 0x3f); -	struct pci_dev *pvfdev; -	unsigned int device_id; -	u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) | -				(pdev->devfn & 1);  	bool enable = ((event_mask & 0x10000000U) != 0);  	if (enable) { -		random_ether_addr(vf_mac_addr); +		eth_random_addr(vf_mac_addr);  		e_info(probe, "IOV: VF %d is enabled MAC %pM\n",  		       vfn, vf_mac_addr);  		/* @@ -475,31 +511,6 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)  		 * for it later.  		 */  		memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); - -		switch (adapter->hw.mac.type) { -		case ixgbe_mac_82599EB: -			device_id = IXGBE_DEV_ID_82599_VF; -			break; -		case ixgbe_mac_X540: -			device_id = IXGBE_DEV_ID_X540_VF; -			break; -		default: -			device_id = 0; -			break; -		} - -		pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); -		while (pvfdev) { -			if (pvfdev->devfn == thisvf_devfn) -				break; -			pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, -						device_id, pvfdev); -		} -		if (pvfdev) -			adapter->vfinfo[vfn].vfdev = pvfdev; -		else -			e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n", -			      thisvf_devfn);  	}  	return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 2ab38d5fda9..1be1d30e4e7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -42,7 +42,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,  			    int vf, struct ifla_vf_info *ivi);  void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);  void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); -int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);  #ifdef CONFIG_PCI_IOV  void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,  			const struct ixgbe_info *ii); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index 1d80b1cefa6..16ddf14e8ba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -37,7 +37,6 @@  #include <linux/netdevice.h>  #include <linux/hwmon.h> -#ifdef CONFIG_IXGBE_HWMON  /* hwmon callback functions */  static ssize_t ixgbe_hwmon_show_location(struct device *dev,  					 struct device_attribute *attr, @@ -96,11 +95,11 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,  	return sprintf(buf, "%u\n", value);  } -/* +/**   * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. 
- * @ adapter: pointer to the adapter structure - * @ offset: offset in the eeprom sensor data table - * @ type: type of sensor data to display + * @adapter: pointer to the adapter structure + * @offset: offset in the eeprom sensor data table + * @type: type of sensor data to display   *   * For each file we want in hwmon's sysfs interface we need a device_attribute   * This is included in our hwmon_attr struct that contains the references to @@ -241,5 +240,4 @@ err:  exit:  	return rc;  } -#endif /* CONFIG_IXGBE_HWMON */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 204848d2448..400f86a3117 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -32,9 +32,6 @@  #include <linux/mdio.h>  #include <linux/netdevice.h> -/* Vendor ID */ -#define IXGBE_INTEL_VENDOR_ID   0x8086 -  /* Device IDs */  #define IXGBE_DEV_ID_82598               0x10B6  #define IXGBE_DEV_ID_82598_BX            0x1508 @@ -57,6 +54,7 @@  #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152a  #define IXGBE_DEV_ID_82599_SFP_FCOE      0x1529  #define IXGBE_SUBDEV_ID_82599_SFP        0x11A9 +#define IXGBE_SUBDEV_ID_82599_RNDC       0x1F72  #define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0  #define IXGBE_DEV_ID_82599_SFP_EM        0x1507  #define IXGBE_DEV_ID_82599_SFP_SF2       0x154D @@ -1452,6 +1450,7 @@ enum {  #define IXGBE_ETQF_1588         0x40000000 /* bit 30 */  #define IXGBE_ETQF_FILTER_EN    0x80000000 /* bit 31 */  #define IXGBE_ETQF_POOL_ENABLE   (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_SHIFT		20  #define IXGBE_ETQS_RX_QUEUE     0x007F0000 /* bits 22:16 */  #define IXGBE_ETQS_RX_QUEUE_SHIFT       16 @@ -2419,7 +2418,7 @@ typedef u32 ixgbe_physical_layer;   */  /* BitTimes (BT) conversion */ -#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024)) +#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))  #define IXGBE_B2BT(BT) (BT * 8)  /* Calculate Delay to respond to PFC */ @@ -2450,24 +2449,31 @@ typedef u32 ixgbe_physical_layer;  #define IXGBE_PCI_DELAY	10000  /* Calculate X540 delay value in bit times */ -#define IXGBE_FILL_RATE (36 / 25) - -#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \ -				 (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ -				 (2 * IXGBE_CABLE_DC) + \ -				 (2 * IXGBE_ID_X540) + \ -				 IXGBE_HD + IXGBE_B2BT(TC))) +#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ +			((36 * \ +			  (IXGBE_B2BT(_max_frame_link) + \ +			   IXGBE_PFC_D + \ +			   (2 * IXGBE_CABLE_DC) + \ +			   (2 * IXGBE_ID_X540) + \ +			   IXGBE_HD) / 25 + 1) + \ +			 2 * IXGBE_B2BT(_max_frame_tc))  /* Calculate 82599, 82598 delay value in bit times */ -#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \ -			    (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ -			    (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \ -			    IXGBE_HD + IXGBE_B2BT(TC))) +#define IXGBE_DV(_max_frame_link, _max_frame_tc) \ +			((36 * \ +			  (IXGBE_B2BT(_max_frame_link) + \ +			   IXGBE_PFC_D + \ +			   (2 * IXGBE_CABLE_DC) + \ +			   (2 * IXGBE_ID) + \ +			   IXGBE_HD) / 25 + 1) + \ +			 2 * IXGBE_B2BT(_max_frame_tc))  /* Calculate low threshold delay values */ -#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \ -			       (IXGBE_FILL_RATE * IXGBE_PCI_DELAY)) -#define IXGBE_LOW_DV(TC)      (2 * IXGBE_LOW_DV_X540(TC)) +#define IXGBE_LOW_DV_X540(_max_frame_tc) \ +			(2 * IXGBE_B2BT(_max_frame_tc) + \ +			(36 * IXGBE_PCI_DELAY / 25) + 1) +#define IXGBE_LOW_DV(_max_frame_tc) \ +			(2 * IXGBE_LOW_DV_X540(_max_frame_tc))  /* Software ATR 
hash keys */  #define IXGBE_ATR_BUCKET_HASH_KEY    0x3DAD14E2 @@ -2597,6 +2603,8 @@ enum ixgbe_sfp_type {  	ixgbe_sfp_type_da_act_lmt_core1 = 8,  	ixgbe_sfp_type_1g_cu_core0 = 9,  	ixgbe_sfp_type_1g_cu_core1 = 10, +	ixgbe_sfp_type_1g_sx_core0 = 11, +	ixgbe_sfp_type_1g_sx_core1 = 12,  	ixgbe_sfp_type_not_present = 0xFFFE,  	ixgbe_sfp_type_unknown = 0xFFFF  }; @@ -2837,6 +2845,7 @@ struct ixgbe_mac_operations {  	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);  	s32 (*clear_rar)(struct ixgbe_hw *, u32);  	s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); +	s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);  	s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);  	s32 (*init_rx_addrs)(struct ixgbe_hw *);  	s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); @@ -2912,6 +2921,7 @@ struct ixgbe_mac_info {  	bool                            orig_link_settings_stored;  	bool                            autotry_restart;  	u8                              flags; +	u8				san_mac_rar_index;  	struct ixgbe_thermal_sensor_data  thermal_sensor_data;  }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index f90ec078ece..de4da5219b7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -156,6 +156,9 @@ mac_reset_top:  		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,  		                    hw->mac.san_addr, 0, IXGBE_RAH_AV); +		/* Save the SAN MAC RAR index */ +		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; +  		/* Reserve the last RAR for the SAN MAC address */  		hw->mac.num_rar_entries--;  	} @@ -832,6 +835,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {  	.set_rar                = &ixgbe_set_rar_generic,  	.clear_rar              = &ixgbe_clear_rar_generic,  	.set_vmdq               = &ixgbe_set_vmdq_generic, +	.set_vmdq_san_mac	= &ixgbe_set_vmdq_san_mac_generic,  	.clear_vmdq             = &ixgbe_clear_vmdq_generic,  	.init_rx_addrs          = &ixgbe_init_rx_addrs_generic,  	.update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
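A note on the ixgbe_select_queue() FCoE hunk earlier in this series: the old code masked txq with (indices - 1), which is only correct when the FCoE ring count is a power of two, and it then added the ring mask rather than a ring offset. The replacement folds txq into range by repeated subtraction and adds the feature offset, so any ring count works. A standalone sketch of that mapping; the ring count (6) and base offset (64) below are made-up illustration values, not taken from the driver:

	#include <stdio.h>

	/*
	 * Mirror of the remainder loop in the patched ixgbe_select_queue():
	 * fold a hash-derived queue index into [offset, offset + indices).
	 * The loop is a cheap modulo when txq is already close to range.
	 */
	static unsigned int fcoe_map_txq(unsigned int txq, unsigned int indices,
					 unsigned int offset)
	{
		while (txq >= indices)
			txq -= indices;
		return txq + offset;
	}

	int main(void)
	{
		unsigned int q;

		/* hypothetical layout: 6 FCoE rings starting at ring 64 */
		for (q = 0; q < 12; q++)
			printf("hash txq %2u -> tx ring %u\n",
			       q, fcoe_map_txq(q, 6, 64));
		return 0;
	}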
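The reworked NETIF_F_NTUPLE switch in ixgbe_set_features() only re-arms ATR hashing when several conditions hold at once. Condensed into one predicate for readability; the struct below is a local stand-in for the adapter state, with field names invented for this sketch:

	#include <stdbool.h>
	#include <stdio.h>

	/* local stand-in for the driver state consulted by the hunk */
	struct state {
		bool sriov_enabled;
		int num_tc;
		unsigned int rss_limit;
		unsigned int atr_sample_rate;
	};

	static bool atr_allowed(const struct state *s)
	{
		return !s->sriov_enabled &&	/* no ATR with SR-IOV */
		       s->num_tc <= 1 &&	/* no ATR with 2+ traffic classes */
		       s->rss_limit > 1 &&	/* no ATR when RSS is disabled */
		       s->atr_sample_rate != 0;	/* 0 means ATR disabled */
	}

	int main(void)
	{
		struct state s = { false, 1, 16, 20 };

		printf("ATR hash capable: %s\n", atr_allowed(&s) ? "yes" : "no");
		return 0;
	}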
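On the ixgbe_ptp_hwtstamp_ioctl() rework: every V2 filter request now programs event-mode timestamping and reports the granted filter as HWTSTAMP_FILTER_PTP_V2_EVENT, so user space should re-read rx_filter after the call (the usual SIOCSHWTSTAMP contract is that the driver copies the granted config back into the caller's buffer). A minimal user-space sketch; the interface name "eth0" is an assumption:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0) {
			perror("socket");
			return 1;
		}

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;	/* narrow request */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
		ifr.ifr_data = (char *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
			perror("SIOCSHWTSTAMP");
			close(fd);
			return 1;
		}

		/* with this patch ixgbe is expected to widen the grant,
		 * e.g. to HWTSTAMP_FILTER_PTP_V2_EVENT */
		printf("granted rx_filter: %d\n", cfg.rx_filter);
		close(fd);
		return 0;
	}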
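The traffic-class clamp added to ixgbe_enable_sriov() tiers the DCB limits by VF count: up to 15 VFs leaves room for the full set of traffic classes, up to 31 VFs leaves 4, and more than that a single TC. The sketch below restates just the tiers; it drops the 82599-only check guarding the first tier and hard-codes MAX_TRAFFIC_CLASS as 8, the value these adapters use:

	#include <stdio.h>

	#define MAX_TRAFFIC_CLASS 8	/* 82599/X540 support 8 TCs */

	/* restatement of the VF-count to traffic-class tiers */
	static int max_tcs_for_vfs(int num_vfs)
	{
		if (num_vfs < 16)
			return MAX_TRAFFIC_CLASS;
		if (num_vfs < 32)
			return 4;
		return 1;
	}

	int main(void)
	{
		int vfs;

		for (vfs = 0; vfs <= 63; vfs += 21)
			printf("%2d VFs -> up to %d TCs\n",
			       vfs, max_tcs_for_vfs(vfs));
		return 0;
	}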
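The delay-value macro rework in ixgbe_type.h fixes real integer-math bugs, not just style: the old IXGBE_FILL_RATE was defined as (36 / 25), which C integer division truncates to 1, so the intended 1.44x fill-rate factor silently vanished from IXGBE_DV() and IXGBE_LOW_DV_X540(); likewise the old IXGBE_BT2KB() added only 1023 before dividing by 8 * 1024, so it did not actually round up to the next KB. The new forms multiply by 36 before dividing by 25 (with +1 to round up) and add (8 * 1024 - 1) before the KB divide. A small demonstration of the fill-rate difference:

	#include <stdio.h>

	int main(void)
	{
		unsigned int bt = 100000;		/* arbitrary bit-time total */
		unsigned int old_factor = (36 / 25);	/* truncates to 1 */

		printf("old: %u new: %u\n",
		       old_factor * bt,			/* 100000: factor lost */
		       36 * bt / 25 + 1);		/* 144001: factor kept */
		return 0;
	}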