Diffstat (limited to 'drivers/net/ethernet/intel/igb')
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c    |  261
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h    |    2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h  |   50
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h       |   60
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c     |  156
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h     |    4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c      |  124
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h      |   17
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c      |   11
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.h      |   52
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c      |   27
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c      |  259
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h     |   53
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h            |  133
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c    |  354
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c      |   29
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c       | 1419
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c        |   61
18 files changed, 1734 insertions(+), 1338 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 12b1d848080..ff6a17cb136 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -100,6 +100,7 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)  		break;  	case e1000_82580:  	case e1000_i350: +	case e1000_i354:  	case e1000_i210:  	case e1000_i211:  		reg = rd32(E1000_MDICNFG); @@ -149,6 +150,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)  		switch (hw->mac.type) {  		case e1000_82580:  		case e1000_i350: +		case e1000_i354:  			phy->ops.read_reg = igb_read_phy_reg_82580;  			phy->ops.write_reg = igb_write_phy_reg_82580;  			break; @@ -174,13 +176,14 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)  	/* Verify phy id and set remaining function pointers */  	switch (phy->id) { +	case M88E1545_E_PHY_ID:  	case I347AT4_E_PHY_ID:  	case M88E1112_E_PHY_ID:  	case M88E1111_I_PHY_ID:  		phy->type		= e1000_phy_m88; +		phy->ops.check_polarity	= igb_check_polarity_m88;  		phy->ops.get_phy_info	= igb_get_phy_info_m88; -		if (phy->id == I347AT4_E_PHY_ID || -		    phy->id == M88E1112_E_PHY_ID) +		if (phy->id != M88E1111_I_PHY_ID)  			phy->ops.get_cable_length =  					 igb_get_cable_length_m88_gen2;  		else @@ -227,7 +230,7 @@ out:   *  igb_init_nvm_params_82575 - Init NVM func ptrs.   *  @hw: pointer to the HW structure   **/ -s32 igb_init_nvm_params_82575(struct e1000_hw *hw) +static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)  {  	struct e1000_nvm_info *nvm = &hw->nvm;  	u32 eecd = rd32(E1000_EECD); @@ -287,6 +290,7 @@ s32 igb_init_nvm_params_82575(struct e1000_hw *hw)  			nvm->ops.read = igb_read_nvm_spi;  		nvm->ops.write = igb_write_nvm_spi;  		break; +	case e1000_i354:  	case e1000_i350:  		nvm->ops.validate = igb_validate_nvm_checksum_i350;  		nvm->ops.update = igb_update_nvm_checksum_i350; @@ -352,6 +356,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)  		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;  		break;  	case e1000_i350: +	case e1000_i354:  		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;  		break;  	default: @@ -384,6 +389,9 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)  		dev_spec->eee_disable = false;  	else  		dev_spec->eee_disable = true; +	/* Allow a single clear of the SW semaphore on I210 and newer */ +	if (mac->type >= e1000_i210) +		dev_spec->clear_semaphore_once = true;  	/* physical interface link setup */  	mac->ops.setup_physical_interface =  		(hw->phy.media_type == e1000_media_type_copper) @@ -435,8 +443,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)  		mac->type = e1000_i350;  		break;  	case E1000_DEV_ID_I210_COPPER: -	case E1000_DEV_ID_I210_COPPER_OEM1: -	case E1000_DEV_ID_I210_COPPER_IT:  	case E1000_DEV_ID_I210_FIBER:  	case E1000_DEV_ID_I210_SERDES:  	case E1000_DEV_ID_I210_SGMII: @@ -445,14 +451,18 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)  	case E1000_DEV_ID_I211_COPPER:  		mac->type = e1000_i211;  		break; +	case E1000_DEV_ID_I354_BACKPLANE_1GBPS: +	case E1000_DEV_ID_I354_SGMII: +	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: +		mac->type = e1000_i354; +		break;  	default:  		return -E1000_ERR_MAC_INIT;  		break;  	}  	/* Set media type */ -	/* -	 * The 82575 uses bits 22:23 for link mode. The mode can be changed +	/* The 82575 uses bits 22:23 for link mode. The mode can be changed  	 * based on the EEPROM. We cannot rely upon device ID. 
There  	 * is no distinguishable difference between fiber and internal  	 * SerDes mode on the 82575. There can be an external PHY attached @@ -621,8 +631,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)  	u32 ctrl_ext;  	u32 mdic; -	/* -	 * For SGMII PHYs, we try the list of possible addresses until +	/* For SGMII PHYs, we try the list of possible addresses until  	 * we find one that works.  For non-SGMII PHYs  	 * (e.g. integrated copper PHYs), an address of 1 should  	 * work.  The result of this function should mean phy->phy_addr @@ -644,6 +653,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)  			break;  		case e1000_82580:  		case e1000_i350: +		case e1000_i354:  		case e1000_i210:  		case e1000_i211:  			mdic = rd32(E1000_MDICNFG); @@ -665,8 +675,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)  	wrfl();  	msleep(300); -	/* -	 * The address field in the I2CCMD register is 3 bits and 0 is invalid. +	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.  	 * Therefore, we need to test 1-7  	 */  	for (phy->addr = 1; phy->addr < 8; phy->addr++) { @@ -674,8 +683,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)  		if (ret_val == 0) {  			hw_dbg("Vendor ID 0x%08X read at address %u\n",  			       phy_id, phy->addr); -			/* -			 * At the time of this writing, The M88 part is +			/* At the time of this writing, The M88 part is  			 * the only supported SGMII PHY product.  			 */  			if (phy_id == M88_VENDOR) @@ -711,15 +719,13 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)  {  	s32 ret_val; -	/* -	 * This isn't a true "hard" reset, but is the only reset +	/* This isn't a true "hard" reset, but is the only reset  	 * available to us at this time.  	 */  	hw_dbg("Soft resetting SGMII attached PHY...\n"); -	/* -	 * SFP documentation requires the following to configure the SPF module +	/* SFP documentation requires the following to configure the SPF module  	 * to work on SGMII.  No further documentation is given.  	 */  	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); @@ -774,8 +780,7 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)  		data &= ~IGP02E1000_PM_D0_LPLU;  		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,  						 data); -		/* -		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used +		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used  		 * during Dx states where the power conservation is most  		 * important.  During driver activity we should enable  		 * SmartSpeed, so performance is maintained. @@ -838,8 +843,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)  	} else {  		data &= ~E1000_82580_PM_D0_LPLU; -		/* -		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used +		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used  		 * during Dx states where the power conservation is most  		 * important.  During driver activity we should enable  		 * SmartSpeed, so performance is maintained. @@ -867,7 +871,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)   *  During driver activity, SmartSpeed should be enabled so performance is   *  maintained.   
**/ -s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)  {  	struct e1000_phy_info *phy = &hw->phy;  	s32 ret_val = 0; @@ -877,8 +881,7 @@ s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)  	if (!active) {  		data &= ~E1000_82580_PM_D3_LPLU; -		/* -		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used +		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used  		 * during Dx states where the power conservation is most  		 * important.  During driver activity we should enable  		 * SmartSpeed, so performance is maintained. @@ -964,8 +967,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)  		if (!(swfw_sync & (fwmask | swmask)))  			break; -		/* -		 * Firmware currently using resource (fwmask) +		/* Firmware currently using resource (fwmask)  		 * or other software thread using resource (swmask)  		 */  		igb_put_hw_semaphore(hw); @@ -1065,8 +1067,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)  	if (hw->phy.media_type != e1000_media_type_copper) {  		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,  		                                             &duplex); -		/* -		 * Use this flag to determine if link needs to be checked or +		/* Use this flag to determine if link needs to be checked or  		 * not.  If  we have link clear the flag so that we do not  		 * continue to check for link.  		 */ @@ -1135,15 +1136,13 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,  	*speed = 0;  	*duplex = 0; -	/* -	 * Read the PCS Status register for link state. For non-copper mode, +	/* Read the PCS Status register for link state. For non-copper mode,  	 * the status register is not accurate. The PCS status register is  	 * used instead.  	 */  	pcs = rd32(E1000_PCS_LSTAT); -	/* -	 * The link up bit determines when link is up on autoneg. The sync ok +	/* The link up bit determines when link is up on autoneg. The sync ok  	 * gets set once both sides sync up and agree upon link. Stable link  	 * can be determined by checking for both link up and link sync ok  	 */ @@ -1214,8 +1213,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)  	u32 ctrl, icr;  	s32 ret_val; -	/* -	 * Prevent the PCI-E bus from sticking if there is no TLP connection +	/* Prevent the PCI-E bus from sticking if there is no TLP connection  	 * on the last TLP read/write transaction when MAC is reset.  	 */  	ret_val = igb_disable_pcie_master(hw); @@ -1244,8 +1242,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)  	ret_val = igb_get_auto_rd_done(hw);  	if (ret_val) { -		/* -		 * When auto config read does not complete, do not +		/* When auto config read does not complete, do not  		 * return with an error. This can happen in situations  		 * where there is no eeprom and prevents getting link.  		 */ @@ -1287,7 +1284,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)  	/* Disabling VLAN filtering */  	hw_dbg("Initializing the IEEE VLAN\n"); -	if (hw->mac.type == e1000_i350) +	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))  		igb_clear_vfta_i350(hw);  	else  		igb_clear_vfta(hw); @@ -1308,8 +1305,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)  	/* Setup link and flow control */  	ret_val = igb_setup_link(hw); -	/* -	 * Clear all of the statistics registers (clear on read).  It is +	/* Clear all of the statistics registers (clear on read).  
It is  	 * important that we do this after we have tried to establish link  	 * because the symbol error count will increment wildly if there  	 * is no link. @@ -1364,6 +1360,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)  		switch (hw->phy.id) {  		case I347AT4_E_PHY_ID:  		case M88E1112_E_PHY_ID: +		case M88E1545_E_PHY_ID:  		case I210_I_PHY_ID:  			ret_val = igb_copper_link_setup_m88_gen2(hw);  			break; @@ -1412,17 +1409,17 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)  		return ret_val; -	/* -	 * On the 82575, SerDes loopback mode persists until it is +	/* On the 82575, SerDes loopback mode persists until it is  	 * explicitly turned off or a power cycle is performed.  A read to  	 * the register does not indicate its status.  Therefore, we ensure  	 * loopback mode is disabled during initialization.  	 */  	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); -	/* power on the sfp cage if present */ +	/* power on the sfp cage if present and turn on I2C */  	ctrl_ext = rd32(E1000_CTRL_EXT);  	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; +	ctrl_ext |= E1000_CTRL_I2C_ENA;  	wr32(E1000_CTRL_EXT, ctrl_ext);  	ctrl_reg = rd32(E1000_CTRL); @@ -1466,8 +1463,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)  				pcs_autoneg = false;  		} -		/* -		 * non-SGMII modes only supports a speed of 1000/Full for the +		/* non-SGMII modes only supports a speed of 1000/Full for the  		 * link so it is best to just force the MAC and let the pcs  		 * link either autoneg or be forced to 1000/Full  		 */ @@ -1481,8 +1477,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)  	wr32(E1000_CTRL, ctrl_reg); -	/* -	 * New SerDes mode allows for forcing speed or autonegotiating speed +	/* New SerDes mode allows for forcing speed or autonegotiating speed  	 * at 1gb. Autoneg should be default set by most drivers. This is the  	 * mode that will be compatible with older link partners and switches.  	 * However, both are supported by the hardware and some drivers/tools. @@ -1592,8 +1587,7 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)  {  	s32 ret_val = 0; -	/* -	 * If there's an alternate MAC address place it in RAR0 +	/* If there's an alternate MAC address place it in RAR0  	 * so that it will override the Si installed default perm  	 * address.  	 
*/ @@ -1777,8 +1771,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)  	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)  		goto out; -	/* -	 * if capababilities version is type 1 we can write the +	/* if capabilities version is type 1 we can write the  	 * timeout of 10ms to 200ms through the GCR register  	 */  	if (!(gcr & E1000_GCR_CAP_VER2)) { @@ -1786,8 +1779,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)  		goto out;  	} -	/* -	 * for version 2 capabilities we need to write the config space +	/* for version 2 capabilities we need to write the config space  	 * directly in order to set the completion timeout value for  	 * 16ms to 55ms  	 */ @@ -1825,6 +1817,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)  		reg_offset = E1000_DTXSWC;  		break;  	case e1000_i350: +	case e1000_i354:  		reg_offset = E1000_TXSWC;  		break;  	default: @@ -1866,6 +1859,7 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)  			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;  		wr32(E1000_DTXSWC, dtxswc);  		break; +	case e1000_i354:  	case e1000_i350:  		dtxswc = rd32(E1000_TXSWC);  		if (enable) @@ -1879,7 +1873,6 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)  		break;  	} -  }  /** @@ -1914,7 +1907,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)  {  	s32 ret_val; -  	ret_val = hw->phy.ops.acquire(hw);  	if (ret_val)  		goto out; @@ -2016,8 +2008,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)  	/* Get current control state. */  	ctrl = rd32(E1000_CTRL); -	/* -	 * Prevent the PCI-E bus from sticking if there is no TLP connection +	/* Prevent the PCI-E bus from sticking if there is no TLP connection  	 * on the last TLP read/write transaction when MAC is reset.  	 */  	ret_val = igb_disable_pcie_master(hw); @@ -2052,18 +2043,13 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)  	ret_val = igb_get_auto_rd_done(hw);  	if (ret_val) { -		/* -		 * When auto config read does not complete, do not +		/* When auto config read does not complete, do not  		 * return with an error. This can happen in situations  		 * where there is no eeprom and prevents getting link.  		 */  		hw_dbg("Auto Read Done did not complete\n");  	} -	/* If EEPROM is not present, run manual init scripts */ -	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) -		igb_reset_init_script_82575(hw); -  	/* clear global device reset status bit */  	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); @@ -2197,7 +2183,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)  	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {  		/* if checksums compatibility bit is set validate checksums -		 * for all 4 ports. */ +		 * for all 4 ports. 
+		 */  		eeprom_regions_count = 4;  	} @@ -2309,6 +2296,41 @@ out:  }  /** + *  __igb_access_emi_reg - Read/write EMI register + *  @hw: pointer to the HW structure + *  @addr: EMI address to program + *  @data: pointer to value to read/write from/to the EMI address + *  @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, +				  u16 *data, bool read) +{ +	s32 ret_val = E1000_SUCCESS; + +	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); +	if (ret_val) +		return ret_val; + +	if (read) +		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); +	else +		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + +	return ret_val; +} + +/** + *  igb_read_emi_reg - Read Extended Management Interface register + *  @hw: pointer to the HW structure + *  @addr: EMI address to program + *  @data: value to be read from the EMI address + **/ +s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ +	return __igb_access_emi_reg(hw, addr, data, true); +} + +/**   *  igb_set_eee_i350 - Enable/disable EEE support   *  @hw: pointer to the HW structure   * @@ -2338,7 +2360,6 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)  		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)  			hw_dbg("LPI Clock Stop Bit should not be set!\n"); -  	} else {  		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |  			E1000_IPCNFG_EEE_100M_AN); @@ -2355,6 +2376,108 @@ out:  	return ret_val;  } +/** + *  igb_set_eee_i354 - Enable/disable EEE support + *  @hw: pointer to the HW structure + * + *  Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 igb_set_eee_i354(struct e1000_hw *hw) +{ +	struct e1000_phy_info *phy = &hw->phy; +	s32 ret_val = 0; +	u16 phy_data; + +	if ((hw->phy.media_type != e1000_media_type_copper) || +	    (phy->id != M88E1545_E_PHY_ID)) +		goto out; + +	if (!hw->dev_spec._82575.eee_disable) { +		/* Switch to PHY page 18. */ +		ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18); +		if (ret_val) +			goto out; + +		ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1, +					    &phy_data); +		if (ret_val) +			goto out; + +		phy_data |= E1000_M88E1545_EEE_CTRL_1_MS; +		ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1, +					     phy_data); +		if (ret_val) +			goto out; + +		/* Return the PHY to page 0. */ +		ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0); +		if (ret_val) +			goto out; + +		/* Turn on EEE advertisement. */ +		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +					     E1000_EEE_ADV_DEV_I354, +					     &phy_data); +		if (ret_val) +			goto out; + +		phy_data |= E1000_EEE_ADV_100_SUPPORTED | +			    E1000_EEE_ADV_1000_SUPPORTED; +		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +						E1000_EEE_ADV_DEV_I354, +						phy_data); +	} else { +		/* Turn off EEE advertisement. */ +		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +					     E1000_EEE_ADV_DEV_I354, +					     &phy_data); +		if (ret_val) +			goto out; + +		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | +			      E1000_EEE_ADV_1000_SUPPORTED); +		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +					      E1000_EEE_ADV_DEV_I354, +					      phy_data); +	} + +out: +	return ret_val; +} + +/** + *  igb_get_eee_status_i354 - Get EEE status + *  @hw: pointer to the HW structure + *  @status: EEE status + * + *  Get EEE status by guessing based on whether Tx or Rx LPI indications have + *  been received. 
+ **/ +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ +	struct e1000_phy_info *phy = &hw->phy; +	s32 ret_val = 0; +	u16 phy_data; + +	/* Check if EEE is supported on this device. */ +	if ((hw->phy.media_type != e1000_media_type_copper) || +	    (phy->id != M88E1545_E_PHY_ID)) +		goto out; + +	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, +				     E1000_PCS_STATUS_DEV_I354, +				     &phy_data); +	if (ret_val) +		goto out; + +	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | +			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: +	return ret_val; +} +  static const u8 e1000_emc_temp_data[4] = {  	E1000_EMC_INTERNAL_DATA,  	E1000_EMC_DIODE1_DATA, @@ -2368,11 +2491,12 @@ static const u8 e1000_emc_therm_limit[4] = {  	E1000_EMC_DIODE3_THERM_LIMIT  }; -/* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data +/** + *  igb_get_thermal_sensor_data_generic - Gathers thermal sensor data   *  @hw: pointer to hardware structure   *   *  Updates the temperatures in mac.thermal_sensor_data - */ + **/  s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)  {  	s32 status = E1000_SUCCESS; @@ -2420,12 +2544,13 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)  	return status;  } -/* igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds +/** + *  igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds   *  @hw: pointer to hardware structure   *   *  Sets the thermal sensor thresholds according to the NVM map   *  and save off the threshold and location values into mac.thermal_sensor_data - */ + **/  s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)  {  	s32 status = E1000_SUCCESS; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 73ab41f0e03..74a1506b423 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -263,7 +263,9 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);  void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);  void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);  u16 igb_rxpbs_adjust_82580(u32 data); +s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);  s32 igb_set_eee_i350(struct e1000_hw *); +s32 igb_set_eee_i354(struct e1000_hw *);  s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);  s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw); diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 7e13337d3b9..31a0f82cc65 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -138,8 +138,7 @@  #define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */  #define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */ -/* - * Use byte values for the following shift parameters +/* Use byte values for the following shift parameters   * Usage:   *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &   *                  E1000_PSRCTL_BSIZE0_MASK) | @@ -237,11 +236,14 @@  #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000  /* BMC external code execution disabled */ +#define E1000_STATUS_2P5_SKU		0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER	0x00002000 /* Val of 2.5GBE SKU Over */  /* Constants used to intrepret the masked PCI-X bus speed. 
*/  #define SPEED_10    10  #define SPEED_100   100  #define SPEED_1000  1000 +#define SPEED_2500  2500  #define HALF_DUPLEX 1  #define FULL_DUPLEX 2 @@ -382,8 +384,7 @@  #define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */  /* TCP Timer */ -/* - * This defines the bits that are set in the Interrupt Mask +/* This defines the bits that are set in the Interrupt Mask   * Set/Read Register.  Each bit is documented below:   *   o RXT0   = Receiver Timer Interrupt (ring 0)   *   o TXDW   = Transmit Descriptor Written Back @@ -440,8 +441,7 @@  #define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */  /* Receive Address */ -/* - * Number of high/low register pairs in the RAR. The RAR (Receive Address +/* Number of high/low register pairs in the RAR. The RAR (Receive Address   * Registers) holds the directed and multicast addresses that we monitor.   * Technically, we have 16 spots.  However, we reserve one of these spots   * (RAR[15]) for our directed address used by controllers with @@ -760,8 +760,7 @@  #define MAX_PHY_MULTI_PAGE_REG 0xF  /* Bit definitions for valid PHY IDs. */ -/* - * I = Integrated +/* I = Integrated   * E = External   */  #define M88E1111_I_PHY_ID    0x01410CC0 @@ -772,6 +771,7 @@  #define I350_I_PHY_ID        0x015403B0  #define M88_VENDOR           0x0141  #define I210_I_PHY_ID        0x01410C00 +#define M88E1545_E_PHY_ID    0x01410EA0  /* M88E1000 Specific Registers */  #define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */ @@ -791,8 +791,7 @@  #define M88E1000_PSCR_AUTO_X_1000T     0x0040  /* Auto crossover enabled all speeds */  #define M88E1000_PSCR_AUTO_X_MODE      0x0060 -/* - * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold +/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold   * 0=Normal 10BASE-T Rx Threshold   */  /* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ @@ -802,8 +801,7 @@  #define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */  #define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */  #define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */ -/* - * 0 = <50M +/* 0 = <50M   * 1 = 50-80M   * 2 = 80-110M   * 3 = 110-140M @@ -816,20 +814,17 @@  #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7  /* M88E1000 Extended PHY Specific Control Register */ -/* - * 1 = Lost lock detect enabled. +/* 1 = Lost lock detect enabled.   
* Will assert lost lock and bring   * link down if idle not seen   * within 1ms in 1000BASE-T   */ -/* - * Number of times we will attempt to autonegotiate before downshifting if we +/* Number of times we will attempt to autonegotiate before downshifting if we   * are the master   */  #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00  #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000 -/* - * Number of times we will attempt to autonegotiate before downshifting if we +/* Number of times we will attempt to autonegotiate before downshifting if we   * are the slave   */  #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300 @@ -844,8 +839,7 @@  /* i347-AT4 Extended PHY Specific Control Register */ -/* - *  Number of times we will attempt to autonegotiate before downshifting if we +/*  Number of times we will attempt to autonegotiate before downshifting if we   *  are the master   */  #define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 @@ -895,6 +889,22 @@  #define E1000_EEER_LPI_FC            0x00040000  /* EEE Enable on FC */  #define E1000_EEE_SU_LPI_CLK_STP     0X00800000  /* EEE LPI Clock Stop */  #define E1000_EEER_EEE_NEG           0x20000000  /* EEE capability nego */ +#define E1000_EEE_LP_ADV_ADDR_I350   0x040F      /* EEE LP Advertisement */ +#define E1000_EEE_LP_ADV_DEV_I210    7           /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210   61          /* EEE LP Adv Register */ +#define E1000_MMDAC_FUNC_DATA        0x4000      /* Data, no post increment */ +#define E1000_M88E1545_PAGE_ADDR	0x16       /* Page Offset Register */ +#define E1000_M88E1545_EEE_CTRL_1	0x0 +#define E1000_M88E1545_EEE_CTRL_1_MS	0x0001     /* EEE Master/Slave */ +#define E1000_EEE_ADV_DEV_I354		7 +#define E1000_EEE_ADV_ADDR_I354		60 +#define E1000_EEE_ADV_100_SUPPORTED	(1 << 1)   /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED	(1 << 2)   /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354	3 +#define E1000_PCS_STATUS_ADDR_I354	1 +#define E1000_PCS_STATUS_TX_LPI_IND	0x0200     /* Tx in LPI state */ +#define E1000_PCS_STATUS_RX_LPI_RCVD	0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD	0x0800  /* SerDes Control */  #define E1000_GEN_CTL_READY             0x80000000 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 0d5cf9c63d0..488abb24a54 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -38,38 +38,39 @@  struct e1000_hw; -#define E1000_DEV_ID_82576                    0x10C9 -#define E1000_DEV_ID_82576_FIBER              0x10E6 -#define E1000_DEV_ID_82576_SERDES             0x10E7 -#define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8 -#define E1000_DEV_ID_82576_QUAD_COPPER_ET2    0x1526 -#define E1000_DEV_ID_82576_NS                 0x150A -#define E1000_DEV_ID_82576_NS_SERDES          0x1518 -#define E1000_DEV_ID_82576_SERDES_QUAD        0x150D -#define E1000_DEV_ID_82575EB_COPPER           0x10A7 -#define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9 -#define E1000_DEV_ID_82575GB_QUAD_COPPER      0x10D6 -#define E1000_DEV_ID_82580_COPPER             0x150E -#define E1000_DEV_ID_82580_FIBER              0x150F -#define E1000_DEV_ID_82580_SERDES             0x1510 -#define E1000_DEV_ID_82580_SGMII              0x1511 -#define E1000_DEV_ID_82580_COPPER_DUAL        0x1516 -#define E1000_DEV_ID_82580_QUAD_FIBER         0x1527 -#define E1000_DEV_ID_DH89XXCC_SGMII           0x0438 -#define E1000_DEV_ID_DH89XXCC_SERDES          0x043A -#define E1000_DEV_ID_DH89XXCC_BACKPLANE      
 0x043C -#define E1000_DEV_ID_DH89XXCC_SFP             0x0440 -#define E1000_DEV_ID_I350_COPPER              0x1521 -#define E1000_DEV_ID_I350_FIBER               0x1522 -#define E1000_DEV_ID_I350_SERDES              0x1523 -#define E1000_DEV_ID_I350_SGMII               0x1524 +#define E1000_DEV_ID_82576			0x10C9 +#define E1000_DEV_ID_82576_FIBER		0x10E6 +#define E1000_DEV_ID_82576_SERDES		0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER		0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2	0x1526 +#define E1000_DEV_ID_82576_NS			0x150A +#define E1000_DEV_ID_82576_NS_SERDES		0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD		0x150D +#define E1000_DEV_ID_82575EB_COPPER		0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES	0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER	0x10D6 +#define E1000_DEV_ID_82580_COPPER		0x150E +#define E1000_DEV_ID_82580_FIBER		0x150F +#define E1000_DEV_ID_82580_SERDES		0x1510 +#define E1000_DEV_ID_82580_SGMII		0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL		0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER		0x1527 +#define E1000_DEV_ID_DH89XXCC_SGMII		0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES		0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE		0x043C +#define E1000_DEV_ID_DH89XXCC_SFP		0x0440 +#define E1000_DEV_ID_I350_COPPER		0x1521 +#define E1000_DEV_ID_I350_FIBER			0x1522 +#define E1000_DEV_ID_I350_SERDES		0x1523 +#define E1000_DEV_ID_I350_SGMII			0x1524  #define E1000_DEV_ID_I210_COPPER		0x1533 -#define E1000_DEV_ID_I210_COPPER_OEM1		0x1534 -#define E1000_DEV_ID_I210_COPPER_IT		0x1535  #define E1000_DEV_ID_I210_FIBER			0x1536  #define E1000_DEV_ID_I210_SERDES		0x1537  #define E1000_DEV_ID_I210_SGMII			0x1538  #define E1000_DEV_ID_I211_COPPER		0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS	0x1F40 +#define E1000_DEV_ID_I354_SGMII			0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS	0x1F45  #define E1000_REVISION_2 2  #define E1000_REVISION_4 4 @@ -90,6 +91,7 @@ enum e1000_mac_type {  	e1000_82576,  	e1000_82580,  	e1000_i350, +	e1000_i354,  	e1000_i210,  	e1000_i211,  	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */ @@ -98,7 +100,8 @@ enum e1000_mac_type {  enum e1000_media_type {  	e1000_media_type_unknown = 0,  	e1000_media_type_copper = 1, -	e1000_media_type_internal_serdes = 2, +	e1000_media_type_fiber = 2, +	e1000_media_type_internal_serdes = 3,  	e1000_num_media_types  }; @@ -524,6 +527,7 @@ struct e1000_dev_spec_82575 {  	bool sgmii_active;  	bool global_device_reset;  	bool eee_disable; +	bool clear_semaphore_once;  };  struct e1000_hw { diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 6a42344f24f..ddb3cf51b9b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -44,10 +44,42 @@  static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)  {  	u32 swsm; -	s32 ret_val = E1000_SUCCESS;  	s32 timeout = hw->nvm.word_size + 1;  	s32 i = 0; +	/* Get the SW semaphore */ +	while (i < timeout) { +		swsm = rd32(E1000_SWSM); +		if (!(swsm & E1000_SWSM_SMBI)) +			break; + +		udelay(50); +		i++; +	} + +	if (i == timeout) { +		/* In rare circumstances, the SW semaphore may already be held +		 * unintentionally. Clear the semaphore once before giving up. 
+		 */ +		if (hw->dev_spec._82575.clear_semaphore_once) { +			hw->dev_spec._82575.clear_semaphore_once = false; +			igb_put_hw_semaphore(hw); +			for (i = 0; i < timeout; i++) { +				swsm = rd32(E1000_SWSM); +				if (!(swsm & E1000_SWSM_SMBI)) +					break; + +				udelay(50); +			} +		} + +		/* If we do not have the semaphore here, we have to give up. */ +		if (i == timeout) { +			hw_dbg("Driver can't access device - SMBI bit is set.\n"); +			return -E1000_ERR_NVM; +		} +	} +  	/* Get the FW semaphore. */  	for (i = 0; i < timeout; i++) {  		swsm = rd32(E1000_SWSM); @@ -64,12 +96,10 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)  		/* Release semaphores */  		igb_put_hw_semaphore(hw);  		hw_dbg("Driver can't access the NVM\n"); -		ret_val = -E1000_ERR_NVM; -		goto out; +		return -E1000_ERR_NVM;  	} -out: -	return ret_val; +	return E1000_SUCCESS;  }  /** @@ -99,23 +129,6 @@ void igb_release_nvm_i210(struct e1000_hw *hw)  }  /** - *  igb_put_hw_semaphore_i210 - Release hardware semaphore - *  @hw: pointer to the HW structure - * - *  Release hardware semaphore used to access the PHY or NVM - */ -static void igb_put_hw_semaphore_i210(struct e1000_hw *hw) -{ -	u32 swsm; - -	swsm = rd32(E1000_SWSM); - -	swsm &= ~E1000_SWSM_SWESMBI; - -	wr32(E1000_SWSM, swsm); -} - -/**   *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore   *  @hw: pointer to the HW structure   *  @mask: specifies which semaphore to acquire @@ -138,13 +151,11 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)  		}  		swfw_sync = rd32(E1000_SW_FW_SYNC); -		if (!(swfw_sync & fwmask)) +		if (!(swfw_sync & (fwmask | swmask)))  			break; -		/* -		 * Firmware currently using resource (fwmask) -		 */ -		igb_put_hw_semaphore_i210(hw); +		/* Firmware currently using resource (fwmask) */ +		igb_put_hw_semaphore(hw);  		mdelay(5);  		i++;  	} @@ -158,7 +169,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)  	swfw_sync |= swmask;  	wr32(E1000_SW_FW_SYNC, swfw_sync); -	igb_put_hw_semaphore_i210(hw); +	igb_put_hw_semaphore(hw);  out:  	return ret_val;  } @@ -182,7 +193,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)  	swfw_sync &= ~mask;  	wr32(E1000_SW_FW_SYNC, swfw_sync); -	igb_put_hw_semaphore_i210(hw); +	igb_put_hw_semaphore(hw);  }  /** @@ -203,7 +214,8 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,  	/* We cannot hold synchronization semaphores for too long,  	 * because of forceful takeover procedure. However it is more efficient -	 * to read in bursts than synchronizing access for each word. */ +	 * to read in bursts than synchronizing access for each word. +	 */  	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {  		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?  			E1000_EERD_EEWR_MAX_COUNT : (words - i); @@ -242,8 +254,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,  	u32 attempts = 100000;  	s32 ret_val = E1000_SUCCESS; -	/* -	 * A check for invalid values:  offset too large, too many words, +	/* A check for invalid values:  offset too large, too many words,  	 * too many words for the offset, and not enough words.  	 */  	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || @@ -294,7 +305,7 @@ out:   *   *  If error code is returned, data and Shadow RAM may be inconsistent - buffer   *  partially written. 
- */ + **/  s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,  			      u16 *data)  { @@ -326,7 +337,7 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,  /**   *  igb_read_nvm_i211 - Read NVM wrapper function for I211   *  @hw: pointer to the HW structure - *  @address: the word address (aka eeprom offset) to read + *  @words: number of words to read   *  @data: pointer to the data read   *   *  Wrapper function to return data formerly found in the NVM. @@ -549,8 +560,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)  	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { -		/* -		 * Replace the read function with semaphore grabbing with +		/* Replace the read function with semaphore grabbing with  		 * the one that skips this for a while.  		 * We have semaphore taken already here.  		 */ @@ -570,7 +580,6 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)  	return status;  } -  /**   *  igb_update_nvm_checksum_i210 - Update EEPROM checksum   *  @hw: pointer to the HW structure @@ -585,8 +594,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)  	u16 checksum = 0;  	u16 i, nvm_data; -	/* -	 * Read the first word from the EEPROM. If this times out or fails, do +	/* Read the first word from the EEPROM. If this times out or fails, do  	 * not continue or we could be in for a very long wait while every  	 * EEPROM read fails  	 */ @@ -597,8 +605,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)  	}  	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { -		/* -		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read +		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read  		 * because we do not want to take the synchronization  		 * semaphores twice here.  		 */ @@ -635,7 +642,7 @@ out:   *  igb_pool_flash_update_done_i210 - Pool FLUDONE status.   
*  @hw: pointer to the HW structure   * - */ + **/  static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)  {  	s32 ret_val = -E1000_ERR_NVM; @@ -714,3 +721,68 @@ s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)  out:  	return ret_val;  } + +/** + *  __igb_access_xmdio_reg - Read/write XMDIO register + *  @hw: pointer to the HW structure + *  @address: XMDIO address to program + *  @dev_addr: device address to program + *  @data: pointer to value to read/write from/to the XMDIO address + *  @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, +				  u8 dev_addr, u16 *data, bool read) +{ +	s32 ret_val = E1000_SUCCESS; + +	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); +	if (ret_val) +		return ret_val; + +	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); +	if (ret_val) +		return ret_val; + +	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | +							 dev_addr); +	if (ret_val) +		return ret_val; + +	if (read) +		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); +	else +		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); +	if (ret_val) +		return ret_val; + +	/* Recalibrate the device back to 0 */ +	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); +	if (ret_val) +		return ret_val; + +	return ret_val; +} + +/** + *  igb_read_xmdio_reg - Read XMDIO register + *  @hw: pointer to the HW structure + *  @addr: XMDIO address to program + *  @dev_addr: device address to program + *  @data: value to be read from the EMI address + **/ +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ +	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + *  igb_write_xmdio_reg - Write XMDIO register + *  @hw: pointer to the HW structure + *  @addr: XMDIO address to program + *  @dev_addr: device address to program + *  @data: value to be written to the XMDIO address + **/ +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ +	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index e4e1a73b7c7..bfc08e05c90 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -45,6 +45,10 @@ extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,  			       u16 *data);  extern s32 igb_read_invm_version(struct e1000_hw *hw,  				 struct e1000_fw_version *invm_ver); +extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, +			      u16 *data); +extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, +			       u16 data);  #define E1000_STM_OPCODE		0xDB00  #define E1000_EEPROM_FLASH_SIZE_WORD	0x11 diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index a5c7200b9a7..2559d70a232 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -214,7 +214,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)  		else  			vfta &= ~mask;  	} -	if (hw->mac.type == e1000_i350) +	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))  		igb_write_vfta_i350(hw, index, vfta);  	else  		igb_write_vfta(hw, index, vfta); @@ -230,8 +230,8 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)   *  Checks the nvm for an alternate MAC address.  
An alternate MAC address   *  can be setup by pre-boot software and must be treated like a permanent   *  address and must override the actual permanent MAC address.  If an - *  alternate MAC address is fopund it is saved in the hw struct and - *  prgrammed into RAR0 and the cuntion returns success, otherwise the + *  alternate MAC address is found it is saved in the hw struct and + *  programmed into RAR0 and the function returns success, otherwise the   *  function returns an error.   **/  s32 igb_check_alt_mac_addr(struct e1000_hw *hw) @@ -241,8 +241,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)  	u16 offset, nvm_alt_mac_addr_offset, nvm_data;  	u8 alt_mac_addr[ETH_ALEN]; -	/* -	 * Alternate MAC address is handled by the option ROM for 82580 +	/* Alternate MAC address is handled by the option ROM for 82580  	 * and newer. SW support not required.  	 */  	if (hw->mac.type >= e1000_82580) @@ -285,8 +284,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)  		goto out;  	} -	/* -	 * We have a valid alternate MAC address, and we want to treat it the +	/* We have a valid alternate MAC address, and we want to treat it the  	 * same as the normal permanent MAC address stored by the HW into the  	 * RAR. Do this by mapping this address into RAR0.  	 */ @@ -309,8 +307,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)  {  	u32 rar_low, rar_high; -	/* -	 * HW expects these in little endian so we reverse the byte order +	/* HW expects these in little endian so we reverse the byte order  	 * from network order (big endian) to little endian  	 */  	rar_low = ((u32) addr[0] | @@ -323,8 +320,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)  	if (rar_low || rar_high)  		rar_high |= E1000_RAH_AV; -	/* -	 * Some bridges will combine consecutive 32-bit writes into +	/* Some bridges will combine consecutive 32-bit writes into  	 * a single burst write, which will malfunction on some parts.  	 * The flushes avoid this.  	 */ @@ -348,8 +344,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)  {  	u32 hash_bit, hash_reg, mta; -	/* -	 * The MTA is a register array of 32-bit registers. It is +	/* The MTA is a register array of 32-bit registers. It is  	 * treated like an array of (32*mta_reg_count) bits.  We want to  	 * set bit BitArray[hash_value]. So we figure out what register  	 * the bit is in, read it, OR in the new bit, then write @@ -386,15 +381,13 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)  	/* Register count multiplied by bits per register */  	hash_mask = (hw->mac.mta_reg_count * 32) - 1; -	/* -	 * For a mc_filter_type of 0, bit_shift is the number of left-shifts +	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts  	 * where 0xFF would still fall within the hash mask.  	 */  	while (hash_mask >> bit_shift != 0xFF)  		bit_shift++; -	/* -	 * The portion of the address that is used for the hash table +	/* The portion of the address that is used for the hash table  	 * is determined by the mc_filter_type setting.  	 * The algorithm is such that there is a total of 8 bits of shifting.  	 * The bit_shift for a mc_filter_type of 0 represents the number of @@ -536,8 +529,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)  	s32 ret_val;  	bool link; -	/* -	 * We only want to go out to the PHY registers to see if Auto-Neg +	/* We only want to go out to the PHY registers to see if Auto-Neg  	 * has completed and/or if our link status has changed.  
The  	 * get_link_status flag is set upon receiving a Link Status  	 * Change or Rx Sequence Error interrupt. @@ -547,8 +539,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)  		goto out;  	} -	/* -	 * First we want to see if the MII Status Register reports +	/* First we want to see if the MII Status Register reports  	 * link.  If so, then we want to get the current speed/duplex  	 * of the PHY.  	 */ @@ -561,14 +552,12 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)  	mac->get_link_status = false; -	/* -	 * Check if there was DownShift, must be checked +	/* Check if there was DownShift, must be checked  	 * immediately after link-up  	 */  	igb_check_downshift(hw); -	/* -	 * If we are forcing speed/duplex, then we simply return since +	/* If we are forcing speed/duplex, then we simply return since  	 * we have already determined whether we have link or not.  	 */  	if (!mac->autoneg) { @@ -576,15 +565,13 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)  		goto out;  	} -	/* -	 * Auto-Neg is enabled.  Auto Speed Detection takes care +	/* Auto-Neg is enabled.  Auto Speed Detection takes care  	 * of MAC speed/duplex configuration.  So we only need to  	 * configure Collision Distance in the MAC.  	 */  	igb_config_collision_dist(hw); -	/* -	 * Configure Flow Control now that Auto-Neg has completed. +	/* Configure Flow Control now that Auto-Neg has completed.  	 * First, we need to restore the desired flow control  	 * settings because we may have had to re-autoneg with a  	 * different link partner. @@ -611,15 +598,13 @@ s32 igb_setup_link(struct e1000_hw *hw)  {  	s32 ret_val = 0; -	/* -	 * In the case of the phy reset being blocked, we already have a link. +	/* In the case of the phy reset being blocked, we already have a link.  	 * We do not need to set it up again.  	 */  	if (igb_check_reset_block(hw))  		goto out; -	/* -	 * If requested flow control is set to default, set flow control +	/* If requested flow control is set to default, set flow control  	 * based on the EEPROM flow control settings.  	 */  	if (hw->fc.requested_mode == e1000_fc_default) { @@ -628,8 +613,7 @@ s32 igb_setup_link(struct e1000_hw *hw)  			goto out;  	} -	/* -	 * We want to save off the original Flow Control configuration just +	/* We want to save off the original Flow Control configuration just  	 * in case we get disconnected and then reconnected into a different  	 * hub or switch with different Flow Control capabilities.  	 */ @@ -642,8 +626,7 @@ s32 igb_setup_link(struct e1000_hw *hw)  	if (ret_val)  		goto out; -	/* -	 * Initialize the flow control address, type, and PAUSE timer +	/* Initialize the flow control address, type, and PAUSE timer  	 * registers to their default values.  This is done even if flow  	 * control is disabled, because it does not hurt anything to  	 * initialize these registers. @@ -696,16 +679,14 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)  	s32 ret_val = 0;  	u32 fcrtl = 0, fcrth = 0; -	/* -	 * Set the flow control receive threshold registers.  Normally, +	/* Set the flow control receive threshold registers.  Normally,  	 * these registers will be set to a default threshold that may be  	 * adjusted later by the driver's runtime code.  However, if the  	 * ability to transmit pause frames is not enabled, then these  	 * registers will be set to 0.  	 
*/  	if (hw->fc.current_mode & e1000_fc_tx_pause) { -		/* -		 * We need to set up the Receive Threshold high and low water +		/* We need to set up the Receive Threshold high and low water  		 * marks as well as (optionally) enabling the transmission of  		 * XON frames.  		 */ @@ -733,8 +714,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)  	s32 ret_val = 0;  	u16 nvm_data; -	/* -	 * Read and store word 0x0F of the EEPROM. This word contains bits +	/* Read and store word 0x0F of the EEPROM. This word contains bits  	 * that determine the hardware's default PAUSE (flow control) mode,  	 * a bit that determines whether the HW defaults to enabling or  	 * disabling auto-negotiation, and the direction of the @@ -778,8 +758,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)  	ctrl = rd32(E1000_CTRL); -	/* -	 * Because we didn't get link via the internal auto-negotiation +	/* Because we didn't get link via the internal auto-negotiation  	 * mechanism (we either forced link or we got link via PHY  	 * auto-neg), we have to manually enable/disable transmit an  	 * receive flow control. @@ -843,8 +822,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;  	u16 speed, duplex; -	/* -	 * Check for the case where we have fiber media and auto-neg failed +	/* Check for the case where we have fiber media and auto-neg failed  	 * so we had to force link.  In this case, we need to force the  	 * configuration of the MAC to match the "fc" parameter.  	 */ @@ -861,15 +839,13 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  		goto out;  	} -	/* -	 * Check for the case where we have copper media and auto-neg is +	/* Check for the case where we have copper media and auto-neg is  	 * enabled.  In this case, we need to check and see if Auto-Neg  	 * has completed, and if so, how the PHY and link partner has  	 * flow control configured.  	 */  	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { -		/* -		 * Read the MII Status Register and check to see if AutoNeg +		/* Read the MII Status Register and check to see if AutoNeg  		 * has completed.  We read this twice because this reg has  		 * some "sticky" (latched) bits.  		 */ @@ -888,8 +864,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  			goto out;  		} -		/* -		 * The AutoNeg process has completed, so we now need to +		/* The AutoNeg process has completed, so we now need to  		 * read both the Auto Negotiation Advertisement  		 * Register (Address 4) and the Auto_Negotiation Base  		 * Page Ability Register (Address 5) to determine how @@ -904,8 +879,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  		if (ret_val)  			goto out; -		/* -		 * Two bits in the Auto Negotiation Advertisement Register +		/* Two bits in the Auto Negotiation Advertisement Register  		 * (Address 4) and two bits in the Auto Negotiation Base  		 * Page Ability Register (Address 5) determine flow control  		 * for both the PHY and the link partner.  The following @@ -940,8 +914,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  		 */  		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&  		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { -			/* -			 * Now we need to check if the user selected RX ONLY +			/* Now we need to check if the user selected RX ONLY  			 * of pause frames.  In this case, we had to advertise  			 * FULL flow control because we could not advertise RX  			 * ONLY. 
Hence, we must now check to see if we need to @@ -956,8 +929,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  				       "RX PAUSE frames only.\r\n");  			}  		} -		/* -		 * For receiving PAUSE frames ONLY. +		/* For receiving PAUSE frames ONLY.  		 *  		 *   LOCAL DEVICE  |   LINK PARTNER  		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result @@ -971,8 +943,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  			hw->fc.current_mode = e1000_fc_tx_pause;  			hw_dbg("Flow Control = TX PAUSE frames only.\r\n");  		} -		/* -		 * For transmitting PAUSE frames ONLY. +		/* For transmitting PAUSE frames ONLY.  		 *  		 *   LOCAL DEVICE  |   LINK PARTNER  		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result @@ -986,8 +957,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  			hw->fc.current_mode = e1000_fc_rx_pause;  			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");  		} -		/* -		 * Per the IEEE spec, at this point flow control should be +		/* Per the IEEE spec, at this point flow control should be  		 * disabled.  However, we want to consider that we could  		 * be connected to a legacy switch that doesn't advertise  		 * desired flow control, but can be forced on the link @@ -1007,9 +977,9 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  		 * be asked to delay transmission of packets than asking  		 * our link partner to pause transmission of frames.  		 */ -		else if ((hw->fc.requested_mode == e1000_fc_none || -			  hw->fc.requested_mode == e1000_fc_tx_pause) || -			 hw->fc.strict_ieee) { +		else if ((hw->fc.requested_mode == e1000_fc_none) || +			 (hw->fc.requested_mode == e1000_fc_tx_pause) || +			 (hw->fc.strict_ieee)) {  			hw->fc.current_mode = e1000_fc_none;  			hw_dbg("Flow Control = NONE.\r\n");  		} else { @@ -1017,8 +987,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");  		} -		/* -		 * Now we need to do one last check...  If we auto- +		/* Now we need to do one last check...  If we auto-  		 * negotiated to HALF DUPLEX, flow control should not be  		 * enabled per IEEE 802.3 spec.  		 */ @@ -1031,8 +1000,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)  		if (duplex == HALF_DUPLEX)  			hw->fc.current_mode = e1000_fc_none; -		/* -		 * Now we call a subroutine to actually force the MAC +		/* Now we call a subroutine to actually force the MAC  		 * controller to use the correct flow control settings.  		 */  		ret_val = igb_force_mac_fc(hw); @@ -1203,6 +1171,17 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,  		hw_dbg("Half Duplex\n");  	} +	/* Check if it is an I354 2.5Gb backplane connection. */ +	if (hw->mac.type == e1000_i354) { +		if ((status & E1000_STATUS_2P5_SKU) && +		    !(status & E1000_STATUS_2P5_SKU_OVER)) { +			*speed = SPEED_2500; +			*duplex = FULL_DUPLEX; +			hw_dbg("2500 Mbs, "); +			hw_dbg("Full Duplex\n"); +		} +	} +  	return 0;  } @@ -1427,8 +1406,7 @@ s32 igb_blink_led(struct e1000_hw *hw)  	u32 ledctl_blink = 0;  	u32 i; -	/* -	 * set the blink bit for each LED that's "on" (0x0E) +	/* set the blink bit for each LED that's "on" (0x0E)  	 * in ledctl_mode2  	 */  	ledctl_blink = hw->mac.ledctl_mode2; @@ -1467,7 +1445,7 @@ s32 igb_led_off(struct e1000_hw *hw)   *  @hw: pointer to the HW structure   *   *  Returns 0 (0) if successful, else returns -10 - *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued + *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused   *  the master requests to be disabled.   
*   *  Disables PCI-Express master access and verifies there are no pending diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index e6d6ce43326..5e13e83cc60 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -35,8 +35,7 @@  #include "e1000_defines.h"  #include "e1000_i210.h" -/* - * Functions that should not be called directly from drivers but can be used +/* Functions that should not be called directly from drivers but can be used   * by other files in this 'shared code'   */  s32  igb_blink_led(struct e1000_hw *hw); @@ -49,15 +48,15 @@ s32  igb_get_auto_rd_done(struct e1000_hw *hw);  s32  igb_get_bus_info_pcie(struct e1000_hw *hw);  s32  igb_get_hw_semaphore(struct e1000_hw *hw);  s32  igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, -				       u16 *duplex); +				     u16 *duplex);  s32  igb_id_led_init(struct e1000_hw *hw);  s32  igb_led_off(struct e1000_hw *hw);  void igb_update_mc_addr_list(struct e1000_hw *hw, -	                     u8 *mc_addr_list, u32 mc_addr_count); +			     u8 *mc_addr_list, u32 mc_addr_count);  s32  igb_setup_link(struct e1000_hw *hw);  s32  igb_validate_mdi_setting(struct e1000_hw *hw);  s32  igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, -			       u32 offset, u8 data); +			     u32 offset, u8 data);  void igb_clear_hw_cntrs_base(struct e1000_hw *hw);  void igb_clear_vfta(struct e1000_hw *hw); @@ -80,12 +79,12 @@ enum e1000_mng_mode {  	e1000_mng_mode_host_if_only  }; -#define E1000_FACTPS_MNGCG    0x20000000 +#define E1000_FACTPS_MNGCG	0x20000000 -#define E1000_FWSM_MODE_MASK  0xE -#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_MODE_MASK	0xE +#define E1000_FWSM_MODE_SHIFT	1 -#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN    0x2 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN	0x2  extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 38e0df35090..dac1447fabf 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -196,7 +196,8 @@ out:   *  returns SUCCESS if it successfully received a message notification and   *  copied it into the receive buffer.   
**/ -static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, +			       u16 mbx_id)  {  	struct e1000_mbx_info *mbx = &hw->mbx;  	s32 ret_val = -E1000_ERR_MBX; @@ -222,7 +223,8 @@ out:   *  returns SUCCESS if it successfully copied message into the buffer and   *  received an ack to that message within delay * timeout period   **/ -static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, +				u16 mbx_id)  {  	struct e1000_mbx_info *mbx = &hw->mbx;  	s32 ret_val = -E1000_ERR_MBX; @@ -325,7 +327,6 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)  	s32 ret_val = -E1000_ERR_MBX;  	u32 p2v_mailbox; -  	/* Take ownership of the buffer */  	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); @@ -347,7 +348,7 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)   *  returns SUCCESS if it successfully copied message into the buffer   **/  static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, -                              u16 vf_number) +			    u16 vf_number)  {  	s32 ret_val;  	u16 i; @@ -388,7 +389,7 @@ out_no_write:   *  a message due to a VF request so no polling for message is needed.   **/  static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, -                             u16 vf_number) +			   u16 vf_number)  {  	s32 ret_val;  	u16 i; diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index c13b56d9edb..de9bba41acf 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -30,42 +30,42 @@  #include "e1000_hw.h" -#define E1000_P2VMAILBOX_STS   0x00000001 /* Initiate message send to VF */ -#define E1000_P2VMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */ -#define E1000_P2VMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */ +#define E1000_P2VMAILBOX_STS	0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK	0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU	0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU	0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU	0x00000010 /* Reset VFU - used when VF stuck */ -#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ -#define E1000_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */ -#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ -#define E1000_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */ +#define E1000_MBVFICR_VFREQ_MASK	0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1		0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK	0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1		0x00010000 /* bit for VF 1 ack */ -#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */ +#define E1000_VFMAILBOX_SIZE	16 /* 16 32 bit words - 64 bytes */  /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the   * PF.  The reverse is true if it is E1000_PF_*.   
* Message ACK's are the value or'd with 0xF0000000   */ -#define E1000_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with -                                               * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with -                                               * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still -                                                 clear to send requests */ -#define E1000_VT_MSGINFO_SHIFT    16 +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK	0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK	0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS	0x20000000 +#define E1000_VT_MSGINFO_SHIFT	16  /* bits 23:16 are used for exra info for certain messages */ -#define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT) +#define E1000_VT_MSGINFO_MASK	(0xFF << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_RESET            0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests to set MAC addr */ -#define E1000_VF_SET_MULTICAST    0x03 /* VF requests to set MC addr */ -#define E1000_VF_SET_VLAN         0x04 /* VF requests to set VLAN */ -#define E1000_VF_SET_LPE          0x05 /* VF requests to set VMOLR.LPE */ -#define E1000_VF_SET_PROMISC      0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ -#define E1000_VF_SET_PROMISC_MULTICAST    (0x02 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_RESET		0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR	0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST	0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_VLAN	0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_LPE	0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC	0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_MULTICAST	(0x02 << E1000_VT_MSGINFO_SHIFT) -#define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */ +#define E1000_PF_CONTROL_MSG	0x0100 /* PF control message */  s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);  s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index 5b62adbe134..7f9cd7cbd35 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -289,15 +289,14 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)  		udelay(1);  		timeout = NVM_MAX_RETRY_SPI; -		/* -		 * Read "Status Register" repeatedly until the LSB is cleared. +		/* Read "Status Register" repeatedly until the LSB is cleared.  		 * The EEPROM will signal that the command has been completed  		 * by clearing bit 0 of the internal status register.  If it's  		 * not cleared within 'timeout', then error out.  		 */  		while (timeout) {  			igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, -						 hw->nvm.opcode_bits); +					       hw->nvm.opcode_bits);  			spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);  			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))  				break; @@ -335,8 +334,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)  	u16 word_in;  	u8 read_opcode = NVM_READ_OPCODE_SPI; -	/* -	 * A check for invalid values:  offset too large, too many words, +	/* A check for invalid values:  offset too large, too many words,  	 * and not enough words.  	 
*/  	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || @@ -363,8 +361,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)  	igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);  	igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); -	/* -	 * Read the data.  SPI NVMs increment the address with each byte +	/* Read the data.  SPI NVMs increment the address with each byte  	 * read and will roll over if reading beyond the end.  This allows  	 * us to read the whole NVM from any offset  	 */ @@ -395,8 +392,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)  	u32 i, eerd = 0;  	s32 ret_val = 0; -	/* -	 * A check for invalid values:  offset too large, too many words, +	/* A check for invalid values:  offset too large, too many words,  	 * and not enough words.  	 */  	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || @@ -408,7 +404,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)  	for (i = 0; i < words; i++) {  		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + -		       E1000_NVM_RW_REG_START; +			E1000_NVM_RW_REG_START;  		wr32(E1000_EERD, eerd);  		ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); @@ -441,8 +437,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)  	s32 ret_val = -E1000_ERR_NVM;  	u16 widx = 0; -	/* -	 * A check for invalid values:  offset too large, too many words, +	/* A check for invalid values:  offset too large, too many words,  	 * and not enough words.  	 */  	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || @@ -472,8 +467,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)  		igb_standby_nvm(hw); -		/* -		 * Some SPI eeproms use the 8th address bit embedded in the +		/* Some SPI eeproms use the 8th address bit embedded in the  		 * opcode  		 */  		if ((nvm->address_bits == 8) && (offset >= 128)) @@ -538,8 +532,7 @@ s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)  		goto out;  	} -	/* -	 * if nvm_data is not ptr guard the PBA must be in legacy format which +	/* if nvm_data is not ptr guard the PBA must be in legacy format which  	 * means pointer is actually our second data word for the PBA number  	 * and we can decode it into an ascii string  	 */ @@ -728,6 +721,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)  	case e1000_82575:  	case e1000_82576:  	case e1000_82580: +	case e1000_i354:  	case e1000_i350:  	case e1000_i210:  		break; @@ -746,6 +740,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)  	switch (hw->mac.type) {  	case e1000_i210: +	case e1000_i354:  	case e1000_i350:  		/* find combo image version */  		hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 2918c979b5b..115b0da6e01 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -33,29 +33,29 @@  static s32  igb_phy_setup_autoneg(struct e1000_hw *hw);  static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, -					       u16 *phy_ctrl); +					     u16 *phy_ctrl);  static s32  igb_wait_autoneg(struct e1000_hw *hw);  static s32  igb_set_master_slave_mode(struct e1000_hw *hw);  /* Cable length tables */ -static const u16 e1000_m88_cable_length_table[] = -	{ 0, 50, 80, 110, 140, 140, 
E1000_CABLE_LENGTH_UNDEFINED }; +static const u16 e1000_m88_cable_length_table[] = { +	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };  #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ -                (sizeof(e1000_m88_cable_length_table) / \ -                 sizeof(e1000_m88_cable_length_table[0])) +	(sizeof(e1000_m88_cable_length_table) / \ +	sizeof(e1000_m88_cable_length_table[0])) -static const u16 e1000_igp_2_cable_length_table[] = -    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, -      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, -      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, -      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, -      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, -      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, -      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, -      104, 109, 114, 118, 121, 124}; +static const u16 e1000_igp_2_cable_length_table[] = { +	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, +	0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, +	6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, +	21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, +	40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, +	60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, +	83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, +	104, 109, 114, 118, 121, 124};  #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ -		(sizeof(e1000_igp_2_cable_length_table) / \ -		 sizeof(e1000_igp_2_cable_length_table[0])) +	(sizeof(e1000_igp_2_cable_length_table) / \ +	 sizeof(e1000_igp_2_cable_length_table[0]))  /**   *  igb_check_reset_block - Check if PHY reset is blocked @@ -71,8 +71,7 @@ s32 igb_check_reset_block(struct e1000_hw *hw)  	manc = rd32(E1000_MANC); -	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? -	       E1000_BLK_PHY_RESET : 0; +	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;  }  /** @@ -149,8 +148,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)  		goto out;  	} -	/* -	 * Set up Op-code, Phy Address, and register offset in the MDI +	/* Set up Op-code, Phy Address, and register offset in the MDI  	 * Control register.  The MAC will take care of interfacing with the  	 * PHY to retrieve the desired data.  	 */ @@ -160,8 +158,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)  	wr32(E1000_MDIC, mdic); -	/* -	 * Poll the ready bit to see if the MDI read completed +	/* Poll the ready bit to see if the MDI read completed  	 * Increasing the time out as testing showed failures with  	 * the lower time out  	 */ @@ -207,8 +204,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)  		goto out;  	} -	/* -	 * Set up Op-code, Phy Address, and register offset in the MDI +	/* Set up Op-code, Phy Address, and register offset in the MDI  	 * Control register.  The MAC will take care of interfacing with the  	 * PHY to retrieve the desired data.  	 
*/ @@ -219,8 +215,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)  	wr32(E1000_MDIC, mdic); -	/* -	 * Poll the ready bit to see if the MDI read completed +	/* Poll the ready bit to see if the MDI read completed  	 * Increasing the time out as testing showed failures with  	 * the lower time out  	 */ @@ -259,15 +254,13 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)  	struct e1000_phy_info *phy = &hw->phy;  	u32 i, i2ccmd = 0; - -	/* -	 * Set up Op-code, Phy Address, and register address in the I2CCMD +	/* Set up Op-code, Phy Address, and register address in the I2CCMD  	 * register.  The MAC will take care of interfacing with the  	 * PHY to retrieve the desired data.  	 */  	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | -	          (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | -	          (E1000_I2CCMD_OPCODE_READ)); +		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | +		  (E1000_I2CCMD_OPCODE_READ));  	wr32(E1000_I2CCMD, i2ccmd); @@ -317,15 +310,14 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)  	/* Swap the data bytes for the I2C interface */  	phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); -	/* -	 * Set up Op-code, Phy Address, and register address in the I2CCMD +	/* Set up Op-code, Phy Address, and register address in the I2CCMD  	 * register.  The MAC will take care of interfacing with the  	 * PHY to retrieve the desired data.  	 */  	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | -	          (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | -	          E1000_I2CCMD_OPCODE_WRITE | -	          phy_data_swapped); +		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | +		  E1000_I2CCMD_OPCODE_WRITE | +		  phy_data_swapped);  	wr32(E1000_I2CCMD, i2ccmd); @@ -371,8 +363,8 @@ s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)  	if (offset > MAX_PHY_MULTI_PAGE_REG) {  		ret_val = igb_write_phy_reg_mdic(hw, -						   IGP01E1000_PHY_PAGE_SELECT, -						   (u16)offset); +						 IGP01E1000_PHY_PAGE_SELECT, +						 (u16)offset);  		if (ret_val) {  			hw->phy.ops.release(hw);  			goto out; @@ -410,8 +402,8 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)  	if (offset > MAX_PHY_MULTI_PAGE_REG) {  		ret_val = igb_write_phy_reg_mdic(hw, -						   IGP01E1000_PHY_PAGE_SELECT, -						   (u16)offset); +						 IGP01E1000_PHY_PAGE_SELECT, +						 (u16)offset);  		if (ret_val) {  			hw->phy.ops.release(hw);  			goto out; @@ -419,7 +411,7 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)  	}  	ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, -					   data); +					 data);  	hw->phy.ops.release(hw); @@ -439,7 +431,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)  	s32 ret_val;  	u16 phy_data; -  	if (phy->reset_disable) {  		ret_val = 0;  		goto out; @@ -472,8 +463,7 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)  	if (ret_val)  		goto out;  	phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; -	/* -	 * Options: +	/* Options:  	 *   0 - Auto (default)  	 *   1 - MDI mode  	 *   2 - MDI-X mode @@ -520,8 +510,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)  	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; -	/* -	 * Options: +	/* Options:  	 *   MDI/MDI-X = 0 (default)  	 *   0 - Auto for all speeds  	 *   1 - MDI mode @@ -546,8 +535,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)  		break;  	} -	/* -	 * Options: +	/* Options:  	 *   disable_polarity_correction = 0 (default)  	 *       Automatic Correction for 
Reversed Cable Polarity  	 *   0 - Disabled @@ -562,12 +550,11 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)  		goto out;  	if (phy->revision < E1000_REVISION_4) { -		/* -		 * Force TX_CLK in the Extended PHY Specific Control Register +		/* Force TX_CLK in the Extended PHY Specific Control Register  		 * to 25MHz clock.  		 */  		ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, -					     &phy_data); +					    &phy_data);  		if (ret_val)  			goto out; @@ -630,8 +617,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)  	if (ret_val)  		goto out; -	/* -	 * Options: +	/* Options:  	 *   MDI/MDI-X = 0 (default)  	 *   0 - Auto for all speeds  	 *   1 - MDI mode @@ -659,8 +645,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)  		break;  	} -	/* -	 * Options: +	/* Options:  	 *   disable_polarity_correction = 0 (default)  	 *       Automatic Correction for Reversed Cable Polarity  	 *   0 - Disabled @@ -714,14 +699,12 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)  		goto out;  	} -	/* -	 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid +	/* Wait 100ms for MAC to configure PHY from NVM settings, to avoid  	 * timeout issues when LFS is enabled.  	 */  	msleep(100); -	/* -	 * The NVM settings will configure LPLU in D3 for +	/* The NVM settings will configure LPLU in D3 for  	 * non-IGP1 PHYs.  	 */  	if (phy->type == e1000_phy_igp) { @@ -765,8 +748,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)  	/* set auto-master slave resolution settings */  	if (hw->mac.autoneg) { -		/* -		 * when autonegotiation advertisement is only 1000Mbps then we +		/* when autonegotiation advertisement is only 1000Mbps then we  		 * should disable SmartSpeed and enable Auto MasterSlave  		 * resolution as hardware default.  		 */ @@ -844,14 +826,12 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)  	s32 ret_val;  	u16 phy_ctrl; -	/* -	 * Perform some bounds checking on the autoneg advertisement +	/* Perform some bounds checking on the autoneg advertisement  	 * parameter.  	 */  	phy->autoneg_advertised &= phy->autoneg_mask; -	/* -	 * If autoneg_advertised is zero, we assume it was not defaulted +	/* If autoneg_advertised is zero, we assume it was not defaulted  	 * by the calling code so we set to advertise full capability.  	 */  	if (phy->autoneg_advertised == 0) @@ -865,8 +845,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)  	}  	hw_dbg("Restarting Auto-Neg\n"); -	/* -	 * Restart auto-negotiation by setting the Auto Neg Enable bit and +	/* Restart auto-negotiation by setting the Auto Neg Enable bit and  	 * the Auto Neg Restart bit in the PHY control register.  	 */  	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); @@ -878,8 +857,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)  	if (ret_val)  		goto out; -	/* -	 * Does the user want to wait for Auto-Neg to complete here, or +	/* Does the user want to wait for Auto-Neg to complete here, or  	 * check at a later time (for example, callback routine).  	 */  	if (phy->autoneg_wait_to_complete) { @@ -928,16 +906,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)  			goto out;  	} -	/* -	 * Need to parse both autoneg_advertised and fc and set up +	/* Need to parse both autoneg_advertised and fc and set up  	 * the appropriate PHY registers.  First we will parse for  	 * autoneg_advertised software override.  Since we can advertise  	 * a plethora of combinations, we need to check each bit  	 * individually.  	 
*/ -	/* -	 * First we clear all the 10/100 mb speed bits in the Auto-Neg +	/* First we clear all the 10/100 mb speed bits in the Auto-Neg  	 * Advertisement Register (Address 4) and the 1000 mb speed bits in  	 * the  1000Base-T Control Register (Address 9).  	 */ @@ -983,8 +959,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)  		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;  	} -	/* -	 * Check for a software override of the flow control settings, and +	/* Check for a software override of the flow control settings, and  	 * setup the PHY advertisement registers accordingly.  If  	 * auto-negotiation is enabled, then software will have to set the  	 * "PAUSE" bits to the correct value in the Auto-Negotiation @@ -1003,15 +978,13 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)  	 */  	switch (hw->fc.current_mode) {  	case e1000_fc_none: -		/* -		 * Flow control (RX & TX) is completely disabled by a +		/* Flow control (RX & TX) is completely disabled by a  		 * software over-ride.  		 */  		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);  		break;  	case e1000_fc_rx_pause: -		/* -		 * RX Flow control is enabled, and TX Flow control is +		/* RX Flow control is enabled, and TX Flow control is  		 * disabled, by a software over-ride.  		 *  		 * Since there really isn't a way to advertise that we are @@ -1023,16 +996,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)  		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);  		break;  	case e1000_fc_tx_pause: -		/* -		 * TX Flow control is enabled, and RX Flow control is +		/* TX Flow control is enabled, and RX Flow control is  		 * disabled, by a software over-ride.  		 */  		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;  		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;  		break;  	case e1000_fc_full: -		/* -		 * Flow control (both RX and TX) is enabled by a software +		/* Flow control (both RX and TX) is enabled by a software  		 * over-ride.  		 */  		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); @@ -1075,18 +1046,15 @@ s32 igb_setup_copper_link(struct e1000_hw *hw)  	s32 ret_val;  	bool link; -  	if (hw->mac.autoneg) { -		/* -		 * Setup autoneg and flow control advertisement and perform +		/* Setup autoneg and flow control advertisement and perform  		 * autonegotiation.  		 */  		ret_val = igb_copper_link_autoneg(hw);  		if (ret_val)  			goto out;  	} else { -		/* -		 * PHY will be set to 10H, 10F, 100H or 100F +		/* PHY will be set to 10H, 10F, 100H or 100F  		 * depending on user settings.  		 */  		hw_dbg("Forcing Speed and Duplex\n"); @@ -1097,14 +1065,10 @@ s32 igb_setup_copper_link(struct e1000_hw *hw)  		}  	} -	/* -	 * Check link status. Wait up to 100 microseconds for link to become +	/* Check link status. Wait up to 100 microseconds for link to become  	 * valid.  	 */ -	ret_val = igb_phy_has_link(hw, -	                           COPPER_LINK_UP_LIMIT, -	                           10, -	                           &link); +	ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);  	if (ret_val)  		goto out; @@ -1145,8 +1109,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)  	if (ret_val)  		goto out; -	/* -	 * Clear Auto-Crossover to force MDI manually.  IGP requires MDI +	/* Clear Auto-Crossover to force MDI manually.  IGP requires MDI  	 * forced whenever speed and duplex are forced.  	 
*/  	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); @@ -1167,10 +1130,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)  	if (phy->autoneg_wait_to_complete) {  		hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); -		ret_val = igb_phy_has_link(hw, -						     PHY_FORCE_LIMIT, -						     100000, -						     &link); +		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);  		if (ret_val)  			goto out; @@ -1178,10 +1138,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)  			hw_dbg("Link taking longer than expected.\n");  		/* Try once more */ -		ret_val = igb_phy_has_link(hw, -						     PHY_FORCE_LIMIT, -						     100000, -						     &link); +		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);  		if (ret_val)  			goto out;  	} @@ -1209,8 +1166,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)  	/* I210 and I211 devices support Auto-Crossover in forced operation. */  	if (phy->type != e1000_phy_i210) { -		/* -		 * Clear Auto-Crossover to force MDI manually.  M88E1000 +		/* Clear Auto-Crossover to force MDI manually.  M88E1000  		 * requires MDI forced whenever speed and duplex are forced.  		 */  		ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, @@ -1266,13 +1222,12 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)  			if (!reset_dsp)  				hw_dbg("Link taking longer than expected.\n");  			else { -				/* -				 * We didn't get link. +				/* We didn't get link.  				 * Reset the DSP and cross our fingers.  				 */  				ret_val = phy->ops.write_reg(hw, -							     M88E1000_PHY_PAGE_SELECT, -							     0x001d); +						M88E1000_PHY_PAGE_SELECT, +						0x001d);  				if (ret_val)  					goto out;  				ret_val = igb_phy_reset_dsp(hw); @@ -1298,8 +1253,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)  	if (ret_val)  		goto out; -	/* -	 * Resetting the phy means we need to re-force TX_CLK in the +	/* Resetting the phy means we need to re-force TX_CLK in the  	 * Extended PHY Specific Control Register to 25MHz clock from  	 * the reset value of 2.5MHz.  	 */ @@ -1308,8 +1262,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)  	if (ret_val)  		goto out; -	/* -	 * In addition, we must re-enable CRS on Tx for both half and full +	/* In addition, we must re-enable CRS on Tx for both half and full  	 * duplex.  	 */  	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); @@ -1336,7 +1289,7 @@ out:   *  take affect.   **/  static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, -					       u16 *phy_ctrl) +					     u16 *phy_ctrl)  {  	struct e1000_mac_info *mac = &hw->mac;  	u32 ctrl; @@ -1417,8 +1370,7 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)  					     data);  		if (ret_val)  			goto out; -		/* -		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used +		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used  		 * during Dx states where the power conservation is most  		 * important.  During driver activity we should enable  		 * SmartSpeed, so performance is maintained. 
@@ -1461,13 +1413,13 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)  		/* When LPLU is enabled, we should disable SmartSpeed */  		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, -					     &data); +					    &data);  		if (ret_val)  			goto out;  		data &= ~IGP01E1000_PSCFR_SMART_SPEED;  		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, -					      data); +					     data);  	}  out: @@ -1556,8 +1508,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)  	s32 ret_val;  	u16 data, offset, mask; -	/* -	 * Polarity is determined based on the speed of +	/* Polarity is determined based on the speed of  	 * our connection.  	 */  	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); @@ -1569,8 +1520,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)  		offset	= IGP01E1000_PHY_PCS_INIT_REG;  		mask	= IGP01E1000_PHY_POLARITY_MASK;  	} else { -		/* -		 * This really only applies to 10Mbps since +		/* This really only applies to 10Mbps since  		 * there is no polarity for 100Mbps (always 0).  		 */  		offset	= IGP01E1000_PHY_PORT_STATUS; @@ -1589,7 +1539,7 @@ out:  }  /** - *  igb_wait_autoneg - Wait for auto-neg compeletion + *  igb_wait_autoneg - Wait for auto-neg completion   *  @hw: pointer to the HW structure   *   *  Waits for auto-negotiation to complete or for the auto-negotiation time @@ -1613,8 +1563,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)  		msleep(100);  	} -	/* -	 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation +	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation  	 * has completed.  	 */  	return ret_val; @@ -1630,21 +1579,19 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)   *  Polls the PHY status register for link, 'iterations' number of times.   **/  s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, -			       u32 usec_interval, bool *success) +		     u32 usec_interval, bool *success)  {  	s32 ret_val = 0;  	u16 i, phy_status;  	for (i = 0; i < iterations; i++) { -		/* -		 * Some PHYs require the PHY_STATUS register to be read +		/* Some PHYs require the PHY_STATUS register to be read  		 * twice due to the link bit being sticky.  No harm doing  		 * it across the board.  		 */  		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); -		if (ret_val) { -			/* -			 * If the first read fails, another entity may have +		if (ret_val && usec_interval > 0) { +			/* If the first read fails, another entity may have  			 * ownership of the resources, wait and try again to  			 * see if they have relinquished the resources yet.  			 */ @@ -1735,6 +1682,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)  		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);  		phy->cable_length = phy_data / (is_cm ? 
100 : 1);  		break; +	case M88E1545_E_PHY_ID:  	case I347AT4_E_PHY_ID:  		/* Remember the original page select and set it to 7 */  		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, @@ -1834,10 +1782,10 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)  	u16 cur_agc_index, max_agc_index = 0;  	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;  	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { -	       IGP02E1000_PHY_AGC_A, -	       IGP02E1000_PHY_AGC_B, -	       IGP02E1000_PHY_AGC_C, -	       IGP02E1000_PHY_AGC_D +		IGP02E1000_PHY_AGC_A, +		IGP02E1000_PHY_AGC_B, +		IGP02E1000_PHY_AGC_C, +		IGP02E1000_PHY_AGC_D  	};  	/* Read the AGC registers for all channels */ @@ -1846,8 +1794,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)  		if (ret_val)  			goto out; -		/* -		 * Getting bits 15:9, which represent the combination of +		/* Getting bits 15:9, which represent the combination of  		 * coarse and fine gain values.  The result is a number  		 * that can be put into the lookup table to obtain the  		 * approximate cable length. @@ -2167,15 +2114,13 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)  	hw->phy.ops.write_reg(hw, 0x1796, 0x0008);  	/* Change cg_icount + enable integbp for channels BCD */  	hw->phy.ops.write_reg(hw, 0x1798, 0xD008); -	/* -	 * Change cg_icount + enable integbp + change prop_factor_master +	/* Change cg_icount + enable integbp + change prop_factor_master  	 * to 8 for channel A  	 */  	hw->phy.ops.write_reg(hw, 0x1898, 0xD918);  	/* Disable AHT in Slave mode on channel A */  	hw->phy.ops.write_reg(hw, 0x187A, 0x0800); -	/* -	 * Enable LPLU and disable AN to 1000 in non-D0a states, +	/* Enable LPLU and disable AN to 1000 in non-D0a states,  	 * Enable SPD+B2B  	 */  	hw->phy.ops.write_reg(hw, 0x0019, 0x008D); @@ -2257,8 +2202,8 @@ static s32 igb_check_polarity_82580(struct e1000_hw *hw)  	if (!ret_val)  		phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) -		                      ? e1000_rev_polarity_reversed -		                      : e1000_rev_polarity_normal; +				      ? e1000_rev_polarity_reversed +				      : e1000_rev_polarity_normal;  	return ret_val;  } @@ -2278,7 +2223,6 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)  	u16 phy_data;  	bool link; -  	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);  	if (ret_val)  		goto out; @@ -2289,8 +2233,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)  	if (ret_val)  		goto out; -	/* -	 * Clear Auto-Crossover to force MDI manually.  82580 requires MDI +	/* Clear Auto-Crossover to force MDI manually.  82580 requires MDI  	 * forced whenever speed and duplex are forced.  	 
*/  	ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); @@ -2310,10 +2253,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)  	if (phy->autoneg_wait_to_complete) {  		hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); -		ret_val = igb_phy_has_link(hw, -		                           PHY_FORCE_LIMIT, -		                           100000, -		                           &link); +		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);  		if (ret_val)  			goto out; @@ -2321,10 +2261,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)  			hw_dbg("Link taking longer than expected.\n");  		/* Try once more */ -		ret_val = igb_phy_has_link(hw, -		                           PHY_FORCE_LIMIT, -		                           100000, -		                           &link); +		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);  		if (ret_val)  			goto out;  	} @@ -2349,7 +2286,6 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw)  	u16 data;  	bool link; -  	ret_val = igb_phy_has_link(hw, 1, 0, &link);  	if (ret_val)  		goto out; @@ -2383,12 +2319,12 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw)  			goto out;  		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) -		                ? e1000_1000t_rx_status_ok -		                : e1000_1000t_rx_status_not_ok; +				? e1000_1000t_rx_status_ok +				: e1000_1000t_rx_status_not_ok;  		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) -		                 ? e1000_1000t_rx_status_ok -		                 : e1000_1000t_rx_status_not_ok; +				 ? e1000_1000t_rx_status_ok +				 : e1000_1000t_rx_status_not_ok;  	} else {  		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;  		phy->local_rx = e1000_1000t_rx_status_undefined; @@ -2412,13 +2348,12 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)  	s32 ret_val;  	u16 phy_data, length; -  	ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);  	if (ret_val)  		goto out;  	length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> -	         I82580_DSTATUS_CABLE_LENGTH_SHIFT; +		 I82580_DSTATUS_CABLE_LENGTH_SHIFT;  	if (length == E1000_CABLE_LENGTH_UNDEFINED)  		ret_val = -E1000_ERR_PHY; diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 15343286082..82632c6c53a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -65,6 +65,7 @@  #define E1000_TIPG     0x00410  /* TX Inter-packet gap -RW */  #define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */  #define E1000_LEDCTL   0x00E00  /* LED Control - RW */ +#define E1000_LEDMUX   0x08130  /* LED MUX Control */  #define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */  #define E1000_PBS      0x01008  /* Packet Buffer Size */  #define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */ @@ -83,6 +84,9 @@  #define E1000_I2C_DATA_IN   0x00001000  /* I2C- Data In */  #define E1000_I2C_CLK_OE_N  0x00002000  /* I2C- Clock Output Enable */  #define E1000_I2C_CLK_IN    0x00004000  /* I2C- Clock In */ +#define E1000_MPHY_ADDR_CTRL	0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA		0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT		0x0E0C /* GBE MPHY Statistics */  /* IEEE 1588 TIMESYNCH */  #define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ @@ -117,21 +121,21 @@  #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))  /* DMA Coalescing registers */ -#define E1000_DMACR             0x02508 /* Control Register */ -#define E1000_DMCTXTH   
        0x03550 /* Transmit Threshold */ -#define E1000_DMCTLX            0x02514 /* Time to Lx Request */ -#define E1000_DMCRTRH           0x05DD0 /* Receive Packet Rate Threshold */ -#define E1000_DMCCNT            0x05DD4 /* Current Rx Count */ -#define E1000_FCRTC             0x02170 /* Flow Control Rx high watermark */ -#define E1000_PCIEMISC          0x05BB8 /* PCIE misc config register */ +#define E1000_DMACR	0x02508 /* Control Register */ +#define E1000_DMCTXTH	0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX	0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH	0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT	0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC	0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC	0x05BB8 /* PCIE misc config register */  /* TX Rate Limit Registers */ -#define E1000_RTTDQSEL	0x3604	/* Tx Desc Plane Queue Select - WO */ -#define E1000_RTTBCNRM	0x3690	/* Tx BCN Rate-scheduler MMW */ -#define E1000_RTTBCNRC	0x36B0	/* Tx BCN Rate-Scheduler Config - WO */ +#define E1000_RTTDQSEL	0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRM	0x3690 /* Tx BCN Rate-scheduler MMW */ +#define E1000_RTTBCNRC	0x36B0 /* Tx BCN Rate-Scheduler Config - WO */  /* Split and Replication RX Control - RW */ -#define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */ +#define E1000_RXPBS	0x02404 /* Rx Packet Buffer Size - RW */  /* Thermal sensor configuration and status registers */  #define E1000_THMJT	0x08100 /* Junction Temperature */ @@ -140,8 +144,7 @@  #define E1000_THHIGHTC	0x0810C /* High Threshold Control */  #define E1000_THSTAT	0x08110 /* Thermal Sensor Status */ -/* - * Convenience macros +/* Convenience macros   *   * Note: "_n" is the queue number of the register to be written to.   * @@ -287,7 +290,7 @@  #define E1000_RFCTL    0x05008  /* Receive Filter Control*/  #define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */  #define E1000_RA       0x05400  /* Receive Address - RW Array */ -#define E1000_RA2      0x054E0  /* 2nd half of receive address array - RW Array */ +#define E1000_RA2      0x054E0  /* 2nd half of Rx address array - RW Array */  #define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))  #define E1000_RAL(_i)  (((_i) <= 15) ? 
(0x05400 + ((_i) * 8)) : \                                         (0x054E0 + ((_i - 16) * 8))) @@ -360,21 +363,25 @@  	(readl(hw->hw_addr + reg + ((offset) << 2)))  /* DMA Coalescing registers */ -#define E1000_PCIEMISC          0x05BB8 /* PCIE misc config register */ +#define E1000_PCIEMISC	0x05BB8 /* PCIE misc config register */  /* Energy Efficient Ethernet "EEE" register */ -#define E1000_IPCNFG  0x0E38  /* Internal PHY Configuration */ -#define E1000_EEER    0x0E30  /* Energy Efficient Ethernet */ -#define E1000_EEE_SU  0X0E34  /* EEE Setup */ +#define E1000_IPCNFG	0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER	0x0E30 /* Energy Efficient Ethernet */ +#define E1000_EEE_SU	0X0E34 /* EEE Setup */ +#define E1000_EMIADD	0x10   /* Extended Memory Indirect Address */ +#define E1000_EMIDATA	0x11   /* Extended Memory Indirect Data */ +#define E1000_MMDAC	13     /* MMD Access Control */ +#define E1000_MMDAAD	14     /* MMD Access Address/Data */  /* Thermal Sensor Register */ -#define E1000_THSTAT    0x08110 /* Thermal Sensor Status */ +#define E1000_THSTAT	0x08110 /* Thermal Sensor Status */  /* OS2BMC Registers */ -#define E1000_B2OSPC    0x08FE0 /* BMC2OS packets sent by BMC */ -#define E1000_B2OGPRC   0x04158 /* BMC2OS packets received by host */ -#define E1000_O2BGPTC   0x08FE4 /* OS2BMC packets received by BMC */ -#define E1000_O2BSPC    0x0415C /* OS2BMC packets transmitted by host */ +#define E1000_B2OSPC	0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC	0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC	0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC	0x0415C /* OS2BMC packets transmitted by host */  #define E1000_SRWR		0x12018  /* Shadow Ram Write Register - RW */  #define E1000_I210_FLMNGCTL	0x12038 diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index ab577a763a2..9d6c075e232 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -44,54 +44,54 @@  struct igb_adapter; -#define E1000_PCS_CFG_IGN_SD               1 +#define E1000_PCS_CFG_IGN_SD	1  /* Interrupt defines */ -#define IGB_START_ITR                    648 /* ~6000 ints/sec */ -#define IGB_4K_ITR                       980 -#define IGB_20K_ITR                      196 -#define IGB_70K_ITR                       56 +#define IGB_START_ITR		648 /* ~6000 ints/sec */ +#define IGB_4K_ITR		980 +#define IGB_20K_ITR		196 +#define IGB_70K_ITR		56  /* TX/RX descriptor defines */ -#define IGB_DEFAULT_TXD                  256 -#define IGB_DEFAULT_TX_WORK		 128 -#define IGB_MIN_TXD                       80 -#define IGB_MAX_TXD                     4096 +#define IGB_DEFAULT_TXD		256 +#define IGB_DEFAULT_TX_WORK	128 +#define IGB_MIN_TXD		80 +#define IGB_MAX_TXD		4096 -#define IGB_DEFAULT_RXD                  256 -#define IGB_MIN_RXD                       80 -#define IGB_MAX_RXD                     4096 +#define IGB_DEFAULT_RXD		256 +#define IGB_MIN_RXD		80 +#define IGB_MAX_RXD		4096 -#define IGB_DEFAULT_ITR                    3 /* dynamic */ -#define IGB_MAX_ITR_USECS              10000 -#define IGB_MIN_ITR_USECS                 10 -#define NON_Q_VECTORS                      1 -#define MAX_Q_VECTORS                      8 +#define IGB_DEFAULT_ITR		3 /* dynamic */ +#define IGB_MAX_ITR_USECS	10000 +#define IGB_MIN_ITR_USECS	10 +#define NON_Q_VECTORS		1 +#define MAX_Q_VECTORS		8  /* Transmit and receive queues */ -#define IGB_MAX_RX_QUEUES                  8 -#define IGB_MAX_RX_QUEUES_82575            4 
-#define IGB_MAX_RX_QUEUES_I211             2 -#define IGB_MAX_TX_QUEUES                  8 -#define IGB_MAX_VF_MC_ENTRIES              30 -#define IGB_MAX_VF_FUNCTIONS               8 -#define IGB_MAX_VFTA_ENTRIES               128 -#define IGB_82576_VF_DEV_ID                0x10CA -#define IGB_I350_VF_DEV_ID                 0x1520 +#define IGB_MAX_RX_QUEUES	8 +#define IGB_MAX_RX_QUEUES_82575	4 +#define IGB_MAX_RX_QUEUES_I211	2 +#define IGB_MAX_TX_QUEUES	8 +#define IGB_MAX_VF_MC_ENTRIES	30 +#define IGB_MAX_VF_FUNCTIONS	8 +#define IGB_MAX_VFTA_ENTRIES	128 +#define IGB_82576_VF_DEV_ID	0x10CA +#define IGB_I350_VF_DEV_ID	0x1520  /* NVM version defines */ -#define IGB_MAJOR_MASK			0xF000 -#define IGB_MINOR_MASK			0x0FF0 -#define IGB_BUILD_MASK			0x000F -#define IGB_COMB_VER_MASK		0x00FF -#define IGB_MAJOR_SHIFT			12 -#define IGB_MINOR_SHIFT			4 -#define IGB_COMB_VER_SHFT		8 -#define IGB_NVM_VER_INVALID		0xFFFF -#define IGB_ETRACK_SHIFT		16 -#define NVM_ETRACK_WORD			0x0042 -#define NVM_COMB_VER_OFF		0x0083 -#define NVM_COMB_VER_PTR		0x003d +#define IGB_MAJOR_MASK		0xF000 +#define IGB_MINOR_MASK		0x0FF0 +#define IGB_BUILD_MASK		0x000F +#define IGB_COMB_VER_MASK	0x00FF +#define IGB_MAJOR_SHIFT		12 +#define IGB_MINOR_SHIFT		4 +#define IGB_COMB_VER_SHFT	8 +#define IGB_NVM_VER_INVALID	0xFFFF +#define IGB_ETRACK_SHIFT	16 +#define NVM_ETRACK_WORD		0x0042 +#define NVM_COMB_VER_OFF	0x0083 +#define NVM_COMB_VER_PTR	0x003d  struct vf_data_storage {  	unsigned char vf_mac_addresses[ETH_ALEN]; @@ -103,6 +103,7 @@ struct vf_data_storage {  	u16 pf_vlan; /* When set, guest VLAN config not allowed. */  	u16 pf_qos;  	u16 tx_rate; +	bool spoofchk_enabled;  };  #define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */ @@ -121,14 +122,14 @@ struct vf_data_storage {   *           descriptors until either it has this many to write back, or the   *           ITR timer expires.   */ -#define IGB_RX_PTHRESH                     8 -#define IGB_RX_HTHRESH                     8 -#define IGB_TX_PTHRESH                     8 -#define IGB_TX_HTHRESH                     1 -#define IGB_RX_WTHRESH                     ((hw->mac.type == e1000_82576 && \ -					     adapter->msix_entries) ? 1 : 4) -#define IGB_TX_WTHRESH                     ((hw->mac.type == e1000_82576 && \ -					     adapter->msix_entries) ? 1 : 16) +#define IGB_RX_PTHRESH	((hw->mac.type == e1000_i354) ? 12 : 8) +#define IGB_RX_HTHRESH	8 +#define IGB_TX_PTHRESH	((hw->mac.type == e1000_i354) ? 20 : 8) +#define IGB_TX_HTHRESH	1 +#define IGB_RX_WTHRESH	((hw->mac.type == e1000_82576 && \ +			  adapter->msix_entries) ? 1 : 4) +#define IGB_TX_WTHRESH	((hw->mac.type == e1000_82576 && \ +			  adapter->msix_entries) ? 1 : 16)  /* this is the size past which hardware will drop packets when setting LPE=0 */  #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 @@ -140,17 +141,17 @@ struct vf_data_storage {  #define IGB_RX_BUFSZ		IGB_RXBUFFER_2048  /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ -#define IGB_RX_BUFFER_WRITE	16	/* Must be power of 2 */ +#define IGB_RX_BUFFER_WRITE	16 /* Must be power of 2 */ -#define AUTO_ALL_MODES            0 -#define IGB_EEPROM_APME         0x0400 +#define AUTO_ALL_MODES		0 +#define IGB_EEPROM_APME		0x0400  #ifndef IGB_MASTER_SLAVE  /* Switch to override PHY master/slave setting */  #define IGB_MASTER_SLAVE	e1000_ms_hw_default  #endif -#define IGB_MNG_VLAN_NONE -1 +#define IGB_MNG_VLAN_NONE	-1  enum igb_tx_flags {  	/* cmd_type flags */ @@ -164,11 +165,10 @@ enum igb_tx_flags {  };  /* VLAN info */ -#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000 +#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000  #define IGB_TX_FLAGS_VLAN_SHIFT	16 -/* - * The largest size we can write to the descriptor is 65535.  In order to +/* The largest size we can write to the descriptor is 65535.  In order to   * maintain a power of two alignment we have to limit ourselves to 32K.   */  #define IGB_MAX_TXD_PWR	15 @@ -178,8 +178,17 @@ enum igb_tx_flags {  #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)  #define DESC_NEEDED (MAX_SKB_FRAGS + 4) +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP		0x5C +#define IGB_SFF_8472_COMP		0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE		0x4 +#define IGB_SFF_8472_UNSUP		0x00 +  /* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ + * so a DMA handle can be stored along with the buffer + */  struct igb_tx_buffer {  	union e1000_adv_tx_desc *next_to_watch;  	unsigned long time_stamp; @@ -290,11 +299,11 @@ enum e1000_ring_flags_t {  #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) -#define IGB_RX_DESC(R, i)	    \ +#define IGB_RX_DESC(R, i)	\  	(&(((union e1000_adv_rx_desc *)((R)->desc))[i])) -#define IGB_TX_DESC(R, i)	    \ +#define IGB_TX_DESC(R, i)	\  	(&(((union e1000_adv_tx_desc *)((R)->desc))[i])) -#define IGB_TX_CTXTDESC(R, i)	    \ +#define IGB_TX_CTXTDESC(R, i)	\  	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))  /* igb_test_staterr - tests bits within Rx descriptor status and error fields */ @@ -453,12 +462,12 @@ struct igb_adapter {  #define IGB_FLAG_WOL_SUPPORTED		(1 << 8)  /* DMA Coalescing defines */ -#define IGB_MIN_TXPBSIZE           20408 -#define IGB_TX_BUF_4096            4096 -#define IGB_DMCTLX_DCFLUSH_DIS     0x80000000  /* Disable DMA Coal Flush */ +#define IGB_MIN_TXPBSIZE	20408 +#define IGB_TX_BUF_4096		4096 +#define IGB_DMCTLX_DCFLUSH_DIS	0x80000000  /* Disable DMA Coal Flush */ -#define IGB_82576_TSYNC_SHIFT 19 -#define IGB_TS_HDR_LEN        16 +#define IGB_82576_TSYNC_SHIFT	19 +#define IGB_TS_HDR_LEN		16  enum e1000_state_t {  	__IGB_TESTING,  	__IGB_RESETTING, diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index a3830a8ba4c..7876240fa74 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -38,6 +38,7 @@  #include <linux/slab.h>  #include <linux/pm_runtime.h>  #include <linux/highmem.h> +#include <linux/mdio.h>  #include "igb.h" @@ -178,44 +179,67 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)  		ecmd->port = PORT_TP;  		ecmd->phy_address = hw->phy.addr; +		ecmd->transceiver = XCVR_INTERNAL;  	} else { -		ecmd->supported   = (SUPPORTED_1000baseT_Full | -				     SUPPORTED_FIBRE | -				     SUPPORTED_Autoneg); +		ecmd->supported = (SUPPORTED_1000baseT_Full | +				   SUPPORTED_100baseT_Full | +				   SUPPORTED_FIBRE | +				   SUPPORTED_Autoneg | +				   
SUPPORTED_Pause); +		if (hw->mac.type == e1000_i354) +				ecmd->supported |= SUPPORTED_2500baseX_Full; + +		ecmd->advertising = ADVERTISED_FIBRE; + +		switch (adapter->link_speed) { +		case SPEED_2500: +			ecmd->advertising = ADVERTISED_2500baseX_Full; +			break; +		case SPEED_1000: +			ecmd->advertising = ADVERTISED_1000baseT_Full; +			break; +		case SPEED_100: +			ecmd->advertising = ADVERTISED_100baseT_Full; +			break; +		default: +			break; +		} -		ecmd->advertising = (ADVERTISED_1000baseT_Full | -				     ADVERTISED_FIBRE | -				     ADVERTISED_Autoneg | -				     ADVERTISED_Pause); +		if (hw->mac.autoneg == 1) +			ecmd->advertising |= ADVERTISED_Autoneg;  		ecmd->port = PORT_FIBRE; +		ecmd->transceiver = XCVR_EXTERNAL;  	} -	ecmd->transceiver = XCVR_INTERNAL; -  	status = rd32(E1000_STATUS);  	if (status & E1000_STATUS_LU) { - -		if ((status & E1000_STATUS_SPEED_1000) || -		    hw->phy.media_type != e1000_media_type_copper) -			ethtool_cmd_speed_set(ecmd, SPEED_1000); +		if ((hw->mac.type == e1000_i354) && +		    (status & E1000_STATUS_2P5_SKU) && +		    !(status & E1000_STATUS_2P5_SKU_OVER)) +			ecmd->speed = SPEED_2500; +		else if (status & E1000_STATUS_SPEED_1000) +			ecmd->speed = SPEED_1000;  		else if (status & E1000_STATUS_SPEED_100) -			ethtool_cmd_speed_set(ecmd, SPEED_100); +			ecmd->speed = SPEED_100;  		else -			ethtool_cmd_speed_set(ecmd, SPEED_10); - +			ecmd->speed = SPEED_10;  		if ((status & E1000_STATUS_FD) ||  		    hw->phy.media_type != e1000_media_type_copper)  			ecmd->duplex = DUPLEX_FULL;  		else  			ecmd->duplex = DUPLEX_HALF;  	} else { -		ethtool_cmd_speed_set(ecmd, -1); +		ecmd->speed = -1;  		ecmd->duplex = -1;  	} -	ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; +	if ((hw->phy.media_type == e1000_media_type_fiber) || +	    hw->mac.autoneg) +		ecmd->autoneg = AUTONEG_ENABLE; +	else +		ecmd->autoneg = AUTONEG_DISABLE;  	/* MDI-X => 2; MDI =>1; Invalid =>0 */  	if (hw->phy.media_type == e1000_media_type_copper) @@ -238,15 +262,15 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)  	struct e1000_hw *hw = &adapter->hw;  	/* When SoL/IDER sessions are active, autoneg/speed/duplex -	 * cannot be changed */ +	 * cannot be changed +	 */  	if (igb_check_reset_block(hw)) {  		dev_err(&adapter->pdev->dev,  			"Cannot change link characteristics when SoL/IDER is active.\n");  		return -EINVAL;  	} -	/* -	 * MDI setting is only allowed when autoneg enabled because +	/* MDI setting is only allowed when autoneg enabled because  	 * some hardware doesn't allow MDI setting when speed or  	 * duplex is forced.  	 
*/ @@ -266,9 +290,31 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)  	if (ecmd->autoneg == AUTONEG_ENABLE) {  		hw->mac.autoneg = 1; -		hw->phy.autoneg_advertised = ecmd->advertising | -					     ADVERTISED_TP | -					     ADVERTISED_Autoneg; +		if (hw->phy.media_type == e1000_media_type_fiber) { +			hw->phy.autoneg_advertised = ecmd->advertising | +						     ADVERTISED_FIBRE | +						     ADVERTISED_Autoneg; +			switch (adapter->link_speed) { +			case SPEED_2500: +				hw->phy.autoneg_advertised = +					ADVERTISED_2500baseX_Full; +				break; +			case SPEED_1000: +				hw->phy.autoneg_advertised = +					ADVERTISED_1000baseT_Full; +				break; +			case SPEED_100: +				hw->phy.autoneg_advertised = +					ADVERTISED_100baseT_Full; +				break; +			default: +				break; +			} +		} else { +			hw->phy.autoneg_advertised = ecmd->advertising | +						     ADVERTISED_TP | +						     ADVERTISED_Autoneg; +		}  		ecmd->advertising = hw->phy.autoneg_advertised;  		if (adapter->fc_autoneg)  			hw->fc.requested_mode = e1000_fc_default; @@ -283,8 +329,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)  	/* MDI-X => 2; MDI => 1; Auto => 3 */  	if (ecmd->eth_tp_mdix_ctrl) { -		/* -		 * fix up the value for auto (3 => 0) as zero is mapped +		/* fix up the value for auto (3 => 0) as zero is mapped  		 * internally to auto  		 */  		if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) @@ -309,8 +354,7 @@ static u32 igb_get_link(struct net_device *netdev)  	struct igb_adapter *adapter = netdev_priv(netdev);  	struct e1000_mac_info *mac = &adapter->hw.mac; -	/* -	 * If the link is not reported up to netdev, interrupts are disabled, +	/* If the link is not reported up to netdev, interrupts are disabled,  	 * and so the physical link state may have changed since we last  	 * looked. 
Set get_link_status to make sure that the true link  	 * state is interrogated, rather than pulling a cached and possibly @@ -430,7 +474,8 @@ static void igb_get_regs(struct net_device *netdev,  	/* Interrupt */  	/* Reading EICS for EICR because they read the -	 * same but EICS does not clear on read */ +	 * same but EICS does not clear on read +	 */  	regs_buff[13] = rd32(E1000_EICS);  	regs_buff[14] = rd32(E1000_EICS);  	regs_buff[15] = rd32(E1000_EIMS); @@ -438,7 +483,8 @@ static void igb_get_regs(struct net_device *netdev,  	regs_buff[17] = rd32(E1000_EIAC);  	regs_buff[18] = rd32(E1000_EIAM);  	/* Reading ICS for ICR because they read the -	 * same but ICS does not clear on read */ +	 * same but ICS does not clear on read +	 */  	regs_buff[19] = rd32(E1000_ICS);  	regs_buff[20] = rd32(E1000_ICS);  	regs_buff[21] = rd32(E1000_IMS); @@ -688,12 +734,12 @@ static int igb_get_eeprom(struct net_device *netdev,  	if (hw->nvm.type == e1000_nvm_eeprom_spi)  		ret_val = hw->nvm.ops.read(hw, first_word, -					    last_word - first_word + 1, -					    eeprom_buff); +					   last_word - first_word + 1, +					   eeprom_buff);  	else {  		for (i = 0; i < last_word - first_word + 1; i++) {  			ret_val = hw->nvm.ops.read(hw, first_word + i, 1, -						    &eeprom_buff[i]); +						   &eeprom_buff[i]);  			if (ret_val)  				break;  		} @@ -740,15 +786,17 @@ static int igb_set_eeprom(struct net_device *netdev,  	ptr = (void *)eeprom_buff;  	if (eeprom->offset & 1) { -		/* need read/modify/write of first changed EEPROM word */ -		/* only the second byte of the word is being modified */ +		/* need read/modify/write of first changed EEPROM word +		 * only the second byte of the word is being modified +		 */  		ret_val = hw->nvm.ops.read(hw, first_word, 1,  					    &eeprom_buff[0]);  		ptr++;  	}  	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { -		/* need read/modify/write of last changed EEPROM word */ -		/* only the first byte of the word is being modified */ +		/* need read/modify/write of last changed EEPROM word +		 * only the first byte of the word is being modified +		 */  		ret_val = hw->nvm.ops.read(hw, last_word, 1,  				   &eeprom_buff[last_word - first_word]);  	} @@ -763,10 +811,11 @@ static int igb_set_eeprom(struct net_device *netdev,  		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);  	ret_val = hw->nvm.ops.write(hw, first_word, -				     last_word - first_word + 1, eeprom_buff); +				    last_word - first_word + 1, eeprom_buff);  	/* Update the checksum over the first part of the EEPROM if needed -	 * and flush shadow RAM for 82573 controllers */ +	 * and flush shadow RAM for 82573 controllers +	 */  	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))  		hw->nvm.ops.update(hw); @@ -783,8 +832,7 @@ static void igb_get_drvinfo(struct net_device *netdev,  	strlcpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver));  	strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); -	/* -	 * EEPROM image version # is reported as firmware version # for +	/* EEPROM image version # is reported as firmware version # for  	 * 82575 controllers  	 */  	strlcpy(drvinfo->fw_version, adapter->fw_version, @@ -847,9 +895,11 @@ static int igb_set_ringparam(struct net_device *netdev,  	}  	if (adapter->num_tx_queues > adapter->num_rx_queues) -		temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); +		temp_ring = vmalloc(adapter->num_tx_queues * +				    sizeof(struct igb_ring));  	else -		temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct 
igb_ring)); +		temp_ring = vmalloc(adapter->num_rx_queues * +				    sizeof(struct igb_ring));  	if (!temp_ring) {  		err = -ENOMEM; @@ -858,10 +908,9 @@ static int igb_set_ringparam(struct net_device *netdev,  	igb_down(adapter); -	/* -	 * We can't just free everything and then setup again, +	/* We can't just free everything and then setup again,  	 * because the ISRs in MSI-X mode get passed pointers -	 * to the tx and rx ring structs. +	 * to the Tx and Rx ring structs.  	 */  	if (new_tx_count != adapter->tx_ring_count) {  		for (i = 0; i < adapter->num_tx_queues; i++) { @@ -1199,6 +1248,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)  	switch (adapter->hw.mac.type) {  	case e1000_i350: +	case e1000_i354:  		test = reg_test_i350;  		toggle = 0x7FEFF3FF;  		break; @@ -1361,6 +1411,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)  		ics_mask = 0x77DCFED5;  		break;  	case e1000_i350: +	case e1000_i354:  	case e1000_i210:  	case e1000_i211:  		ics_mask = 0x77DCFED5; @@ -1627,17 +1678,12 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)  		wr32(E1000_CONNSW, reg);  		/* Unset sigdetect for SERDES loopback on -		 * 82580 and i350 devices. +		 * 82580 and newer devices.  		 */ -		switch (hw->mac.type) { -		case e1000_82580: -		case e1000_i350: +		if (hw->mac.type >= e1000_82580) {  			reg = rd32(E1000_PCS_CFG0);  			reg |= E1000_PCS_CFG_IGN_SD;  			wr32(E1000_PCS_CFG0, reg); -			break; -		default: -			break;  		}  		/* Set PCS register for forced speed */ @@ -1723,8 +1769,8 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,  }  static int igb_clean_test_rings(struct igb_ring *rx_ring, -                                struct igb_ring *tx_ring, -                                unsigned int size) +				struct igb_ring *tx_ring, +				unsigned int size)  {  	union e1000_adv_rx_desc *rx_desc;  	struct igb_rx_buffer *rx_buffer_info; @@ -1737,7 +1783,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,  	rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);  	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { -		/* check rx buffer */ +		/* check Rx buffer */  		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];  		/* sync Rx buffer for CPU read */ @@ -1756,11 +1802,11 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,  					   IGB_RX_BUFSZ,  					   DMA_FROM_DEVICE); -		/* unmap buffer on tx side */ +		/* unmap buffer on Tx side */  		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];  		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); -		/* increment rx/tx next to clean counters */ +		/* increment Rx/Tx next to clean counters */  		rx_ntc++;  		if (rx_ntc == rx_ring->count)  			rx_ntc = 0; @@ -1801,8 +1847,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)  	igb_create_lbtest_frame(skb, size);  	skb_put(skb, size); -	/* -	 * Calculate the loop count based on the largest descriptor ring +	/* Calculate the loop count based on the largest descriptor ring  	 * The idea is to wrap the largest ring a number of times using 64  	 * send/receive pairs during each loop  	 */ @@ -1829,7 +1874,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)  			break;  		} -		/* allow 200 milliseconds for packets to go from tx to rx */ +		/* allow 200 milliseconds for packets to go from Tx to Rx */  		msleep(200);  		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); @@ -1848,13 +1893,21 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)  static int igb_loopback_test(struct 
igb_adapter *adapter, u64 *data)  {  	/* PHY loopback cannot be performed if SoL/IDER -	 * sessions are active */ +	 * sessions are active +	 */  	if (igb_check_reset_block(&adapter->hw)) {  		dev_err(&adapter->pdev->dev,  			"Cannot do PHY loopback test when SoL/IDER is active.\n");  		*data = 0;  		goto out;  	} + +	if (adapter->hw.mac.type == e1000_i354) { +		dev_info(&adapter->pdev->dev, +			"Loopback test not supported on i354.\n"); +		*data = 0; +		goto out; +	}  	*data = igb_setup_desc_rings(adapter);  	if (*data)  		goto out; @@ -1879,7 +1932,8 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)  		hw->mac.serdes_has_link = false;  		/* On some blade server designs, link establishment -		 * could take as long as 2-3 minutes */ +		 * could take as long as 2-3 minutes +		 */  		do {  			hw->mac.ops.check_for_link(&adapter->hw);  			if (hw->mac.serdes_has_link) @@ -1922,7 +1976,8 @@ static void igb_diag_test(struct net_device *netdev,  		igb_power_up_link(adapter);  		/* Link test performed before hardware reset so autoneg doesn't -		 * interfere with test result */ +		 * interfere with test result +		 */  		if (igb_link_test(adapter, &data[4]))  			eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1987,8 +2042,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)  	struct igb_adapter *adapter = netdev_priv(netdev);  	wol->supported = WAKE_UCAST | WAKE_MCAST | -	                 WAKE_BCAST | WAKE_MAGIC | -	                 WAKE_PHY; +			 WAKE_BCAST | WAKE_MAGIC | +			 WAKE_PHY;  	wol->wolopts = 0;  	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) @@ -2263,7 +2318,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)  			sprintf(p, "rx_queue_%u_alloc_failed", i);  			p += ETH_GSTRING_LEN;  		} -/*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ +		/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */  		break;  	}  } @@ -2283,6 +2338,7 @@ static int igb_get_ts_info(struct net_device *dev,  	case e1000_82576:  	case e1000_82580:  	case e1000_i350: +	case e1000_i354:  	case e1000_i210:  	case e1000_i211:  		info->so_timestamping = @@ -2362,7 +2418,7 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,  }  static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, -			   u32 *rule_locs) +			 u32 *rule_locs)  {  	struct igb_adapter *adapter = netdev_priv(dev);  	int ret = -EOPNOTSUPP; @@ -2506,7 +2562,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)  {  	struct igb_adapter *adapter = netdev_priv(netdev);  	struct e1000_hw *hw = &adapter->hw; -	u32 ipcnfg, eeer; +	u32 ipcnfg, eeer, ret_val; +	u16 phy_data;  	if ((hw->mac.type < e1000_i350) ||  	    (hw->phy.media_type != e1000_media_type_copper)) @@ -2525,6 +2582,32 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)  	if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)  		edata->advertised |= ADVERTISED_100baseT_Full; +	/* EEE Link Partner Advertised */ +	switch (hw->mac.type) { +	case e1000_i350: +		ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, +					   &phy_data); +		if (ret_val) +			return -ENODATA; + +		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + +		break; +	case e1000_i210: +	case e1000_i211: +		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, +					     E1000_EEE_LP_ADV_DEV_I210, +					     &phy_data); +		if (ret_val) +			return -ENODATA; + +		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + +		break; +	default: +	
	break; +	} +  	if (eeer & E1000_EEER_EEE_NEG)  		edata->eee_active = true; @@ -2600,6 +2683,85 @@ static int igb_set_eee(struct net_device *netdev,  	return 0;  } +static int igb_get_module_info(struct net_device *netdev, +			       struct ethtool_modinfo *modinfo) +{ +	struct igb_adapter *adapter = netdev_priv(netdev); +	struct e1000_hw *hw = &adapter->hw; +	u32 status = E1000_SUCCESS; +	u16 sff8472_rev, addr_mode; +	bool page_swap = false; + +	if ((hw->phy.media_type == e1000_media_type_copper) || +	    (hw->phy.media_type == e1000_media_type_unknown)) +		return -EOPNOTSUPP; + +	/* Check whether we support SFF-8472 or not */ +	status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); +	if (status != E1000_SUCCESS) +		return -EIO; + +	/* addressing mode is not supported */ +	status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); +	if (status != E1000_SUCCESS) +		return -EIO; + +	/* addressing mode is not supported */ +	if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { +		hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); +		page_swap = true; +	} + +	if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { +		/* We have an SFP, but it does not support SFF-8472 */ +		modinfo->type = ETH_MODULE_SFF_8079; +		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; +	} else { +		/* We have an SFP which supports a revision of SFF-8472 */ +		modinfo->type = ETH_MODULE_SFF_8472; +		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; +	} + +	return 0; +} + +static int igb_get_module_eeprom(struct net_device *netdev, +				 struct ethtool_eeprom *ee, u8 *data) +{ +	struct igb_adapter *adapter = netdev_priv(netdev); +	struct e1000_hw *hw = &adapter->hw; +	u32 status = E1000_SUCCESS; +	u16 *dataword; +	u16 first_word, last_word; +	int i = 0; + +	if (ee->len == 0) +		return -EINVAL; + +	first_word = ee->offset >> 1; +	last_word = (ee->offset + ee->len - 1) >> 1; + +	dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1), +			   GFP_KERNEL); +	if (!dataword) +		return -ENOMEM; + +	/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ +	for (i = 0; i < last_word - first_word + 1; i++) { +		status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); +		if (status != E1000_SUCCESS) +			/* Error occurred while reading module */ +			return -EIO; + +		be16_to_cpus(&dataword[i]); +	} + +	memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); +	kfree(dataword); + +	return 0; +} +  static int igb_ethtool_begin(struct net_device *netdev)  {  	struct igb_adapter *adapter = netdev_priv(netdev); @@ -2614,36 +2776,38 @@ static void igb_ethtool_complete(struct net_device *netdev)  }  static const struct ethtool_ops igb_ethtool_ops = { -	.get_settings           = igb_get_settings, -	.set_settings           = igb_set_settings, -	.get_drvinfo            = igb_get_drvinfo, -	.get_regs_len           = igb_get_regs_len, -	.get_regs               = igb_get_regs, -	.get_wol                = igb_get_wol, -	.set_wol                = igb_set_wol, -	.get_msglevel           = igb_get_msglevel, -	.set_msglevel           = igb_set_msglevel, -	.nway_reset             = igb_nway_reset, -	.get_link               = igb_get_link, -	.get_eeprom_len         = igb_get_eeprom_len, -	.get_eeprom             = igb_get_eeprom, -	.set_eeprom             = igb_set_eeprom, -	.get_ringparam          = igb_get_ringparam, -	.set_ringparam          = igb_set_ringparam, -	.get_pauseparam         = igb_get_pauseparam, -	.set_pauseparam   
      = igb_set_pauseparam, -	.self_test              = igb_diag_test, -	.get_strings            = igb_get_strings, -	.set_phys_id            = igb_set_phys_id, -	.get_sset_count         = igb_get_sset_count, -	.get_ethtool_stats      = igb_get_ethtool_stats, -	.get_coalesce           = igb_get_coalesce, -	.set_coalesce           = igb_set_coalesce, -	.get_ts_info            = igb_get_ts_info, +	.get_settings		= igb_get_settings, +	.set_settings		= igb_set_settings, +	.get_drvinfo		= igb_get_drvinfo, +	.get_regs_len		= igb_get_regs_len, +	.get_regs		= igb_get_regs, +	.get_wol		= igb_get_wol, +	.set_wol		= igb_set_wol, +	.get_msglevel		= igb_get_msglevel, +	.set_msglevel		= igb_set_msglevel, +	.nway_reset		= igb_nway_reset, +	.get_link		= igb_get_link, +	.get_eeprom_len		= igb_get_eeprom_len, +	.get_eeprom		= igb_get_eeprom, +	.set_eeprom		= igb_set_eeprom, +	.get_ringparam		= igb_get_ringparam, +	.set_ringparam		= igb_set_ringparam, +	.get_pauseparam		= igb_get_pauseparam, +	.set_pauseparam		= igb_set_pauseparam, +	.self_test		= igb_diag_test, +	.get_strings		= igb_get_strings, +	.set_phys_id		= igb_set_phys_id, +	.get_sset_count		= igb_get_sset_count, +	.get_ethtool_stats	= igb_get_ethtool_stats, +	.get_coalesce		= igb_get_coalesce, +	.set_coalesce		= igb_set_coalesce, +	.get_ts_info		= igb_get_ts_info,  	.get_rxnfc		= igb_get_rxnfc,  	.set_rxnfc		= igb_set_rxnfc,  	.get_eee		= igb_get_eee,  	.set_eee		= igb_set_eee, +	.get_module_info	= igb_get_module_info, +	.get_module_eeprom	= igb_get_module_eeprom,  	.begin			= igb_ethtool_begin,  	.complete		= igb_ethtool_complete,  }; diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index 0478a1abe54..58f1ce967ae 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -45,21 +45,21 @@ static struct i2c_board_info i350_sensor_info = {  /* hwmon callback functions */  static ssize_t igb_hwmon_show_location(struct device *dev, -					 struct device_attribute *attr, -					 char *buf) +				       struct device_attribute *attr, +				       char *buf)  {  	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, -						     dev_attr); +						   dev_attr);  	return sprintf(buf, "loc%u\n",  		       igb_attr->sensor->location);  }  static ssize_t igb_hwmon_show_temp(struct device *dev, -				     struct device_attribute *attr, -				     char *buf) +				   struct device_attribute *attr, +				   char *buf)  {  	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, -						     dev_attr); +						   dev_attr);  	unsigned int value;  	/* reset the temp field */ @@ -74,11 +74,11 @@ static ssize_t igb_hwmon_show_temp(struct device *dev,  }  static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, -				     struct device_attribute *attr, -				     char *buf) +					    struct device_attribute *attr, +					    char *buf)  {  	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, -						     dev_attr); +						   dev_attr);  	unsigned int value = igb_attr->sensor->caution_thresh;  	/* display millidegree */ @@ -88,11 +88,11 @@ static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,  }  static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, -				     struct device_attribute *attr, -				     char *buf) +					  struct device_attribute *attr, +					  char *buf)  {  	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, -						     dev_attr); +						   dev_attr);  	unsigned int value = 
igb_attr->sensor->max_op_thresh;  	/* display millidegree */ @@ -111,7 +111,8 @@ static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,   * the data structures we need to get the data to display.   */  static int igb_add_hwmon_attr(struct igb_adapter *adapter, -				unsigned int offset, int type) { +			      unsigned int offset, int type) +{  	int rc;  	unsigned int n_attr;  	struct hwmon_attr *igb_attr; @@ -217,7 +218,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)  	 */  	n_attrs = E1000_MAX_SENSORS * 4;  	igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), -					  GFP_KERNEL); +					GFP_KERNEL);  	if (!igb_hwmon->hwmon_list) {  		rc = -ENOMEM;  		goto err; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 64f75291e3a..64cbe0dfe04 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -60,9 +60,9 @@  #include <linux/i2c.h>  #include "igb.h" -#define MAJ 4 -#define MIN 1 -#define BUILD 2 +#define MAJ 5 +#define MIN 0 +#define BUILD 3  #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \  __stringify(BUILD) "-k"  char igb_driver_name[] = "igb"; @@ -77,6 +77,9 @@ static const struct e1000_info *igb_info_tbl[] = {  };  static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },  	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },  	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },  	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, @@ -156,8 +159,8 @@ static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);  static void igb_tx_timeout(struct net_device *);  static void igb_reset_task(struct work_struct *);  static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features); -static int igb_vlan_rx_add_vid(struct net_device *, u16); -static int igb_vlan_rx_kill_vid(struct net_device *, u16); +static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);  static void igb_restore_vlan(struct igb_adapter *);  static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);  static void igb_ping_all_vfs(struct igb_adapter *); @@ -169,13 +172,14 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);  static int igb_ndo_set_vf_vlan(struct net_device *netdev,  			       int vf, u16 vlan, u8 qos);  static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, +				   bool setting);  static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,  				 struct ifla_vf_info *ivi);  static void igb_check_vf_rate_limit(struct igb_adapter *);  #ifdef CONFIG_PCI_IOV  static int igb_vf_configure(struct igb_adapter *adapter, int vf); -static bool igb_vfs_are_assigned(struct igb_adapter *adapter);  #endif  #ifdef CONFIG_PM @@ -292,9 +296,7 @@ static const struct igb_reg_info igb_reg_info_tbl[] = {  	{}  }; -/* - * igb_regdump - register printout routine - */ +/* igb_regdump - register printout routine */  static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)  {  	int n = 0; @@ -360,9 +362,7 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)  		regs[2], regs[3]);  } -/* - * igb_dump - Print registers, tx-rings and 
rx-rings - */ +/* igb_dump - Print registers, Tx-rings and Rx-rings */  static void igb_dump(struct igb_adapter *adapter)  {  	struct net_device *netdev = adapter->netdev; @@ -569,12 +569,13 @@ exit:  	return;  } -/*  igb_get_i2c_data - Reads the I2C SDA data bit +/** + *  igb_get_i2c_data - Reads the I2C SDA data bit   *  @hw: pointer to hardware structure   *  @i2cctl: Current value of I2CCTL register   *   *  Returns the I2C data bit value - */ + **/  static int igb_get_i2c_data(void *data)  {  	struct igb_adapter *adapter = (struct igb_adapter *)data; @@ -584,12 +585,13 @@ static int igb_get_i2c_data(void *data)  	return ((i2cctl & E1000_I2C_DATA_IN) != 0);  } -/* igb_set_i2c_data - Sets the I2C data bit +/** + *  igb_set_i2c_data - Sets the I2C data bit   *  @data: pointer to hardware structure   *  @state: I2C data value (0 or 1) to set   *   *  Sets the I2C data bit - */ + **/  static void igb_set_i2c_data(void *data, int state)  {  	struct igb_adapter *adapter = (struct igb_adapter *)data; @@ -608,12 +610,13 @@ static void igb_set_i2c_data(void *data, int state)  } -/* igb_set_i2c_clk - Sets the I2C SCL clock +/** + *  igb_set_i2c_clk - Sets the I2C SCL clock   *  @data: pointer to hardware structure   *  @state: state to set clock   *   *  Sets the I2C clock line to state - */ + **/  static void igb_set_i2c_clk(void *data, int state)  {  	struct igb_adapter *adapter = (struct igb_adapter *)data; @@ -631,11 +634,12 @@ static void igb_set_i2c_clk(void *data, int state)  	wrfl();  } -/* igb_get_i2c_clk - Gets the I2C SCL clock state +/** + *  igb_get_i2c_clk - Gets the I2C SCL clock state   *  @data: pointer to hardware structure   *   *  Gets the I2C clock state - */ + **/  static int igb_get_i2c_clk(void *data)  {  	struct igb_adapter *adapter = (struct igb_adapter *)data; @@ -655,8 +659,10 @@ static const struct i2c_algo_bit_data igb_i2c_algo = {  };  /** - * igb_get_hw_dev - return device - * used by hardware layer to print debugging information + *  igb_get_hw_dev - return device + *  @hw: pointer to hardware structure + * + *  used by hardware layer to print debugging information   **/  struct net_device *igb_get_hw_dev(struct e1000_hw *hw)  { @@ -665,10 +671,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)  }  /** - * igb_init_module - Driver Registration Routine + *  igb_init_module - Driver Registration Routine   * - * igb_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. + *  igb_init_module is the first routine called when the driver is + *  loaded. All it does is register with the PCI subsystem.   **/  static int __init igb_init_module(void)  { @@ -688,10 +694,10 @@ static int __init igb_init_module(void)  module_init(igb_init_module);  /** - * igb_exit_module - Driver Exit Cleanup Routine + *  igb_exit_module - Driver Exit Cleanup Routine   * - * igb_exit_module is called just before the driver is removed - * from memory. + *  igb_exit_module is called just before the driver is removed + *  from memory.   
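The four bit-bang callbacks above (igb_get_i2c_data, igb_set_i2c_data, igb_set_i2c_clk, igb_get_i2c_clk) only sample and drive the SDA/SCL bits in I2CCTL; the actual I2C protocol is generated by the i2c-algo-bit layer through the igb_i2c_algo table. As a hedged sketch of how such a bit-banged bus is typically wired up and registered, the fragment below uses the field names from <linux/i2c-algo-bit.h>, but my_hw, my_setsda and the timing values are illustrative placeholders rather than the driver's igb_init_i2c code.

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/jiffies.h>
#include <linux/string.h>

/* Hypothetical device context; stands in for struct igb_adapter here. */
struct my_hw {
	struct i2c_adapter adap;
	struct i2c_algo_bit_data algo;
};

static void my_setsda(void *data, int state) { /* drive SDA via I2CCTL */ }
static void my_setscl(void *data, int state) { /* drive SCL via I2CCTL */ }
static int my_getsda(void *data) { return 1; /* sample SDA from I2CCTL */ }
static int my_getscl(void *data) { return 1; /* sample SCL from I2CCTL */ }

static int my_register_i2c(struct my_hw *hw, struct device *parent)
{
	hw->algo.setsda	= my_setsda;
	hw->algo.setscl	= my_setscl;
	hw->algo.getsda	= my_getsda;
	hw->algo.getscl	= my_getscl;
	hw->algo.udelay	= 5;			/* half-period in microseconds */
	hw->algo.timeout = usecs_to_jiffies(100000);
	hw->algo.data	= hw;			/* handed back to the callbacks */

	hw->adap.owner = THIS_MODULE;
	hw->adap.algo_data = &hw->algo;
	hw->adap.dev.parent = parent;
	strlcpy(hw->adap.name, "example bit-bang bus", sizeof(hw->adap.name));

	/* i2c-algo-bit supplies the transfer state machine on top of these */
	return i2c_bit_add_bus(&hw->adap);
}

Once i2c_bit_add_bus() succeeds, ordinary i2c_transfer()/i2c_smbus_*() calls on the adapter are carried out by toggling these four callbacks.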
**/  static void __exit igb_exit_module(void)  { @@ -705,11 +711,11 @@ module_exit(igb_exit_module);  #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))  /** - * igb_cache_ring_register - Descriptor ring to register mapping - * @adapter: board private structure to initialize + *  igb_cache_ring_register - Descriptor ring to register mapping + *  @adapter: board private structure to initialize   * - * Once we know the feature-set enabled for the device, we'll cache - * the register offset the descriptor ring is assigned to. + *  Once we know the feature-set enabled for the device, we'll cache + *  the register offset the descriptor ring is assigned to.   **/  static void igb_cache_ring_register(struct igb_adapter *adapter)  { @@ -726,11 +732,12 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)  		if (adapter->vfs_allocated_count) {  			for (; i < adapter->rss_queues; i++)  				adapter->rx_ring[i]->reg_idx = rbase_offset + -				                               Q_IDX_82576(i); +							       Q_IDX_82576(i);  		}  	case e1000_82575:  	case e1000_82580:  	case e1000_i350: +	case e1000_i354:  	case e1000_i210:  	case e1000_i211:  	default: @@ -785,9 +792,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)  	switch (hw->mac.type) {  	case e1000_82575:  		/* The 82575 assigns vectors using a bitmask, which matches the -		   bitmask for the EICR/EIMS/EIMC registers.  To assign one -		   or more queues to a vector, we write the appropriate bits -		   into the MSIXBM register for that vector. */ +		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one +		 * or more queues to a vector, we write the appropriate bits +		 * into the MSIXBM register for that vector. +		 */  		if (rx_queue > IGB_N0_QUEUE)  			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;  		if (tx_queue > IGB_N0_QUEUE) @@ -798,8 +806,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)  		q_vector->eims_value = msixbm;  		break;  	case e1000_82576: -		/* -		 * 82576 uses a table that essentially consists of 2 columns +		/* 82576 uses a table that essentially consists of 2 columns  		 * with 8 rows.  The ordering is column-major so we use the  		 * lower 3 bits as the row index, and the 4th bit as the  		 * column offset. @@ -816,10 +823,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)  		break;  	case e1000_82580:  	case e1000_i350: +	case e1000_i354:  	case e1000_i210:  	case e1000_i211: -		/* -		 * On 82580 and newer adapters the scheme is similar to 82576 +		/* On 82580 and newer adapters the scheme is similar to 82576  		 * however instead of ordering column-major we have things  		 * ordered row-major.  So we traverse the table by using  		 * bit 0 as the column offset, and the remaining bits as the @@ -848,10 +855,11 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)  }  /** - * igb_configure_msix - Configure MSI-X hardware + *  igb_configure_msix - Configure MSI-X hardware + *  @adapter: board private structure to initialize   * - * igb_configure_msix sets up the hardware to properly - * generate MSI-X interrupts. + *  igb_configure_msix sets up the hardware to properly + *  generate MSI-X interrupts.   
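The comments added to igb_assign_vector() above describe two IVAR layouts: on 82576 the low three bits of the queue number pick the row and bit 3 picks the column, while on 82580/i350/i354/i210/i211 bit 0 picks the column and the remaining bits pick the row. The standalone sketch below only illustrates that indexing arithmetic; the helper names and the way the result is reported are illustrative, not the driver's register-update code.

#include <stdio.h>

/* Position of a queue's 8-bit entry inside the IVAR register array. */
struct ivar_slot {
	unsigned int reg;	/* which IVAR register (row) */
	unsigned int shift;	/* bit shift of the entry within it */
};

/* 82576-style: low 3 bits pick the row, bit 3 picks the column (0 or 2). */
static struct ivar_slot ivar_82576(unsigned int queue, int is_tx)
{
	struct ivar_slot s;

	s.reg = queue & 0x7;
	s.shift = ((queue & 0x8) ? 16 : 0) + (is_tx ? 8 : 0);
	return s;
}

/* 82580/i350/i354/i210-style: bit 0 picks the column, the rest the row. */
static struct ivar_slot ivar_82580(unsigned int queue, int is_tx)
{
	struct ivar_slot s;

	s.reg = queue >> 1;
	s.shift = ((queue & 0x1) ? 16 : 0) + (is_tx ? 8 : 0);
	return s;
}

int main(void)
{
	struct ivar_slot a = ivar_82576(9, 0);	/* Rx queue 9 */
	struct ivar_slot b = ivar_82580(9, 0);

	printf("82576:  queue 9 -> IVAR[%u], shift %u\n", a.reg, a.shift);
	printf("82580+: queue 9 -> IVAR[%u], shift %u\n", b.reg, b.shift);
	return 0;
}

For Rx queue 9 this lands in IVAR[1] at bit 16 under the column-major scheme, but in IVAR[4] at bit 16 under the row-major scheme, which is the difference the two comments are pointing out.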
**/  static void igb_configure_msix(struct igb_adapter *adapter)  { @@ -875,8 +883,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)  		wr32(E1000_CTRL_EXT, tmp);  		/* enable msix_other interrupt */ -		array_wr32(E1000_MSIXBM(0), vector++, -		                      E1000_EIMS_OTHER); +		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);  		adapter->eims_other = E1000_EIMS_OTHER;  		break; @@ -884,13 +891,15 @@ static void igb_configure_msix(struct igb_adapter *adapter)  	case e1000_82576:  	case e1000_82580:  	case e1000_i350: +	case e1000_i354:  	case e1000_i210:  	case e1000_i211:  		/* Turn on MSI-X capability first, or our settings -		 * won't stick.  And it will take days to debug. */ +		 * won't stick.  And it will take days to debug. +		 */  		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | -		                E1000_GPIE_PBA | E1000_GPIE_EIAME | -		                E1000_GPIE_NSICR); +		     E1000_GPIE_PBA | E1000_GPIE_EIAME | +		     E1000_GPIE_NSICR);  		/* enable msix_other interrupt */  		adapter->eims_other = 1 << vector; @@ -912,10 +921,11 @@ static void igb_configure_msix(struct igb_adapter *adapter)  }  /** - * igb_request_msix - Initialize MSI-X interrupts + *  igb_request_msix - Initialize MSI-X interrupts + *  @adapter: board private structure to initialize   * - * igb_request_msix allocates MSI-X vectors and requests interrupts from the - * kernel. + *  igb_request_msix allocates MSI-X vectors and requests interrupts from the + *  kernel.   **/  static int igb_request_msix(struct igb_adapter *adapter)  { @@ -924,7 +934,7 @@ static int igb_request_msix(struct igb_adapter *adapter)  	int i, err = 0, vector = 0, free_vector = 0;  	err = request_irq(adapter->msix_entries[vector].vector, -	                  igb_msix_other, 0, netdev->name, adapter); +			  igb_msix_other, 0, netdev->name, adapter);  	if (err)  		goto err_out; @@ -948,8 +958,8 @@ static int igb_request_msix(struct igb_adapter *adapter)  			sprintf(q_vector->name, "%s-unused", netdev->name);  		err = request_irq(adapter->msix_entries[vector].vector, -		                  igb_msix_ring, 0, q_vector->name, -		                  q_vector); +				  igb_msix_ring, 0, q_vector->name, +				  q_vector);  		if (err)  			goto err_free;  	} @@ -982,13 +992,13 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)  }  /** - * igb_free_q_vector - Free memory allocated for specific interrupt vector - * @adapter: board private structure to initialize - * @v_idx: Index of vector to be freed + *  igb_free_q_vector - Free memory allocated for specific interrupt vector + *  @adapter: board private structure to initialize + *  @v_idx: Index of vector to be freed   * - * This function frees the memory allocated to the q_vector.  In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. + *  This function frees the memory allocated to the q_vector.  In addition if + *  NAPI is enabled it will delete any references to the NAPI struct prior + *  to freeing the q_vector.   **/  static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)  { @@ -1003,20 +1013,19 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)  	adapter->q_vector[v_idx] = NULL;  	netif_napi_del(&q_vector->napi); -	/* -	 * ixgbe_get_stats64() might access the rings on this vector, +	/* ixgbe_get_stats64() might access the rings on this vector,  	 * we must wait a grace period before freeing it.  	 
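The grace period mentioned here is exactly what the kfree_rcu() call just below provides: the memory is handed back to the allocator only after every reader that might still hold an RCU-protected reference has finished. A minimal sketch of the pattern, with a hypothetical structure rather than the driver's q_vector:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct my_vector {
	int id;
	struct rcu_head rcu;	/* storage used by the deferred free */
};

static struct my_vector __rcu *active_vector;

static void retire_vector(void)
{
	struct my_vector *v;

	/* Unpublish the pointer first so new readers cannot find it ... */
	v = rcu_dereference_protected(active_vector, 1);
	RCU_INIT_POINTER(active_vector, NULL);

	/* ... then free it only after a grace period has elapsed. */
	if (v)
		kfree_rcu(v, rcu);
}

Readers would look the pointer up under rcu_read_lock() with rcu_dereference(); kfree_rcu() avoids having to block in paths where a synchronize_rcu() would be too heavy.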
*/  	kfree_rcu(q_vector, rcu);  }  /** - * igb_free_q_vectors - Free memory allocated for interrupt vectors - * @adapter: board private structure to initialize + *  igb_free_q_vectors - Free memory allocated for interrupt vectors + *  @adapter: board private structure to initialize   * - * This function frees the memory allocated to the q_vectors.  In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. + *  This function frees the memory allocated to the q_vectors.  In addition if + *  NAPI is enabled it will delete any references to the NAPI struct prior + *  to freeing the q_vector.   **/  static void igb_free_q_vectors(struct igb_adapter *adapter)  { @@ -1031,10 +1040,11 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)  }  /** - * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts + *  @adapter: board private structure to initialize   * - * This function resets the device so that it has 0 rx queues, tx queues, and - * MSI-X interrupts allocated. + *  This function resets the device so that it has 0 Rx queues, Tx queues, and + *  MSI-X interrupts allocated.   */  static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)  { @@ -1043,10 +1053,12 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)  }  /** - * igb_set_interrupt_capability - set MSI or MSI-X if supported + *  igb_set_interrupt_capability - set MSI or MSI-X if supported + *  @adapter: board private structure to initialize + *  @msix: boolean value of MSIX capability   * - * Attempt to configure interrupts using the best available - * capabilities of the hardware and kernel. + *  Attempt to configure interrupts using the best available + *  capabilities of the hardware and kernel.   **/  static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)  { @@ -1063,10 +1075,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)  	else  		adapter->num_tx_queues = adapter->rss_queues; -	/* start with one vector for every rx queue */ +	/* start with one vector for every Rx queue */  	numvecs = adapter->num_rx_queues; -	/* if tx handler is separate add 1 for every tx queue */ +	/* if Tx handler is separate add 1 for every Tx queue */  	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))  		numvecs += adapter->num_tx_queues; @@ -1128,16 +1140,16 @@ static void igb_add_ring(struct igb_ring *ring,  }  /** - * igb_alloc_q_vector - Allocate memory for a single interrupt vector - * @adapter: board private structure to initialize - * @v_count: q_vectors allocated on adapter, used for ring interleaving - * @v_idx: index of vector in adapter struct - * @txr_count: total number of Tx rings to allocate - * @txr_idx: index of first Tx ring to allocate - * @rxr_count: total number of Rx rings to allocate - * @rxr_idx: index of first Rx ring to allocate + *  igb_alloc_q_vector - Allocate memory for a single interrupt vector + *  @adapter: board private structure to initialize + *  @v_count: q_vectors allocated on adapter, used for ring interleaving + *  @v_idx: index of vector in adapter struct + *  @txr_count: total number of Tx rings to allocate + *  @txr_idx: index of first Tx ring to allocate + *  @rxr_count: total number of Rx rings to allocate + *  @rxr_idx: index of first Rx ring to allocate   * - * We allocate one q_vector.  If allocation fails we return -ENOMEM. + *  We allocate one q_vector. 
 If allocation fails we return -ENOMEM.   **/  static int igb_alloc_q_vector(struct igb_adapter *adapter,  			      int v_count, int v_idx, @@ -1179,6 +1191,17 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,  	/* initialize pointer to rings */  	ring = q_vector->ring; +	/* intialize ITR */ +	if (rxr_count) { +		/* rx or rx/tx vector */ +		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) +			q_vector->itr_val = adapter->rx_itr_setting; +	} else { +		/* tx only vector */ +		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) +			q_vector->itr_val = adapter->tx_itr_setting; +	} +  	if (txr_count) {  		/* assign generic ring traits */  		ring->dev = &adapter->pdev->dev; @@ -1221,9 +1244,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,  			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);  		/* -		 * On i350, i210, and i211, loopback VLAN packets +		 * On i350, i354, i210, and i211, loopback VLAN packets  		 * have the tag byte-swapped. -		 * */ +		 */  		if (adapter->hw.mac.type >= e1000_i350)  			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); @@ -1240,11 +1263,11 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,  /** - * igb_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize + *  igb_alloc_q_vectors - Allocate memory for interrupt vectors + *  @adapter: board private structure to initialize   * - * We allocate one q_vector per queue interrupt.  If allocation fails we - * return -ENOMEM. + *  We allocate one q_vector per queue interrupt.  If allocation fails we + *  return -ENOMEM.   **/  static int igb_alloc_q_vectors(struct igb_adapter *adapter)  { @@ -1298,9 +1321,11 @@ err_out:  }  /** - * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + *  @adapter: board private structure to initialize + *  @msix: boolean value of MSIX capability   * - * This function initializes the interrupts and allocates all of the queues. + *  This function initializes the interrupts and allocates all of the queues.   **/  static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)  { @@ -1325,10 +1350,11 @@ err_alloc_q_vectors:  }  /** - * igb_request_irq - initialize interrupts + *  igb_request_irq - initialize interrupts + *  @adapter: board private structure to initialize   * - * Attempts to configure interrupts using the best available - * capabilities of the hardware and kernel. + *  Attempts to configure interrupts using the best available + *  capabilities of the hardware and kernel.   **/  static int igb_request_irq(struct igb_adapter *adapter)  { @@ -1394,15 +1420,14 @@ static void igb_free_irq(struct igb_adapter *adapter)  }  /** - * igb_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure + *  igb_irq_disable - Mask off interrupt generation on the NIC + *  @adapter: board private structure   **/  static void igb_irq_disable(struct igb_adapter *adapter)  {  	struct e1000_hw *hw = &adapter->hw; -	/* -	 * we need to be careful when disabling interrupts.  The VFs are also +	/* we need to be careful when disabling interrupts.  
The VFs are also  	 * mapped into these registers and so clearing the bits can cause  	 * issues on the VF drivers so we only need to clear what we set  	 */ @@ -1427,8 +1452,8 @@ static void igb_irq_disable(struct igb_adapter *adapter)  }  /** - * igb_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure + *  igb_irq_enable - Enable default interrupt generation settings + *  @adapter: board private structure   **/  static void igb_irq_enable(struct igb_adapter *adapter)  { @@ -1477,13 +1502,12 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)  }  /** - * igb_release_hw_control - release control of the h/w to f/w - * @adapter: address of board private structure - * - * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. - * For ASF and Pass Through versions of f/w this means that the - * driver is no longer loaded. + *  igb_release_hw_control - release control of the h/w to f/w + *  @adapter: address of board private structure   * + *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + *  For ASF and Pass Through versions of f/w this means that the + *  driver is no longer loaded.   **/  static void igb_release_hw_control(struct igb_adapter *adapter)  { @@ -1497,13 +1521,12 @@ static void igb_release_hw_control(struct igb_adapter *adapter)  }  /** - * igb_get_hw_control - get control of the h/w from f/w - * @adapter: address of board private structure - * - * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. - * For ASF and Pass Through versions of f/w this means that - * the driver is loaded. + *  igb_get_hw_control - get control of the h/w from f/w + *  @adapter: address of board private structure   * + *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + *  For ASF and Pass Through versions of f/w this means that + *  the driver is loaded.   
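Both of these comments describe the same one-bit handshake with the firmware: CTRL_EXT.DRV_LOAD is set while the driver owns the hardware and cleared when control is handed back. A small sketch of that read-modify-write, assuming the driver's local rd32()/wr32() accessors and the E1000_CTRL_EXT_DRV_LOAD define; the wrapper function itself is illustrative, not the actual igb_get_hw_control/igb_release_hw_control bodies.

#include "e1000_hw.h"	/* driver-local: rd32()/wr32() and register defines */

static void example_set_drv_load(struct e1000_hw *hw, bool loaded)
{
	u32 ctrl_ext = rd32(E1000_CTRL_EXT);

	if (loaded)
		ctrl_ext |= E1000_CTRL_EXT_DRV_LOAD;	/* driver owns the hw */
	else
		ctrl_ext &= ~E1000_CTRL_EXT_DRV_LOAD;	/* hand back to firmware */

	wr32(E1000_CTRL_EXT, ctrl_ext);
}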
**/  static void igb_get_hw_control(struct igb_adapter *adapter)  { @@ -1517,8 +1540,8 @@ static void igb_get_hw_control(struct igb_adapter *adapter)  }  /** - * igb_configure - configure the hardware for RX and TX - * @adapter: private board structure + *  igb_configure - configure the hardware for RX and TX + *  @adapter: private board structure   **/  static void igb_configure(struct igb_adapter *adapter)  { @@ -1541,7 +1564,8 @@ static void igb_configure(struct igb_adapter *adapter)  	/* call igb_desc_unused which always leaves  	 * at least 1 descriptor unused to make sure -	 * next_to_use != next_to_clean */ +	 * next_to_use != next_to_clean +	 */  	for (i = 0; i < adapter->num_rx_queues; i++) {  		struct igb_ring *ring = adapter->rx_ring[i];  		igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); @@ -1549,8 +1573,8 @@ static void igb_configure(struct igb_adapter *adapter)  }  /** - * igb_power_up_link - Power up the phy/serdes link - * @adapter: address of board private structure + *  igb_power_up_link - Power up the phy/serdes link + *  @adapter: address of board private structure   **/  void igb_power_up_link(struct igb_adapter *adapter)  { @@ -1563,8 +1587,8 @@ void igb_power_up_link(struct igb_adapter *adapter)  }  /** - * igb_power_down_link - Power down the phy/serdes link - * @adapter: address of board private structure + *  igb_power_down_link - Power down the phy/serdes link + *  @adapter: address of board private structure   */  static void igb_power_down_link(struct igb_adapter *adapter)  { @@ -1575,8 +1599,8 @@ static void igb_power_down_link(struct igb_adapter *adapter)  }  /** - * igb_up - Open the interface and prepare it to handle traffic - * @adapter: board private structure + *  igb_up - Open the interface and prepare it to handle traffic + *  @adapter: board private structure   **/  int igb_up(struct igb_adapter *adapter)  { @@ -1624,7 +1648,8 @@ void igb_down(struct igb_adapter *adapter)  	int i;  	/* signal that we're down so the interrupt handler does not -	 * reschedule our watchdog timer */ +	 * reschedule our watchdog timer +	 */  	set_bit(__IGB_DOWN, &adapter->state);  	/* disable receives in the hardware */ @@ -1694,6 +1719,7 @@ void igb_reset(struct igb_adapter *adapter)  	 */  	switch (mac->type) {  	case e1000_i350: +	case e1000_i354:  	case e1000_82580:  		pba = rd32(E1000_RXPBS);  		pba = igb_rxpbs_adjust_82580(pba); @@ -1720,14 +1746,16 @@ void igb_reset(struct igb_adapter *adapter)  		 * rounded up to the next 1KB and expressed in KB.  Likewise,  		 * the Rx FIFO should be large enough to accommodate at least  		 * one full receive packet and is similarly rounded up and -		 * expressed in KB. */ +		 * expressed in KB. 
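As a worked illustration of the sizing rule described above (the real computation follows right after this comment), the standalone snippet below plugs in made-up example values: a 34 KB Rx packet buffer and a 1522-byte maximum frame. The constants and helper are illustrative; the driver reads the actual numbers out of E1000_PBA and the adapter state.

#include <stdio.h>

#define EX_TX_DESC_SIZE	16	/* bytes per advanced Tx descriptor */
#define EX_FCS_LEN	4	/* Ethernet FCS, appended by the hardware */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int pba = 34;		/* packet buffer size, in KB */
	unsigned int max_frame = 1522;	/* max frame size, in bytes */

	/* Tx needs room for two full frames plus descriptor overhead,
	 * rounded up to the next whole KB.
	 */
	unsigned int min_tx_space =
		((max_frame + EX_TX_DESC_SIZE - EX_FCS_LEN) * 2 + 1023) / 1024;

	/* Rx needs at least one full frame, also rounded up to whole KB. */
	unsigned int min_rx_space = (max_frame + 1023) / 1024;

	/* Flow-control high water mark: the lower of 90% of the buffer or
	 * the buffer minus two maximum-sized frames, in bytes.
	 */
	unsigned int hwm = min_u32((pba << 10) * 9 / 10,
				   (pba << 10) - 2 * max_frame);

	/* prints: min_tx_space=3 KB min_rx_space=2 KB hwm=31334 bytes */
	printf("min_tx_space=%u KB min_rx_space=%u KB hwm=%u bytes\n",
	       min_tx_space, min_rx_space, hwm);
	return 0;
}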
+		 */  		pba = rd32(E1000_PBA);  		/* upper 16 bits has Tx packet buffer allocation size in KB */  		tx_space = pba >> 16;  		/* lower 16 bits has Rx packet buffer allocation size in KB */  		pba &= 0xffff; -		/* the tx fifo also stores 16 bytes of information about the tx -		 * but don't include ethernet FCS because hardware appends it */ +		/* the Tx fifo also stores 16 bytes of information about the Tx +		 * but don't include ethernet FCS because hardware appends it +		 */  		min_tx_space = (adapter->max_frame_size +  				sizeof(union e1000_adv_tx_desc) -  				ETH_FCS_LEN) * 2; @@ -1740,13 +1768,15 @@ void igb_reset(struct igb_adapter *adapter)  		/* If current Tx allocation is less than the min Tx FIFO size,  		 * and the min Tx FIFO size is less than the current Rx FIFO -		 * allocation, take space away from current Rx allocation */ +		 * allocation, take space away from current Rx allocation +		 */  		if (tx_space < min_tx_space &&  		    ((min_tx_space - tx_space) < pba)) {  			pba = pba - (min_tx_space - tx_space); -			/* if short on rx space, rx wins and must trump tx -			 * adjustment */ +			/* if short on Rx space, Rx wins and must trump Tx +			 * adjustment +			 */  			if (pba < min_rx_space)  				pba = min_rx_space;  		} @@ -1758,7 +1788,8 @@ void igb_reset(struct igb_adapter *adapter)  	 * (or the size used for early receive) above it in the Rx FIFO.  	 * Set it to the lower of:  	 * - 90% of the Rx FIFO size, or -	 * - the full Rx FIFO size minus one full frame */ +	 * - the full Rx FIFO size minus one full frame +	 */  	hwm = min(((pba << 10) * 9 / 10),  			((pba << 10) - 2 * adapter->max_frame_size)); @@ -1789,8 +1820,7 @@ void igb_reset(struct igb_adapter *adapter)  	if (hw->mac.ops.init_hw(hw))  		dev_err(&pdev->dev, "Hardware Error\n"); -	/* -	 * Flow control settings reset on hardware reset, so guarantee flow +	/* Flow control settings reset on hardware reset, so guarantee flow  	 * control is off when forcing speed.  	 */  	if (!hw->mac.autoneg) @@ -1826,14 +1856,13 @@ void igb_reset(struct igb_adapter *adapter)  static netdev_features_t igb_fix_features(struct net_device *netdev,  	netdev_features_t features)  { -	/* -	 * Since there is no support for separate rx/tx vlan accel -	 * enable/disable make sure tx flag is always in same state as rx. +	/* Since there is no support for separate Rx/Tx vlan accel +	 * enable/disable make sure Tx flag is always in same state as Rx.  	 
*/ -	if (features & NETIF_F_HW_VLAN_RX) -		features |= NETIF_F_HW_VLAN_TX; +	if (features & NETIF_F_HW_VLAN_CTAG_RX) +		features |= NETIF_F_HW_VLAN_CTAG_TX;  	else -		features &= ~NETIF_F_HW_VLAN_TX; +		features &= ~NETIF_F_HW_VLAN_CTAG_TX;  	return features;  } @@ -1844,7 +1873,7 @@ static int igb_set_features(struct net_device *netdev,  	netdev_features_t changed = netdev->features ^ features;  	struct igb_adapter *adapter = netdev_priv(netdev); -	if (changed & NETIF_F_HW_VLAN_RX) +	if (changed & NETIF_F_HW_VLAN_CTAG_RX)  		igb_vlan_mode(netdev, features);  	if (!(changed & NETIF_F_RXALL)) @@ -1876,6 +1905,7 @@ static const struct net_device_ops igb_netdev_ops = {  	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,  	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,  	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw, +	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,  	.ndo_get_vf_config	= igb_ndo_get_vf_config,  #ifdef CONFIG_NET_POLL_CONTROLLER  	.ndo_poll_controller	= igb_netpoll, @@ -1887,7 +1917,6 @@ static const struct net_device_ops igb_netdev_ops = {  /**   * igb_set_fw_version - Configure version string for ethtool   * @adapter: adapter struct - *   **/  void igb_set_fw_version(struct igb_adapter *adapter)  { @@ -1923,10 +1952,10 @@ void igb_set_fw_version(struct igb_adapter *adapter)  	return;  } -/*  igb_init_i2c - Init I2C interface +/** + *  igb_init_i2c - Init I2C interface   *  @adapter: pointer to adapter structure - * - */ + **/  static s32 igb_init_i2c(struct igb_adapter *adapter)  {  	s32 status = E1000_SUCCESS; @@ -1951,15 +1980,15 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)  }  /** - * igb_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in igb_pci_tbl + *  igb_probe - Device Initialization Routine + *  @pdev: PCI device information struct + *  @ent: entry in igb_pci_tbl   * - * Returns 0 on success, negative on failure + *  Returns 0 on success, negative on failure   * - * igb_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. + *  igb_probe initializes an adapter identified by a pci_dev structure. + *  The OS initialization, configuring of the adapter private structure, + *  and a hardware reset occur.   
**/  static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  { @@ -1996,18 +2025,19 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	} else {  		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));  		if (err) { -			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); +			err = dma_set_coherent_mask(&pdev->dev, +						    DMA_BIT_MASK(32));  			if (err) { -				dev_err(&pdev->dev, "No usable DMA " -					"configuration, aborting\n"); +				dev_err(&pdev->dev, +					"No usable DMA configuration, aborting\n");  				goto err_dma;  			}  		}  	}  	err = pci_request_selected_regions(pdev, pci_select_bars(pdev, -	                                   IORESOURCE_MEM), -	                                   igb_driver_name); +					   IORESOURCE_MEM), +					   igb_driver_name);  	if (err)  		goto err_pci_reg; @@ -2085,8 +2115,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		dev_info(&pdev->dev,  			"PHY reset is blocked due to SOL/IDER session.\n"); -	/* -	 * features is initialized to 0 in allocation, it might have bits +	/* features is initialized to 0 in allocation, it might have bits  	 * set by igb_sw_init so we should use an or instead of an  	 * assignment.  	 */ @@ -2097,15 +2126,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  			    NETIF_F_TSO6 |  			    NETIF_F_RXHASH |  			    NETIF_F_RXCSUM | -			    NETIF_F_HW_VLAN_RX | -			    NETIF_F_HW_VLAN_TX; +			    NETIF_F_HW_VLAN_CTAG_RX | +			    NETIF_F_HW_VLAN_CTAG_TX;  	/* copy netdev features into list of user selectable features */  	netdev->hw_features |= netdev->features;  	netdev->hw_features |= NETIF_F_RXALL;  	/* set this bit last since it cannot be part of hw_features */ -	netdev->features |= NETIF_F_HW_VLAN_FILTER; +	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;  	netdev->vlan_features |= NETIF_F_TSO |  				 NETIF_F_TSO6 | @@ -2130,11 +2159,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);  	/* before reading the NVM, reset the controller to put the device in a -	 * known good starting state */ +	 * known good starting state +	 */  	hw->mac.ops.reset_hw(hw); -	/* -	 * make sure the NVM is good , i211 parts have special NVM that +	/* make sure the NVM is good , i211 parts have special NVM that  	 * doesn't contain a checksum  	 */  	if (hw->mac.type != e1000_i211) { @@ -2161,9 +2190,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	igb_set_fw_version(adapter);  	setup_timer(&adapter->watchdog_timer, igb_watchdog, -	            (unsigned long) adapter); +		    (unsigned long) adapter);  	setup_timer(&adapter->phy_info_timer, igb_update_phy_info, -	            (unsigned long) adapter); +		    (unsigned long) adapter);  	INIT_WORK(&adapter->reset_task, igb_reset_task);  	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); @@ -2185,8 +2214,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	/* Check the NVM for wake support on non-port A ports */  	if (hw->mac.type >= e1000_82580)  		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + -		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, -		                 &eeprom_data); +				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, +				 &eeprom_data);  	else if (hw->bus.func == 1)  		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); @@ -2195,7 +2224,8 @@ static int igb_probe(struct pci_dev *pdev, const 
struct pci_device_id *ent)  	/* now that we have the eeprom settings, apply the special cases where  	 * the eeprom may be wrong or the board simply won't support wake on -	 * lan on a particular port */ +	 * lan on a particular port +	 */  	switch (pdev->device) {  	case E1000_DEV_ID_82575GB_QUAD_COPPER:  		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; @@ -2204,7 +2234,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	case E1000_DEV_ID_82576_FIBER:  	case E1000_DEV_ID_82576_SERDES:  		/* Wake events only supported on port A for dual fiber -		 * regardless of eeprom setting */ +		 * regardless of eeprom setting +		 */  		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)  			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;  		break; @@ -2274,8 +2305,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {  		u16 ets_word; -		/* -		 * Read the NVM to determine if this i350 device supports an +		/* Read the NVM to determine if this i350 device supports an  		 * external thermal sensor.  		 */  		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); @@ -2294,17 +2324,20 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	igb_ptp_init(adapter);  	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); -	/* print bus type/speed/width info */ -	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", -		 netdev->name, -		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : -		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : -		                                            "unknown"), -		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : -		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : -		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : -		   "unknown"), -		 netdev->dev_addr); +	/* print bus type/speed/width info, not applicable to i354 */ +	if (hw->mac.type != e1000_i354) { +		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", +			 netdev->name, +			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : +			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : +			   "unknown"), +			 ((hw->bus.width == e1000_bus_width_pcie_x4) ? +			  "Width x4" : +			  (hw->bus.width == e1000_bus_width_pcie_x2) ? +			  "Width x2" : +			  (hw->bus.width == e1000_bus_width_pcie_x1) ? 
+			  "Width x1" : "unknown"), netdev->dev_addr); +	}  	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);  	if (ret_val) @@ -2321,6 +2354,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	case e1000_i211:  		igb_set_eee_i350(hw);  		break; +	case e1000_i354: +		if (hw->phy.media_type == e1000_media_type_copper) { +			if ((rd32(E1000_CTRL_EXT) & +			    E1000_CTRL_EXT_LINK_MODE_SGMII)) +				igb_set_eee_i354(hw); +		} +		break;  	default:  		break;  	} @@ -2344,7 +2384,7 @@ err_ioremap:  	free_netdev(netdev);  err_alloc_etherdev:  	pci_release_selected_regions(pdev, -	                             pci_select_bars(pdev, IORESOURCE_MEM)); +				     pci_select_bars(pdev, IORESOURCE_MEM));  err_pci_reg:  err_dma:  	pci_disable_device(pdev); @@ -2361,7 +2401,7 @@ static int  igb_disable_sriov(struct pci_dev *pdev)  	/* reclaim resources allocated to VFs */  	if (adapter->vf_data) {  		/* disable iov and allow time for transactions to clear */ -		if (igb_vfs_are_assigned(adapter)) { +		if (pci_vfs_assigned(pdev)) {  			dev_warn(&pdev->dev,  				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");  			return -EPERM; @@ -2444,26 +2484,24 @@ out:  }  #endif -/* +/**   *  igb_remove_i2c - Cleanup  I2C interface   *  @adapter: pointer to adapter structure - * - */ + **/  static void igb_remove_i2c(struct igb_adapter *adapter)  { -  	/* free the adapter bus structure */  	i2c_del_adapter(&adapter->i2c_adap);  }  /** - * igb_remove - Device Removal Routine - * @pdev: PCI device information struct + *  igb_remove - Device Removal Routine + *  @pdev: PCI device information struct   * - * igb_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device.  The could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. + *  igb_remove is called by the PCI subsystem to alert the driver + *  that it should release a PCI device.  The could be caused by a + *  Hot-Plug event, or because the driver is going to be removed from + *  memory.   **/  static void igb_remove(struct pci_dev *pdev)  { @@ -2477,8 +2515,7 @@ static void igb_remove(struct pci_dev *pdev)  #endif  	igb_remove_i2c(adapter);  	igb_ptp_stop(adapter); -	/* -	 * The watchdog timer may be rescheduled, so explicitly +	/* The watchdog timer may be rescheduled, so explicitly  	 * disable watchdog from being rescheduled.  	 */  	set_bit(__IGB_DOWN, &adapter->state); @@ -2498,7 +2535,8 @@ static void igb_remove(struct pci_dev *pdev)  #endif  	/* Release control of h/w to f/w.  If f/w is AMT enabled, this -	 * would have already happened in close and is redundant. */ +	 * would have already happened in close and is redundant. 
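igb_disable_sriov() above now relies on the core pci_vfs_assigned() helper rather than a driver-private check before tearing the VFs down. A minimal sketch of that guard pattern; the function and the warning text are illustrative, not the driver's.

#include <linux/pci.h>

static int example_teardown_vfs(struct pci_dev *pdev)
{
	/* Refuse to pull SR-IOV resources out from under a guest. */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "VFs are assigned to guests, skipping SR-IOV teardown\n");
		return -EPERM;
	}

	pci_disable_sriov(pdev);
	return 0;
}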
+	 */  	igb_release_hw_control(adapter);  	unregister_netdev(netdev); @@ -2513,7 +2551,7 @@ static void igb_remove(struct pci_dev *pdev)  	if (hw->flash_address)  		iounmap(hw->flash_address);  	pci_release_selected_regions(pdev, -	                             pci_select_bars(pdev, IORESOURCE_MEM)); +				     pci_select_bars(pdev, IORESOURCE_MEM));  	kfree(adapter->shadow_vfta);  	free_netdev(netdev); @@ -2524,13 +2562,13 @@ static void igb_remove(struct pci_dev *pdev)  }  /** - * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space - * @adapter: board private structure to initialize + *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space + *  @adapter: board private structure to initialize   * - * This function initializes the vf specific data storage and then attempts to - * allocate the VFs.  The reason for ordering it this way is because it is much - * mor expensive time wise to disable SR-IOV than it is to allocate and free - * the memory for the VFs. + *  This function initializes the vf specific data storage and then attempts to + *  allocate the VFs.  The reason for ordering it this way is because it is much + *  mor expensive time wise to disable SR-IOV than it is to allocate and free + *  the memory for the VFs.   **/  static void igb_probe_vfs(struct igb_adapter *adapter)  { @@ -2576,6 +2614,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)  		}  		/* fall through */  	case e1000_82580: +	case e1000_i354:  	default:  		max_rss_queues = IGB_MAX_RX_QUEUES;  		break; @@ -2590,8 +2629,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)  		/* Device supports enough interrupts without queue pairing. */  		break;  	case e1000_82576: -		/* -		 * If VFs are going to be allocated with RSS queues then we +		/* If VFs are going to be allocated with RSS queues then we  		 * should pair the queues in order to conserve interrupts due  		 * to limited supply.  		 */ @@ -2601,10 +2639,10 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)  		/* fall through */  	case e1000_82580:  	case e1000_i350: +	case e1000_i354:  	case e1000_i210:  	default: -		/* -		 * If rss_queues > half of max_rss_queues, pair the queues in +		/* If rss_queues > half of max_rss_queues, pair the queues in  		 * order to conserve interrupts due to limited supply.  		 */  		if (adapter->rss_queues > (max_rss_queues / 2)) @@ -2614,12 +2652,12 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)  }  /** - * igb_sw_init - Initialize general software structures (struct igb_adapter) - * @adapter: board private structure to initialize + *  igb_sw_init - Initialize general software structures (struct igb_adapter) + *  @adapter: board private structure to initialize   * - * igb_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). + *  igb_sw_init initializes the Adapter private data structure. + *  Fields are initialized based on PCI device information and + *  OS network device settings (MTU size).   
**/  static int igb_sw_init(struct igb_adapter *adapter)  { @@ -2689,16 +2727,16 @@ static int igb_sw_init(struct igb_adapter *adapter)  }  /** - * igb_open - Called when a network interface is made active - * @netdev: network interface device structure + *  igb_open - Called when a network interface is made active + *  @netdev: network interface device structure   * - * Returns 0 on success, negative value on failure + *  Returns 0 on success, negative value on failure   * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP).  At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. + *  The open entry point is called when a network interface is made + *  active by the system (IFF_UP).  At this point all resources needed + *  for transmit and receive operations are allocated, the interrupt + *  handler is registered with the OS, the watchdog timer is started, + *  and the stack is notified that the interface is ready.   **/  static int __igb_open(struct net_device *netdev, bool resuming)  { @@ -2734,7 +2772,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)  	/* before we allocate an interrupt, we must be ready to handle it.  	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt  	 * as soon as we call pci_request_irq, so we have to setup our -	 * clean_rx handler before we do so.  */ +	 * clean_rx handler before we do so. +	 */  	igb_configure(adapter);  	err = igb_request_irq(adapter); @@ -2803,15 +2842,15 @@ static int igb_open(struct net_device *netdev)  }  /** - * igb_close - Disables a network interface - * @netdev: network interface device structure + *  igb_close - Disables a network interface + *  @netdev: network interface device structure   * - * Returns 0, this is not allowed to fail + *  Returns 0, this is not allowed to fail   * - * The close entry point is called when an interface is de-activated - * by the OS.  The hardware is still under the driver's control, but - * needs to be disabled.  A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. + *  The close entry point is called when an interface is de-activated + *  by the OS.  The hardware is still under the driver's control, but + *  needs to be disabled.  A global MAC reset is issued to stop the + *  hardware, and all transmit and receive resources are freed.   
**/  static int __igb_close(struct net_device *netdev, bool suspending)  { @@ -2840,10 +2879,10 @@ static int igb_close(struct net_device *netdev)  }  /** - * igb_setup_tx_resources - allocate Tx resources (Descriptors) - * @tx_ring: tx descriptor ring (for a specific queue) to setup + *  igb_setup_tx_resources - allocate Tx resources (Descriptors) + *  @tx_ring: tx descriptor ring (for a specific queue) to setup   * - * Return 0 on success, negative on failure + *  Return 0 on success, negative on failure   **/  int igb_setup_tx_resources(struct igb_ring *tx_ring)  { @@ -2878,11 +2917,11 @@ err:  }  /** - * igb_setup_all_tx_resources - wrapper to allocate Tx resources - *				  (Descriptors) for all queues - * @adapter: board private structure + *  igb_setup_all_tx_resources - wrapper to allocate Tx resources + *				 (Descriptors) for all queues + *  @adapter: board private structure   * - * Return 0 on success, negative on failure + *  Return 0 on success, negative on failure   **/  static int igb_setup_all_tx_resources(struct igb_adapter *adapter)  { @@ -2904,8 +2943,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)  }  /** - * igb_setup_tctl - configure the transmit control registers - * @adapter: Board private structure + *  igb_setup_tctl - configure the transmit control registers + *  @adapter: Board private structure   **/  void igb_setup_tctl(struct igb_adapter *adapter)  { @@ -2930,11 +2969,11 @@ void igb_setup_tctl(struct igb_adapter *adapter)  }  /** - * igb_configure_tx_ring - Configure transmit ring after Reset - * @adapter: board private structure - * @ring: tx ring to configure + *  igb_configure_tx_ring - Configure transmit ring after Reset + *  @adapter: board private structure + *  @ring: tx ring to configure   * - * Configure a transmit ring after a reset. + *  Configure a transmit ring after a reset.   **/  void igb_configure_tx_ring(struct igb_adapter *adapter,                             struct igb_ring *ring) @@ -2950,9 +2989,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,  	mdelay(10);  	wr32(E1000_TDLEN(reg_idx), -	                ring->count * sizeof(union e1000_adv_tx_desc)); +	     ring->count * sizeof(union e1000_adv_tx_desc));  	wr32(E1000_TDBAL(reg_idx), -	                tdba & 0x00000000ffffffffULL); +	     tdba & 0x00000000ffffffffULL);  	wr32(E1000_TDBAH(reg_idx), tdba >> 32);  	ring->tail = hw->hw_addr + E1000_TDT(reg_idx); @@ -2968,10 +3007,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,  }  /** - * igb_configure_tx - Configure transmit Unit after Reset - * @adapter: board private structure + *  igb_configure_tx - Configure transmit Unit after Reset + *  @adapter: board private structure   * - * Configure the Tx unit of the MAC after a reset. + *  Configure the Tx unit of the MAC after a reset.   
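igb_setup_tx_resources(), whose body is not shown in this hunk, is where the descriptor array that igb_configure_tx_ring() later programs into TDBAL/TDBAH/TDLEN gets its DMA-coherent backing store. A hedged sketch of that general pattern, using a hypothetical ring type instead of struct igb_ring:

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Simplified stand-ins for the driver's descriptor and ring structures. */
struct my_desc {
	__le64 buffer_addr;
	__le32 cmd_type_len;
	__le32 status;
};

struct my_ring {
	struct device *dev;	/* device used for the DMA mapping */
	u16 count;		/* number of descriptors */
	unsigned int size;	/* bytes actually allocated */
	struct my_desc *desc;	/* CPU address of the descriptor array */
	dma_addr_t dma;		/* bus address for the base registers */
	void **buffer_info;	/* per-descriptor software state */
};

static int my_setup_ring(struct my_ring *ring)
{
	ring->buffer_info = vzalloc(ring->count * sizeof(*ring->buffer_info));
	if (!ring->buffer_info)
		return -ENOMEM;

	/* Round the ring up so it meets the hardware's alignment rules. */
	ring->size = ALIGN(ring->count * sizeof(struct my_desc), 4096);

	ring->desc = dma_alloc_coherent(ring->dev, ring->size,
					&ring->dma, GFP_KERNEL);
	if (!ring->desc) {
		vfree(ring->buffer_info);
		return -ENOMEM;
	}
	return 0;
}

The bus address returned in ring->dma is what a configure routine like igb_configure_tx_ring() above then splits across TDBAL (low 32 bits) and TDBAH (high 32 bits), with the descriptor count times the descriptor size written to TDLEN.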
**/  static void igb_configure_tx(struct igb_adapter *adapter)  { @@ -2982,10 +3021,10 @@ static void igb_configure_tx(struct igb_adapter *adapter)  }  /** - * igb_setup_rx_resources - allocate Rx resources (Descriptors) - * @rx_ring:    rx descriptor ring (for a specific queue) to setup + *  igb_setup_rx_resources - allocate Rx resources (Descriptors) + *  @rx_ring: Rx descriptor ring (for a specific queue) to setup   * - * Returns 0 on success, negative on failure + *  Returns 0 on success, negative on failure   **/  int igb_setup_rx_resources(struct igb_ring *rx_ring)  { @@ -3021,11 +3060,11 @@ err:  }  /** - * igb_setup_all_rx_resources - wrapper to allocate Rx resources - *				  (Descriptors) for all queues - * @adapter: board private structure + *  igb_setup_all_rx_resources - wrapper to allocate Rx resources + *				 (Descriptors) for all queues + *  @adapter: board private structure   * - * Return 0 on success, negative on failure + *  Return 0 on success, negative on failure   **/  static int igb_setup_all_rx_resources(struct igb_adapter *adapter)  { @@ -3047,8 +3086,8 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)  }  /** - * igb_setup_mrqc - configure the multiple receive queue control registers - * @adapter: Board private structure + *  igb_setup_mrqc - configure the multiple receive queue control registers + *  @adapter: Board private structure   **/  static void igb_setup_mrqc(struct igb_adapter *adapter)  { @@ -3081,8 +3120,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)  		break;  	} -	/* -	 * Populate the indirection table 4 entries at a time.  To do this +	/* Populate the indirection table 4 entries at a time.  To do this  	 * we are generating the results for n and n+2 and then interleaving  	 * those with the results with n+1 and n+3.  	 */ @@ -3098,8 +3136,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)  		wr32(E1000_RETA(j), reta);  	} -	/* -	 * Disable raw packet checksumming so that RSS hash is placed in +	/* Disable raw packet checksumming so that RSS hash is placed in  	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum  	 * offloads as they are enabled by default  	 */ @@ -3129,7 +3166,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)  	/* If VMDq is enabled then we set the appropriate mode for that, else  	 * we default to RSS so that an RSS hash is calculated per packet even -	 * if we are only using one queue */ +	 * if we are only using one queue +	 */  	if (adapter->vfs_allocated_count) {  		if (hw->mac.type > e1000_82575) {  			/* Set the default pool for the PF's first queue */ @@ -3154,8 +3192,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)  }  /** - * igb_setup_rctl - configure the receive control registers - * @adapter: Board private structure + *  igb_setup_rctl - configure the receive control registers + *  @adapter: Board private structure   **/  void igb_setup_rctl(struct igb_adapter *adapter)  { @@ -3170,8 +3208,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)  	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |  		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); -	/* -	 * enable stripping of CRC. It's unlikely this will break BMC +	/* enable stripping of CRC. It's unlikely this will break BMC  	 * redirection as it did with e1000. Newer features require  	 * that the HW strips the CRC.  	 */ @@ -3198,7 +3235,8 @@ void igb_setup_rctl(struct igb_adapter *adapter)  	/* This is useful for sniffing bad packets. 
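[Illustration] Earlier in this hunk the MRQC setup comment describes populating the RSS indirection table four entries at a time. The sketch below shows only the "pack four queue indices per 32-bit register write" idea with a plain round-robin assignment; the real driver derives the entries differently (shifted base values per MAC type), so treat the table size, entry width, and distribution here as assumptions for illustration.

/* Hedged sketch: fill a 128-entry RSS redirection table round-robin
 * across num_queues and pack four 8-bit entries per 32-bit register
 * image, lowest entry in the least significant byte.  Not the igb
 * hardware encoding, just the packing pattern.
 */
#include <stdint.h>
#include <stdio.h>

#define RETA_ENTRIES 128

static void fill_reta(uint32_t reta_regs[RETA_ENTRIES / 4], unsigned num_queues)
{
	for (unsigned i = 0; i < RETA_ENTRIES; i += 4) {
		uint32_t reg = 0;

		for (unsigned j = 0; j < 4; j++)
			reg |= (uint32_t)((i + j) % num_queues) << (8 * j);

		reta_regs[i / 4] = reg;	/* one register write worth of entries */
	}
}

int main(void)
{
	uint32_t regs[RETA_ENTRIES / 4];

	fill_reta(regs, 4);
	printf("RETA[0] = 0x%08x\n", (unsigned)regs[0]);	/* 0x03020100 for 4 queues */
	return 0;
}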
*/  	if (adapter->netdev->features & NETIF_F_RXALL) {  		/* UPE and MPE will be handled by normal PROMISC logic -		 * in e1000e_set_rx_mode */ +		 * in e1000e_set_rx_mode +		 */  		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */  			 E1000_RCTL_BAM | /* RX All Bcast Pkts */  			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ @@ -3221,7 +3259,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,  	u32 vmolr;  	/* if it isn't the PF check to see if VFs are enabled and -	 * increase the size to support vlan tags */ +	 * increase the size to support vlan tags +	 */  	if (vfn < adapter->vfs_allocated_count &&  	    adapter->vf_data[vfn].vlans_enabled)  		size += VLAN_TAG_SIZE; @@ -3235,10 +3274,10 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,  }  /** - * igb_rlpml_set - set maximum receive packet size - * @adapter: board private structure + *  igb_rlpml_set - set maximum receive packet size + *  @adapter: board private structure   * - * Configure maximum receivable packet size. + *  Configure maximum receivable packet size.   **/  static void igb_rlpml_set(struct igb_adapter *adapter)  { @@ -3248,8 +3287,7 @@ static void igb_rlpml_set(struct igb_adapter *adapter)  	if (pf_id) {  		igb_set_vf_rlpml(adapter, max_frame_size, pf_id); -		/* -		 * If we're in VMDQ or SR-IOV mode, then set global RLPML +		/* If we're in VMDQ or SR-IOV mode, then set global RLPML  		 * to our max jumbo frame size, in case we need to enable  		 * jumbo frames on one of the rings later.  		 * This will not pass over-length frames into the default @@ -3267,17 +3305,16 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,  	struct e1000_hw *hw = &adapter->hw;  	u32 vmolr; -	/* -	 * This register exists only on 82576 and newer so if we are older then +	/* This register exists only on 82576 and newer so if we are older then  	 * we should exit and do nothing  	 */  	if (hw->mac.type < e1000_82576)  		return;  	vmolr = rd32(E1000_VMOLR(vfn)); -	vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */ +	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */  	if (aupe) -		vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */ +		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */  	else  		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ @@ -3286,25 +3323,24 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,  	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)  		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ -	/* -	 * for VMDq only allow the VFs and pool 0 to accept broadcast and +	/* for VMDq only allow the VFs and pool 0 to accept broadcast and  	 * multicast packets  	 */  	if (vfn <= adapter->vfs_allocated_count) -		vmolr |= E1000_VMOLR_BAM;	   /* Accept broadcast */ +		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */  	wr32(E1000_VMOLR(vfn), vmolr);  }  /** - * igb_configure_rx_ring - Configure a receive ring after Reset - * @adapter: board private structure - * @ring: receive ring to be configured + *  igb_configure_rx_ring - Configure a receive ring after Reset + *  @adapter: board private structure + *  @ring: receive ring to be configured   * - * Configure the Rx unit of the MAC after a reset. + *  Configure the Rx unit of the MAC after a reset.   
**/  void igb_configure_rx_ring(struct igb_adapter *adapter, -                           struct igb_ring *ring) +			   struct igb_ring *ring)  {  	struct e1000_hw *hw = &adapter->hw;  	u64 rdba = ring->dma; @@ -3319,7 +3355,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,  	     rdba & 0x00000000ffffffffULL);  	wr32(E1000_RDBAH(reg_idx), rdba >> 32);  	wr32(E1000_RDLEN(reg_idx), -	               ring->count * sizeof(union e1000_adv_rx_desc)); +	     ring->count * sizeof(union e1000_adv_rx_desc));  	/* initialize head and tail */  	ring->tail = hw->hw_addr + E1000_RDT(reg_idx); @@ -3351,10 +3387,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,  }  /** - * igb_configure_rx - Configure receive Unit after Reset - * @adapter: board private structure + *  igb_configure_rx - Configure receive Unit after Reset + *  @adapter: board private structure   * - * Configure the Rx unit of the MAC after a reset. + *  Configure the Rx unit of the MAC after a reset.   **/  static void igb_configure_rx(struct igb_adapter *adapter)  { @@ -3365,19 +3401,20 @@ static void igb_configure_rx(struct igb_adapter *adapter)  	/* set the correct pool for the PF default MAC address in entry 0 */  	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, -	                 adapter->vfs_allocated_count); +			 adapter->vfs_allocated_count);  	/* Setup the HW Rx Head and Tail Descriptor Pointers and -	 * the Base and Length of the Rx Descriptor Ring */ +	 * the Base and Length of the Rx Descriptor Ring +	 */  	for (i = 0; i < adapter->num_rx_queues; i++)  		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);  }  /** - * igb_free_tx_resources - Free Tx Resources per Queue - * @tx_ring: Tx descriptor ring for a specific queue + *  igb_free_tx_resources - Free Tx Resources per Queue + *  @tx_ring: Tx descriptor ring for a specific queue   * - * Free all transmit software resources + *  Free all transmit software resources   **/  void igb_free_tx_resources(struct igb_ring *tx_ring)  { @@ -3397,10 +3434,10 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)  }  /** - * igb_free_all_tx_resources - Free Tx Resources for All Queues - * @adapter: board private structure + *  igb_free_all_tx_resources - Free Tx Resources for All Queues + *  @adapter: board private structure   * - * Free all transmit software resources + *  Free all transmit software resources   **/  static void igb_free_all_tx_resources(struct igb_adapter *adapter)  { @@ -3433,8 +3470,8 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,  }  /** - * igb_clean_tx_ring - Free Tx Buffers - * @tx_ring: ring to be cleaned + *  igb_clean_tx_ring - Free Tx Buffers + *  @tx_ring: ring to be cleaned   **/  static void igb_clean_tx_ring(struct igb_ring *tx_ring)  { @@ -3464,8 +3501,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)  }  /** - * igb_clean_all_tx_rings - Free Tx Buffers for all queues - * @adapter: board private structure + *  igb_clean_all_tx_rings - Free Tx Buffers for all queues + *  @adapter: board private structure   **/  static void igb_clean_all_tx_rings(struct igb_adapter *adapter)  { @@ -3476,10 +3513,10 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)  }  /** - * igb_free_rx_resources - Free Rx Resources - * @rx_ring: ring to clean the resources from + *  igb_free_rx_resources - Free Rx Resources + *  @rx_ring: ring to clean the resources from   * - * Free all receive software resources + *  Free all receive software resources   **/  void igb_free_rx_resources(struct igb_ring *rx_ring)  { @@ 
-3499,10 +3536,10 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)  }  /** - * igb_free_all_rx_resources - Free Rx Resources for All Queues - * @adapter: board private structure + *  igb_free_all_rx_resources - Free Rx Resources for All Queues + *  @adapter: board private structure   * - * Free all receive software resources + *  Free all receive software resources   **/  static void igb_free_all_rx_resources(struct igb_adapter *adapter)  { @@ -3513,8 +3550,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)  }  /** - * igb_clean_rx_ring - Free Rx Buffers per Queue - * @rx_ring: ring to free buffers from + *  igb_clean_rx_ring - Free Rx Buffers per Queue + *  @rx_ring: ring to free buffers from   **/  static void igb_clean_rx_ring(struct igb_ring *rx_ring)  { @@ -3556,8 +3593,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)  }  /** - * igb_clean_all_rx_rings - Free Rx Buffers for all queues - * @adapter: board private structure + *  igb_clean_all_rx_rings - Free Rx Buffers for all queues + *  @adapter: board private structure   **/  static void igb_clean_all_rx_rings(struct igb_adapter *adapter)  { @@ -3568,11 +3605,11 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)  }  /** - * igb_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure + *  igb_set_mac - Change the Ethernet Address of the NIC + *  @netdev: network interface device structure + *  @p: pointer to an address structure   * - * Returns 0 on success, negative on failure + *  Returns 0 on success, negative on failure   **/  static int igb_set_mac(struct net_device *netdev, void *p)  { @@ -3588,19 +3625,19 @@ static int igb_set_mac(struct net_device *netdev, void *p)  	/* set the correct pool for the new PF MAC address in entry 0 */  	igb_rar_set_qsel(adapter, hw->mac.addr, 0, -	                 adapter->vfs_allocated_count); +			 adapter->vfs_allocated_count);  	return 0;  }  /** - * igb_write_mc_addr_list - write multicast addresses to MTA - * @netdev: network interface device structure + *  igb_write_mc_addr_list - write multicast addresses to MTA + *  @netdev: network interface device structure   * - * Writes multicast address list to the MTA hash table. - * Returns: -ENOMEM on failure - *                0 on no addresses written - *                X on writing X addresses to MTA + *  Writes multicast address list to the MTA hash table. + *  Returns: -ENOMEM on failure + *           0 on no addresses written + *           X on writing X addresses to MTA   **/  static int igb_write_mc_addr_list(struct net_device *netdev)  { @@ -3633,13 +3670,13 @@ static int igb_write_mc_addr_list(struct net_device *netdev)  }  /** - * igb_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device structure + *  igb_write_uc_addr_list - write unicast addresses to RAR table + *  @netdev: network interface device structure   * - * Writes unicast address list to the RAR table. - * Returns: -ENOMEM on failure/insufficient address space - *                0 on no addresses written - *                X on writing X addresses to the RAR table + *  Writes unicast address list to the RAR table. 
+ *  Returns: -ENOMEM on failure/insufficient address space + *           0 on no addresses written + *           X on writing X addresses to the RAR table   **/  static int igb_write_uc_addr_list(struct net_device *netdev)  { @@ -3660,8 +3697,8 @@ static int igb_write_uc_addr_list(struct net_device *netdev)  			if (!rar_entries)  				break;  			igb_rar_set_qsel(adapter, ha->addr, -			                 rar_entries--, -			                 vfn); +					 rar_entries--, +					 vfn);  			count++;  		}  	} @@ -3676,13 +3713,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)  }  /** - * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set - * @netdev: network interface device structure + *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + *  @netdev: network interface device structure   * - * The set_rx_mode entry point is called whenever the unicast or multicast - * address lists or the network interface flags are updated.  This routine is - * responsible for configuring the hardware for proper unicast, multicast, - * promiscuous mode, and all-multi behavior. + *  The set_rx_mode entry point is called whenever the unicast or multicast + *  address lists or the network interface flags are updated.  This routine is + *  responsible for configuring the hardware for proper unicast, multicast, + *  promiscuous mode, and all-multi behavior.   **/  static void igb_set_rx_mode(struct net_device *netdev)  { @@ -3699,6 +3736,10 @@ static void igb_set_rx_mode(struct net_device *netdev)  	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);  	if (netdev->flags & IFF_PROMISC) { +		u32 mrqc = rd32(E1000_MRQC); +		/* retain VLAN HW filtering if in VT mode */ +		if (mrqc & E1000_MRQC_ENABLE_VMDQ) +			rctl |= E1000_RCTL_VFE;  		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);  		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);  	} else { @@ -3706,8 +3747,7 @@ static void igb_set_rx_mode(struct net_device *netdev)  			rctl |= E1000_RCTL_MPE;  			vmolr |= E1000_VMOLR_MPME;  		} else { -			/* -			 * Write addresses to the MTA, if the attempt fails +			/* Write addresses to the MTA, if the attempt fails  			 * then we should just turn on promiscuous mode so  			 * that we can at least receive multicast traffic  			 */ @@ -3719,8 +3759,7 @@ static void igb_set_rx_mode(struct net_device *netdev)  				vmolr |= E1000_VMOLR_ROMPE;  			}  		} -		/* -		 * Write addresses to available RAR registers, if there is not +		/* Write addresses to available RAR registers, if there is not  		 * sufficient space to store all the addresses then enable  		 * unicast promiscuous mode  		 */ @@ -3733,8 +3772,7 @@ static void igb_set_rx_mode(struct net_device *netdev)  	}  	wr32(E1000_RCTL, rctl); -	/* -	 * In order to support SR-IOV and eventually VMDq it is necessary to set +	/* In order to support SR-IOV and eventually VMDq it is necessary to set  	 * the VMOLR to enable the appropriate modes.  
Without this workaround  	 * we will have issues with VLAN tag stripping not being done for frames  	 * that are only arriving because we are the default pool @@ -3743,7 +3781,7 @@ static void igb_set_rx_mode(struct net_device *netdev)  		return;  	vmolr |= rd32(E1000_VMOLR(vfn)) & -	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); +		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);  	wr32(E1000_VMOLR(vfn), vmolr);  	igb_restore_vf_multicasts(adapter);  } @@ -3788,7 +3826,8 @@ static void igb_spoof_check(struct igb_adapter *adapter)  }  /* Need to wait a few seconds after link up to get diagnostic information from - * the phy */ + * the phy + */  static void igb_update_phy_info(unsigned long data)  {  	struct igb_adapter *adapter = (struct igb_adapter *) data; @@ -3796,8 +3835,8 @@ static void igb_update_phy_info(unsigned long data)  }  /** - * igb_has_link - check shared code for link and determine up/down - * @adapter: pointer to driver private info + *  igb_has_link - check shared code for link and determine up/down + *  @adapter: pointer to driver private info   **/  bool igb_has_link(struct igb_adapter *adapter)  { @@ -3842,17 +3881,16 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)  		ctrl_ext = rd32(E1000_CTRL_EXT);  		if ((hw->phy.media_type == e1000_media_type_copper) && -		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) { +		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))  			ret = !!(thstat & event); -		}  	}  	return ret;  }  /** - * igb_watchdog - Timer Call-back - * @data: pointer to adapter cast into an unsigned long + *  igb_watchdog - Timer Call-back + *  @data: pointer to adapter cast into an unsigned long   **/  static void igb_watchdog(unsigned long data)  { @@ -3864,9 +3902,10 @@ static void igb_watchdog(unsigned long data)  static void igb_watchdog_task(struct work_struct *work)  {  	struct igb_adapter *adapter = container_of(work, -	                                           struct igb_adapter, -                                                   watchdog_task); +						   struct igb_adapter, +						   watchdog_task);  	struct e1000_hw *hw = &adapter->hw; +	struct e1000_phy_info *phy = &hw->phy;  	struct net_device *netdev = adapter->netdev;  	u32 link;  	int i; @@ -3879,8 +3918,8 @@ static void igb_watchdog_task(struct work_struct *work)  		if (!netif_carrier_ok(netdev)) {  			u32 ctrl;  			hw->mac.ops.get_speed_and_duplex(hw, -			                                 &adapter->link_speed, -			                                 &adapter->link_duplex); +							 &adapter->link_speed, +							 &adapter->link_duplex);  			ctrl = rd32(E1000_CTRL);  			/* Links status message must follow this format */ @@ -3895,6 +3934,11 @@ static void igb_watchdog_task(struct work_struct *work)  			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :  			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None"); +			/* check if SmartSpeed worked */ +			igb_check_downshift(hw); +			if (phy->speed_downgraded) +				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); +  			/* check for thermal sensor event */  			if (igb_thermal_sensor_event(hw,  			    E1000_THSTAT_LINK_THROTTLE)) { @@ -3963,7 +4007,8 @@ static void igb_watchdog_task(struct work_struct *work)  			/* We've lost link, so the controller stops DMA,  			 * but we've got queued Tx work that's never going  			 * to get done, so reset controller to flush Tx. -			 * (Do the reset outside of interrupt context). */ +			 * (Do the reset outside of interrupt context). 
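[Illustration] The lost-link path that follows relies on igb_desc_unused() to decide whether Tx descriptors are still outstanding. Below is a minimal model of the usual next_to_use / next_to_clean arithmetic for such a ring; the structure and field names are illustrative, not the driver's.

/* Sketch of free-descriptor accounting in a circular ring.  One slot is
 * always kept unusable so that "full" and "empty" are distinguishable,
 * hence the "- 1".  Indices wrap modulo count.
 */
#include <stdio.h>

struct ring {
	unsigned short count;		/* total descriptors */
	unsigned short next_to_use;	/* producer index (driver fills) */
	unsigned short next_to_clean;	/* consumer index (completions) */
};

static unsigned short desc_unused(const struct ring *r)
{
	unsigned short ntc = r->next_to_clean, ntu = r->next_to_use;

	return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
}

int main(void)
{
	struct ring r = { .count = 256, .next_to_use = 10, .next_to_clean = 4 };

	/* 256 - (10 - 4) - 1 = 249 free slots */
	printf("unused = %u\n", desc_unused(&r));
	/* "Tx work still queued" style test, as in the watchdog above */
	printf("pending = %s\n",
	       desc_unused(&r) + 1 < r.count ? "yes" : "no");
	return 0;
}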
+			 */  			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {  				adapter->tx_timeout_count++;  				schedule_work(&adapter->reset_task); @@ -3976,7 +4021,7 @@ static void igb_watchdog_task(struct work_struct *work)  		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);  	} -	/* Cause software interrupt to ensure rx ring is cleaned */ +	/* Cause software interrupt to ensure Rx ring is cleaned */  	if (adapter->msix_entries) {  		u32 eics = 0;  		for (i = 0; i < adapter->num_q_vectors; i++) @@ -4003,20 +4048,20 @@ enum latency_range {  };  /** - * igb_update_ring_itr - update the dynamic ITR value based on packet size + *  igb_update_ring_itr - update the dynamic ITR value based on packet size + *  @q_vector: pointer to q_vector   * - *      Stores a new ITR value based on strictly on packet size.  This - *      algorithm is less sophisticated than that used in igb_update_itr, - *      due to the difficulty of synchronizing statistics across multiple - *      receive rings.  The divisors and thresholds used by this function - *      were determined based on theoretical maximum wire speed and testing - *      data, in order to minimize response time while increasing bulk - *      throughput. - *      This functionality is controlled by the InterruptThrottleRate module - *      parameter (see igb_param.c) - *      NOTE:  This function is called only when operating in a multiqueue - *             receive environment. - * @q_vector: pointer to q_vector + *  Stores a new ITR value based on strictly on packet size.  This + *  algorithm is less sophisticated than that used in igb_update_itr, + *  due to the difficulty of synchronizing statistics across multiple + *  receive rings.  The divisors and thresholds used by this function + *  were determined based on theoretical maximum wire speed and testing + *  data, in order to minimize response time while increasing bulk + *  throughput. + *  This functionality is controlled by the InterruptThrottleRate module + *  parameter (see igb_param.c) + *  NOTE:  This function is called only when operating in a multiqueue + *         receive environment.   **/  static void igb_update_ring_itr(struct igb_q_vector *q_vector)  { @@ -4077,20 +4122,21 @@ clear_counts:  }  /** - * igb_update_itr - update the dynamic ITR value based on statistics - *      Stores a new ITR value based on packets and byte - *      counts during the last interrupt.  The advantage of per interrupt - *      computation is faster updates and more accurate ITR for the current - *      traffic pattern.  Constants in this function were computed - *      based on theoretical maximum wire speed and thresholds were set based - *      on testing data as well as attempting to minimize response time - *      while increasing bulk throughput. - *      this functionality is controlled by the InterruptThrottleRate module - *      parameter (see igb_param.c) - *      NOTE:  These calculations are only valid when operating in a single- - *             queue environment. - * @q_vector: pointer to q_vector - * @ring_container: ring info to update the itr for + *  igb_update_itr - update the dynamic ITR value based on statistics + *  @q_vector: pointer to q_vector + *  @ring_container: ring info to update the itr for + * + *  Stores a new ITR value based on packets and byte + *  counts during the last interrupt.  The advantage of per interrupt + *  computation is faster updates and more accurate ITR for the current + *  traffic pattern.  
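[Illustration] The kernel-doc being reflowed here describes steering the interrupt throttle rate from the packet and byte counts seen since the last interrupt. The sketch below shows only that classification idea; the thresholds and rates are invented for the example and are not the values igb uses, and the real driver also smooths transitions between classes.

/* Hypothetical byte/packet based interrupt moderation: classify the
 * traffic observed during the last interrupt and map the class to an
 * interrupt rate.  All numbers are made up for illustration.
 */
#include <stdio.h>

enum latency_range { LOWEST_LATENCY, LOW_LATENCY, BULK_LATENCY };

static enum latency_range classify(unsigned packets, unsigned bytes)
{
	if (!packets)
		return LOW_LATENCY;		/* nothing seen, stay moderate */
	if (bytes / packets > 1200 || bytes > 50000)
		return BULK_LATENCY;		/* large frames or high volume */
	if (packets < 5 && bytes < 512)
		return LOWEST_LATENCY;		/* sparse, latency-sensitive */
	return LOW_LATENCY;
}

static unsigned itr_for(enum latency_range r)
{
	static const unsigned ints_per_sec[] = { 70000, 20000, 4000 };

	return ints_per_sec[r];
}

int main(void)
{
	printf("small flow -> %u ints/s\n", itr_for(classify(2, 256)));
	printf("bulk flow  -> %u ints/s\n", itr_for(classify(40, 60000)));
	return 0;
}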
Constants in this function were computed + *  based on theoretical maximum wire speed and thresholds were set based + *  on testing data as well as attempting to minimize response time + *  while increasing bulk throughput. + *  this functionality is controlled by the InterruptThrottleRate module + *  parameter (see igb_param.c) + *  NOTE:  These calculations are only valid when operating in a single- + *         queue environment.   **/  static void igb_update_itr(struct igb_q_vector *q_vector,  			   struct igb_ring_container *ring_container) @@ -4188,12 +4234,12 @@ set_itr_now:  	if (new_itr != q_vector->itr_val) {  		/* this attempts to bias the interrupt rate towards Bulk  		 * by adding intermediate steps when interrupt rate is -		 * increasing */ +		 * increasing +		 */  		new_itr = new_itr > q_vector->itr_val ? -		             max((new_itr * q_vector->itr_val) / -		                 (new_itr + (q_vector->itr_val >> 2)), -				 new_itr) : -			     new_itr; +			  max((new_itr * q_vector->itr_val) / +			  (new_itr + (q_vector->itr_val >> 2)), +			  new_itr) : new_itr;  		/* Don't write the value here; it resets the adapter's  		 * internal timer, and causes us to delay far longer than  		 * we should between interrupts.  Instead, we write the ITR @@ -4320,8 +4366,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)  		default:  			if (unlikely(net_ratelimit())) {  				dev_warn(tx_ring->dev, -				 "partial checksum but proto=%x!\n", -				 first->protocol); +					 "partial checksum but proto=%x!\n", +					 first->protocol);  			}  			break;  		} @@ -4344,8 +4390,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)  		default:  			if (unlikely(net_ratelimit())) {  				dev_warn(tx_ring->dev, -				 "partial checksum but l4 proto=%x!\n", -				 l4_hdr); +					 "partial checksum but l4 proto=%x!\n", +					 l4_hdr);  			}  			break;  		} @@ -4497,8 +4543,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,  	/* set the timestamp */  	first->time_stamp = jiffies; -	/* -	 * Force memory writes to complete before letting h/w know there +	/* Force memory writes to complete before letting h/w know there  	 * are new descriptors to fetch.  (Only applicable for weak-ordered  	 * memory model archs, such as IA-64).  	 * @@ -4519,7 +4564,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,  	writel(i, tx_ring->tail);  	/* we need this if more than one processor can write to our tail -	 * at a time, it syncronizes IO on IA64/Altix systems */ +	 * at a time, it synchronizes IO on IA64/Altix systems +	 */  	mmiowb();  	return; @@ -4549,11 +4595,13 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)  	/* Herbert's original patch had:  	 *  smp_mb__after_netif_stop_queue(); -	 * but since that doesn't exist yet, just open code it. */ +	 * but since that doesn't exist yet, just open code it. +	 */  	smp_mb();  	/* We need to check again in a case another CPU has just -	 * made room available. */ +	 * made room available. 
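[Illustration] The open-coded smp_mb() in __igb_maybe_stop_tx() above exists because another CPU can complete descriptors between the "ring looks full" test and the queue being marked stopped. Below is a C11 model of that stop / full-barrier / re-check pattern; the types and names are illustrative, not the netdev API.

/* Producer: if the ring looks full, mark the queue stopped, issue a
 * full barrier, then re-check; the consumer mirrors this when it frees
 * space.  This avoids a queue that stays stopped because the final
 * completion raced with the stop.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct txq {
	atomic_uint free_descs;
	atomic_bool stopped;
};

static int maybe_stop_tx(struct txq *q, unsigned needed)
{
	if (atomic_load(&q->free_descs) >= needed)
		return 0;			/* plenty of room */

	atomic_store(&q->stopped, true);	/* stop the queue */
	atomic_thread_fence(memory_order_seq_cst);

	/* another CPU may have freed descriptors in the meantime */
	if (atomic_load(&q->free_descs) < needed)
		return -1;			/* genuinely full: stay stopped */

	atomic_store(&q->stopped, false);	/* room appeared: restart */
	return 0;
}

/* consumer side, after reclaiming descriptors */
static void tx_completed(struct txq *q, unsigned reclaimed, unsigned wake_thresh)
{
	atomic_fetch_add(&q->free_descs, reclaimed);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&q->stopped) &&
	    atomic_load(&q->free_descs) >= wake_thresh)
		atomic_store(&q->stopped, false);
}

int main(void)
{
	struct txq q = { 2, false };

	maybe_stop_tx(&q, 8);		/* stops: only 2 descriptors free */
	tx_completed(&q, 16, 8);	/* frees space and restarts the queue */
	return atomic_load(&q.stopped);	/* 0 = queue running again */
}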
+	 */  	if (igb_desc_unused(tx_ring) < size)  		return -EBUSY; @@ -4577,7 +4625,6 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)  netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,  				struct igb_ring *tx_ring)  { -	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);  	struct igb_tx_buffer *first;  	int tso;  	u32 tx_flags = 0; @@ -4612,15 +4659,18 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,  	skb_tx_timestamp(skb); -	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && -		     !(adapter->ptp_tx_skb))) { -		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; -		tx_flags |= IGB_TX_FLAGS_TSTAMP; +	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { +		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); -		adapter->ptp_tx_skb = skb_get(skb); -		adapter->ptp_tx_start = jiffies; -		if (adapter->hw.mac.type == e1000_82576) -			schedule_work(&adapter->ptp_tx_work); +		if (!(adapter->ptp_tx_skb)) { +			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +			tx_flags |= IGB_TX_FLAGS_TSTAMP; + +			adapter->ptp_tx_skb = skb_get(skb); +			adapter->ptp_tx_start = jiffies; +			if (adapter->hw.mac.type == e1000_82576) +				schedule_work(&adapter->ptp_tx_work); +		}  	}  	if (vlan_tx_tag_present(skb)) { @@ -4677,8 +4727,7 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,  		return NETDEV_TX_OK;  	} -	/* -	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb +	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb  	 * in order to meet this minimum size requirement.  	 */  	if (unlikely(skb->len < 17)) { @@ -4692,8 +4741,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,  }  /** - * igb_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure + *  igb_tx_timeout - Respond to a Tx Hang + *  @netdev: network interface device structure   **/  static void igb_tx_timeout(struct net_device *netdev)  { @@ -4722,13 +4771,12 @@ static void igb_reset_task(struct work_struct *work)  }  /** - * igb_get_stats64 - Get System Network Statistics - * @netdev: network interface device structure - * @stats: rtnl_link_stats64 pointer - * + *  igb_get_stats64 - Get System Network Statistics + *  @netdev: network interface device structure + *  @stats: rtnl_link_stats64 pointer   **/  static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, -						 struct rtnl_link_stats64 *stats) +						struct rtnl_link_stats64 *stats)  {  	struct igb_adapter *adapter = netdev_priv(netdev); @@ -4741,11 +4789,11 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,  }  /** - * igb_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size + *  igb_change_mtu - Change the Maximum Transfer Unit + *  @netdev: network interface device structure + *  @new_mtu: new value for maximum frame size   * - * Returns 0 on success, negative on failure + *  Returns 0 on success, negative on failure   **/  static int igb_change_mtu(struct net_device *netdev, int new_mtu)  { @@ -4788,10 +4836,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)  }  /** - * igb_update_stats - Update the board statistics counters - * @adapter: board private structure + *  igb_update_stats - Update the board statistics counters + *  @adapter: board private structure   **/ -  void igb_update_stats(struct igb_adapter *adapter,  		      struct rtnl_link_stats64 *net_stats)  { @@ -4806,8 +4853,7 @@ void 
igb_update_stats(struct igb_adapter *adapter,  #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF -	/* -	 * Prevent stats update while adapter is being reset, or if the pci +	/* Prevent stats update while adapter is being reset, or if the pci  	 * connection is down.  	 */  	if (adapter->link_speed == 0) @@ -4941,7 +4987,8 @@ void igb_update_stats(struct igb_adapter *adapter,  	/* Rx Errors */  	/* RLEC on some newer hardware can be incorrect so build -	 * our own version based on RUC and ROC */ +	 * our own version based on RUC and ROC +	 */  	net_stats->rx_errors = adapter->stats.rxerrc +  		adapter->stats.crcerrs + adapter->stats.algnerrc +  		adapter->stats.ruc + adapter->stats.roc + @@ -5000,7 +5047,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)  		adapter->stats.doosync++;  		/* The DMA Out of Sync is also indication of a spoof event  		 * in IOV mode. Check the Wrong VM Behavior register to -		 * see if it is really a spoof event. */ +		 * see if it is really a spoof event. +		 */  		igb_check_wvbr(adapter);  	} @@ -5074,8 +5122,7 @@ static void igb_update_tx_dca(struct igb_adapter *adapter,  	if (hw->mac.type != e1000_82575)  		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; -	/* -	 * We can enable relaxed ordering for reads, but not writes when +	/* We can enable relaxed ordering for reads, but not writes when  	 * DCA is enabled.  This is due to a known issue in some chipsets  	 * which will cause the DCA tag to be cleared.  	 */ @@ -5096,8 +5143,7 @@ static void igb_update_rx_dca(struct igb_adapter *adapter,  	if (hw->mac.type != e1000_82575)  		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; -	/* -	 * We can enable relaxed ordering for reads, but not writes when +	/* We can enable relaxed ordering for reads, but not writes when  	 * DCA is enabled.  This is due to a known issue in some chipsets  	 * which will cause the DCA tag to be cleared.  	 */ @@ -5166,7 +5212,8 @@ static int __igb_notify_dca(struct device *dev, void *data)  	case DCA_PROVIDER_REMOVE:  		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {  			/* without this a class_device is left -			 * hanging around in the sysfs model */ +			 * hanging around in the sysfs model +			 */  			dca_remove_requester(dev);  			dev_info(&pdev->dev, "DCA disabled\n");  			adapter->flags &= ~IGB_FLAG_DCA_ENABLED; @@ -5179,12 +5226,12 @@ static int __igb_notify_dca(struct device *dev, void *data)  }  static int igb_notify_dca(struct notifier_block *nb, unsigned long event, -                          void *p) +			  void *p)  {  	int ret_val;  	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, -	                                 __igb_notify_dca); +					 __igb_notify_dca);  	return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE;  } @@ -5198,40 +5245,10 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)  	eth_zero_addr(mac_addr);  	igb_set_vf_mac(adapter, vf, mac_addr); -	return 0; -} - -static bool igb_vfs_are_assigned(struct igb_adapter *adapter) -{ -	struct pci_dev *pdev = adapter->pdev; -	struct pci_dev *vfdev; -	int dev_id; - -	switch (adapter->hw.mac.type) { -	case e1000_82576: -		dev_id = IGB_82576_VF_DEV_ID; -		break; -	case e1000_i350: -		dev_id = IGB_I350_VF_DEV_ID; -		break; -	default: -		return false; -	} - -	/* loop through all the VFs to see if we own any that are assigned */ -	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); -	while (vfdev) { -		/* if we don't own it we don't care */ -		if (vfdev->is_virtfn && vfdev->physfn == pdev) { -			/* if it is assigned we cannot release it */ -			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) -				return true; -		} - -		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev); -	} +	/* By default spoof check is enabled for all VFs */ +	adapter->vf_data[vf].spoofchk_enabled = true; -	return false; +	return 0;  }  #endif @@ -5256,7 +5273,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)  	struct vf_data_storage *vf_data = &adapter->vf_data[vf];  	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | -	                    IGB_VF_FLAG_MULTI_PROMISC); +			    IGB_VF_FLAG_MULTI_PROMISC);  	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);  	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { @@ -5264,8 +5281,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)  		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;  		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;  	} else { -		/* -		 * if we have hashes and we are clearing a multicast promisc +		/* if we have hashes and we are clearing a multicast promisc  		 * flag we need to write the hashes to the MTA as this step  		 * was previously skipped  		 */ @@ -5286,7 +5302,6 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)  		return -EINVAL;  	return 0; -  }  static int igb_set_vf_multicasts(struct igb_adapter *adapter, @@ -5493,30 +5508,91 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,  			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);  		if (test_bit(__IGB_DOWN, &adapter->state)) {  			dev_warn(&adapter->pdev->dev, -				 "The VF VLAN has been set," -				 " but the PF device is not up.\n"); +				 "The VF VLAN has been set, but the PF device is not up.\n");  			dev_warn(&adapter->pdev->dev, -				 "Bring the PF device up before" -				 " attempting to use the VF device.\n"); +				 "Bring the PF device up before attempting to use the VF device.\n");  		}  	} else {  		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, -				   false, vf); +			     false, vf);  		igb_set_vmvir(adapter, vlan, vf);  		igb_set_vmolr(adapter, vf, true);  		adapter->vf_data[vf].pf_vlan = 0;  		adapter->vf_data[vf].pf_qos = 0; -       } +	}  out: -       return err; +	return err; +} + +static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) +{ +	struct e1000_hw *hw = &adapter->hw; +	int i; +	u32 reg; + +	/* Find the vlan filter for this id */ +	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { +		reg = rd32(E1000_VLVF(i)); +		if ((reg & E1000_VLVF_VLANID_ENABLE) && +		    vid == (reg & E1000_VLVF_VLANID_MASK)) +			break; +	} + +	if (i >= E1000_VLVF_ARRAY_SIZE) +		i = -1; + +	return i;  }  static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) 
 { +	struct e1000_hw *hw = &adapter->hw;  	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;  	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); +	int err = 0; + +	/* If in promiscuous mode we need to make sure the PF also has +	 * the VLAN filter set. +	 */ +	if (add && (adapter->netdev->flags & IFF_PROMISC)) +		err = igb_vlvf_set(adapter, vid, add, +				   adapter->vfs_allocated_count); +	if (err) +		goto out; -	return igb_vlvf_set(adapter, vid, add, vf); +	err = igb_vlvf_set(adapter, vid, add, vf); + +	if (err) +		goto out; + +	/* Go through all the checks to see if the VLAN filter should +	 * be wiped completely. +	 */ +	if (!add && (adapter->netdev->flags & IFF_PROMISC)) { +		u32 vlvf, bits; + +		int regndx = igb_find_vlvf_entry(adapter, vid); +		if (regndx < 0) +			goto out; +		/* See if any other pools are set for this VLAN filter +		 * entry other than the PF. +		 */ +		vlvf = bits = rd32(E1000_VLVF(regndx)); +		bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT + +			      adapter->vfs_allocated_count); +		/* If the filter was removed then ensure PF pool bit +		 * is cleared if the PF only added itself to the pool +		 * because the PF is in promiscuous mode. +		 */ +		if ((vlvf & VLAN_VID_MASK) == vid && +		    !test_bit(vid, adapter->active_vlans) && +		    !bits) +			igb_vlvf_set(adapter, vid, add, +				     adapter->vfs_allocated_count); +	} + +out: +	return err;  }  static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) @@ -5586,8 +5662,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)  static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)  { -	/* -	 * The VF MAC Address is stored in a packed array of bytes +	/* The VF MAC Address is stored in a packed array of bytes  	 * starting at the second 32 bit word of the msg array  	 */  	unsigned char *addr = (char *)&msg[1]; @@ -5636,11 +5711,9 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)  	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))  		return; -	/* -	 * until the vf completes a reset it should not be +	/* until the vf completes a reset it should not be  	 * allowed to start any configuration.  	 
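[Illustration] The new igb_find_vlvf_entry() above scans the VLVF array for an enabled entry whose VLAN id matches, and igb_set_vf_vlan() then inspects the pool-select bits to decide whether only the PF still references the filter. The stand-alone model below mirrors the field names used in the hunk (VLANID_MASK, VLANID_ENABLE, POOLSEL_SHIFT); the exact bit positions and the 8-pool width are assumptions of this sketch.

/* Model of a VLVF-style table: each 32-bit entry packs a 12-bit VLAN id,
 * an enable bit, and a per-pool bitmap of users (assumed layout).
 */
#include <stdint.h>
#include <stdio.h>

#define VLVF_ARRAY_SIZE		32
#define VLVF_VLANID_MASK	0x00000FFFu
#define VLVF_VLANID_ENABLE	0x80000000u
#define VLVF_POOLSEL_SHIFT	12

static int find_vlvf_entry(const uint32_t *vlvf, unsigned vid)
{
	for (int i = 0; i < VLVF_ARRAY_SIZE; i++) {
		uint32_t reg = vlvf[i];

		if ((reg & VLVF_VLANID_ENABLE) &&
		    vid == (reg & VLVF_VLANID_MASK))
			return i;
	}
	return -1;			/* no filter programmed for this vid */
}

/* true if any pool other than pf_pool still references the entry */
static int other_pools_set(uint32_t reg, unsigned pf_pool)
{
	uint32_t pools = (reg >> VLVF_POOLSEL_SHIFT) & 0xFFu;	/* 8 pools assumed */

	return (pools & ~(1u << pf_pool)) != 0;
}

int main(void)
{
	uint32_t vlvf[VLVF_ARRAY_SIZE] = {
		[3] = VLVF_VLANID_ENABLE | (1u << (VLVF_POOLSEL_SHIFT + 7)) | 100,
	};
	int idx = find_vlvf_entry(vlvf, 100);

	printf("vid 100 -> entry %d, other pools: %s\n", idx,
	       idx >= 0 && other_pools_set(vlvf[idx], 7) ? "yes" : "no");
	return 0;
}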
*/ -  	if (msgbuf[0] == E1000_VF_RESET) {  		igb_vf_reset_msg(adapter, vf);  		return; @@ -5660,9 +5733,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)  			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);  		else  			dev_warn(&pdev->dev, -				 "VF %d attempted to override administratively " -				 "set MAC address\nReload the VF driver to " -				 "resume operations\n", vf); +				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", +				 vf);  		break;  	case E1000_VF_SET_PROMISC:  		retval = igb_set_vf_promisc(adapter, msgbuf, vf); @@ -5677,9 +5749,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)  		retval = -1;  		if (vf_data->pf_vlan)  			dev_warn(&pdev->dev, -				 "VF %d attempted to override administratively " -				 "set VLAN tag\nReload the VF driver to " -				 "resume operations\n", vf); +				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", +				 vf);  		else  			retval = igb_set_vf_vlan(adapter, msgbuf, vf);  		break; @@ -5748,9 +5819,9 @@ static void igb_set_uta(struct igb_adapter *adapter)  }  /** - * igb_intr_msi - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure + *  igb_intr_msi - Interrupt Handler + *  @irq: interrupt number + *  @data: pointer to a network interface device structure   **/  static irqreturn_t igb_intr_msi(int irq, void *data)  { @@ -5793,9 +5864,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)  }  /** - * igb_intr - Legacy Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure + *  igb_intr - Legacy Interrupt Handler + *  @irq: interrupt number + *  @data: pointer to a network interface device structure   **/  static irqreturn_t igb_intr(int irq, void *data)  { @@ -5803,11 +5874,13 @@ static irqreturn_t igb_intr(int irq, void *data)  	struct igb_q_vector *q_vector = adapter->q_vector[0];  	struct e1000_hw *hw = &adapter->hw;  	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  
No -	 * need for the IMC write */ +	 * need for the IMC write +	 */  	u32 icr = rd32(E1000_ICR);  	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is -	 * not set, then the adapter didn't send an interrupt */ +	 * not set, then the adapter didn't send an interrupt +	 */  	if (!(icr & E1000_ICR_INT_ASSERTED))  		return IRQ_NONE; @@ -5866,15 +5939,15 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)  }  /** - * igb_poll - NAPI Rx polling callback - * @napi: napi polling structure - * @budget: count of how many packets we should handle + *  igb_poll - NAPI Rx polling callback + *  @napi: napi polling structure + *  @budget: count of how many packets we should handle   **/  static int igb_poll(struct napi_struct *napi, int budget)  {  	struct igb_q_vector *q_vector = container_of(napi, -	                                             struct igb_q_vector, -	                                             napi); +						     struct igb_q_vector, +						     napi);  	bool clean_complete = true;  #ifdef CONFIG_IGB_DCA @@ -5899,10 +5972,10 @@ static int igb_poll(struct napi_struct *napi, int budget)  }  /** - * igb_clean_tx_irq - Reclaim resources after transmit completes - * @q_vector: pointer to q_vector containing needed info + *  igb_clean_tx_irq - Reclaim resources after transmit completes + *  @q_vector: pointer to q_vector containing needed info   * - * returns true if ring is completely cleaned + *  returns true if ring is completely cleaned   **/  static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)  { @@ -6008,7 +6081,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)  		struct e1000_hw *hw = &adapter->hw;  		/* Detect a transmit hang in hardware, this serializes the -		 * check with the clearing of time_stamp and movement of i */ +		 * check with the clearing of time_stamp and movement of i +		 */  		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);  		if (tx_buffer->next_to_watch &&  		    time_after(jiffies, tx_buffer->time_stamp + @@ -6047,8 +6121,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)  #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)  	if (unlikely(total_packets && -		     netif_carrier_ok(tx_ring->netdev) && -		     igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { +	    netif_carrier_ok(tx_ring->netdev) && +	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {  		/* Make sure that anybody stopping the queue after this  		 * sees the new next_to_clean.  		 
*/ @@ -6069,11 +6143,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)  }  /** - * igb_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused + *  igb_reuse_rx_page - page flip buffer and store it back on the ring + *  @rx_ring: rx descriptor ring to store buffers on + *  @old_buff: donor buffer to have page reused   * - * Synchronizes page for reuse by the adapter + *  Synchronizes page for reuse by the adapter   **/  static void igb_reuse_rx_page(struct igb_ring *rx_ring,  			      struct igb_rx_buffer *old_buff) @@ -6133,19 +6207,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,  }  /** - * igb_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware - * @skb: sk_buff to place the data into + *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff + *  @rx_ring: rx descriptor ring to transact packets on + *  @rx_buffer: buffer containing page to add + *  @rx_desc: descriptor containing length of buffer written by hardware + *  @skb: sk_buff to place the data into   * - * This function will add the data contained in rx_buffer->page to the skb. - * This is done either through a direct copy if the data in the buffer is - * less than the skb header size, otherwise it will just attach the page as - * a frag to the skb. + *  This function will add the data contained in rx_buffer->page to the skb. + *  This is done either through a direct copy if the data in the buffer is + *  less than the skb header size, otherwise it will just attach the page as + *  a frag to the skb.   * - * The function will then update the page offset if necessary and return - * true if the buffer can be reused by the adapter. + *  The function will then update the page offset if necessary and return + *  true if the buffer can be reused by the adapter.   **/  static bool igb_add_rx_frag(struct igb_ring *rx_ring,  			    struct igb_rx_buffer *rx_buffer, @@ -6216,8 +6290,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,  			return NULL;  		} -		/* -		 * we will be copying header into skb->data in +		/* we will be copying header into skb->data in  		 * pskb_may_pull so it is in our interest to prefetch  		 * it now to avoid a possible cache miss  		 */ @@ -6265,8 +6338,7 @@ static inline void igb_rx_checksum(struct igb_ring *ring,  	if (igb_test_staterr(rx_desc,  			     E1000_RXDEXT_STATERR_TCPE |  			     E1000_RXDEXT_STATERR_IPE)) { -		/* -		 * work around errata with sctp packets where the TCPE aka +		/* work around errata with sctp packets where the TCPE aka  		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)  		 * packets, (aka let the stack check the crc32c)  		 */ @@ -6297,15 +6369,15 @@ static inline void igb_rx_hash(struct igb_ring *ring,  }  /** - * igb_is_non_eop - process handling of non-EOP buffers - * @rx_ring: Rx ring being processed - * @rx_desc: Rx descriptor for current buffer - * @skb: current socket buffer containing buffer in progress + *  igb_is_non_eop - process handling of non-EOP buffers + *  @rx_ring: Rx ring being processed + *  @rx_desc: Rx descriptor for current buffer + *  @skb: current socket buffer containing buffer in progress   * - * This function updates next to clean.  
If the buffer is an EOP buffer - * this function exits returning false, otherwise it will place the - * sk_buff in the next buffer to be chained and return true indicating - * that this is in fact a non-EOP buffer. + *  This function updates next to clean.  If the buffer is an EOP buffer + *  this function exits returning false, otherwise it will place the + *  sk_buff in the next buffer to be chained and return true indicating + *  that this is in fact a non-EOP buffer.   **/  static bool igb_is_non_eop(struct igb_ring *rx_ring,  			   union e1000_adv_rx_desc *rx_desc) @@ -6325,15 +6397,15 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,  }  /** - * igb_get_headlen - determine size of header for LRO/GRO - * @data: pointer to the start of the headers - * @max_len: total length of section to find headers in + *  igb_get_headlen - determine size of header for LRO/GRO + *  @data: pointer to the start of the headers + *  @max_len: total length of section to find headers in   * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, and GRO offloads.  The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. + *  This function is meant to determine the length of headers that will + *  be recognized by hardware for LRO, and GRO offloads.  The main + *  motivation of doing this is to only perform one pull for IPv4 TCP + *  packets so that we can do basic things like calculating the gso_size + *  based on the average data per packet.   **/  static unsigned int igb_get_headlen(unsigned char *data,  				    unsigned int max_len) @@ -6384,7 +6456,7 @@ static unsigned int igb_get_headlen(unsigned char *data,  			return hdr.network - data;  		/* record next protocol if header is present */ -		if (!hdr.ipv4->frag_off) +		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))  			nexthdr = hdr.ipv4->protocol;  	} else if (protocol == __constant_htons(ETH_P_IPV6)) {  		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) @@ -6420,8 +6492,7 @@ static unsigned int igb_get_headlen(unsigned char *data,  		hdr.network += sizeof(struct udphdr);  	} -	/* -	 * If everything has gone correctly hdr.network should be the +	/* If everything has gone correctly hdr.network should be the  	 * data section of the packet and will be the end of the header.  	 * If not then it probably represents the end of the last recognized  	 * header. @@ -6433,17 +6504,17 @@ static unsigned int igb_get_headlen(unsigned char *data,  }  /** - * igb_pull_tail - igb specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being adjusted + *  igb_pull_tail - igb specific version of skb_pull_tail + *  @rx_ring: rx descriptor ring packet is being transacted on + *  @rx_desc: pointer to the EOP Rx descriptor + *  @skb: pointer to current skb being adjusted   * - * This function is an igb specific version of __pskb_pull_tail.  The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. + *  This function is an igb specific version of __pskb_pull_tail.  
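[Illustration] One functional change in the igb_get_headlen() hunk above is the fragment test: the old `!hdr.ipv4->frag_off` treated any set bit in the field (including the DF flag) as "fragmented", while the new test masks with IP_OFFSET so only a real fragment offset stops header parsing; a first fragment (MF set, offset 0) still passes, which is fine because it carries the headers. A small stand-alone check of that distinction, with the IPv4 field constants spelled out locally:

/* Demonstrates why masking frag_off with the offset bits matters: the
 * 16-bit field also carries the DF and MF flags, so "field != 0" is not
 * the same as "this is a later fragment".  The offset lives in the low
 * 13 bits of the (network byte order) field.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define IP_OFFSET_BITS	0x1FFF	/* fragment offset, in 8-byte units */
#define IP_DF_FLAG	0x4000	/* don't-fragment flag */

static int is_later_fragment(uint16_t frag_off_be)
{
	return (frag_off_be & htons(IP_OFFSET_BITS)) != 0;
}

int main(void)
{
	uint16_t df_only = htons(IP_DF_FLAG);	/* typical unfragmented TCP packet */
	uint16_t frag    = htons(0x0010);	/* fragment at offset 16 * 8 bytes */

	printf("DF-only packet: old test says fragmented=%d, new test=%d\n",
	       df_only != 0, is_later_fragment(df_only));
	printf("real fragment:  old test says fragmented=%d, new test=%d\n",
	       frag != 0, is_later_fragment(frag));
	return 0;
}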
The + *  main difference between this version and the original function is that + *  this function can make several assumptions about the state of things + *  that allow for significant optimizations versus the standard function. + *  As a result we can do things like drop a frag and maintain an accurate + *  truesize for the skb.   */  static void igb_pull_tail(struct igb_ring *rx_ring,  			  union e1000_adv_rx_desc *rx_desc, @@ -6453,8 +6524,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,  	unsigned char *va;  	unsigned int pull_len; -	/* -	 * it is valid to use page_address instead of kmap since we are +	/* it is valid to use page_address instead of kmap since we are  	 * working with pages allocated out of the lomem pool per  	 * alloc_page(GFP_ATOMIC)  	 */ @@ -6474,8 +6544,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,  		va += IGB_TS_HDR_LEN;  	} -	/* -	 * we need the header to contain the greater of either ETH_HLEN or +	/* we need the header to contain the greater of either ETH_HLEN or  	 * 60 bytes if the skb->len is less than 60 for skb_pad.  	 */  	pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); @@ -6491,24 +6560,23 @@ static void igb_pull_tail(struct igb_ring *rx_ring,  }  /** - * igb_cleanup_headers - Correct corrupted or empty headers - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being fixed + *  igb_cleanup_headers - Correct corrupted or empty headers + *  @rx_ring: rx descriptor ring packet is being transacted on + *  @rx_desc: pointer to the EOP Rx descriptor + *  @skb: pointer to current skb being fixed   * - * Address the case where we are pulling data in on pages only - * and as such no data is present in the skb header. + *  Address the case where we are pulling data in on pages only + *  and as such no data is present in the skb header.   * - * In addition if skb is not at least 60 bytes we need to pad it so that - * it is large enough to qualify as a valid Ethernet frame. + *  In addition if skb is not at least 60 bytes we need to pad it so that + *  it is large enough to qualify as a valid Ethernet frame.   * - * Returns true if an error was encountered and skb was freed. + *  Returns true if an error was encountered and skb was freed.   **/  static bool igb_cleanup_headers(struct igb_ring *rx_ring,  				union e1000_adv_rx_desc *rx_desc,  				struct sk_buff *skb)  { -  	if (unlikely((igb_test_staterr(rx_desc,  				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {  		struct net_device *netdev = rx_ring->netdev; @@ -6535,14 +6603,14 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,  }  /** - * igb_process_skb_fields - Populate skb header fields from Rx descriptor - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being populated + *  igb_process_skb_fields - Populate skb header fields from Rx descriptor + *  @rx_ring: rx descriptor ring packet is being transacted on + *  @rx_desc: pointer to the EOP Rx descriptor + *  @skb: pointer to current skb being populated   * - * This function checks the ring, descriptor, and packet information in - * order to populate the hash, checksum, VLAN, timestamp, protocol, and - * other fields within the skb. + *  This function checks the ring, descriptor, and packet information in + *  order to populate the hash, checksum, VLAN, timestamp, protocol, and + *  other fields within the skb.   
**/  static void igb_process_skb_fields(struct igb_ring *rx_ring,  				   union e1000_adv_rx_desc *rx_desc, @@ -6556,7 +6624,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,  	igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); -	if ((dev->features & NETIF_F_HW_VLAN_RX) && +	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&  	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {  		u16 vid;  		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && @@ -6565,7 +6633,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,  		else  			vid = le16_to_cpu(rx_desc->wb.upper.vlan); -		__vlan_hwaccel_put_tag(skb, vid); +		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);  	}  	skb_record_rx_queue(skb, rx_ring->queue_index); @@ -6670,8 +6738,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,  	/* map page for use */  	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); -	/* -	 * if mapping failed free memory back to system since +	/* if mapping failed free memory back to system since  	 * there isn't much point in holding memory we can't use  	 */  	if (dma_mapping_error(rx_ring->dev, dma)) { @@ -6689,8 +6756,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,  }  /** - * igb_alloc_rx_buffers - Replace used receive buffers; packet split - * @adapter: address of board private structure + *  igb_alloc_rx_buffers - Replace used receive buffers; packet split + *  @adapter: address of board private structure   **/  void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)  { @@ -6710,8 +6777,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)  		if (!igb_alloc_mapped_page(rx_ring, bi))  			break; -		/* -		 * Refresh the desc even if buffer_addrs didn't change +		/* Refresh the desc even if buffer_addrs didn't change  		 * because each write-back erases this info.  		 */  		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); @@ -6740,8 +6806,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)  		/* update next to alloc since we have filled the ring */  		rx_ring->next_to_alloc = i; -		/* -		 * Force memory writes to complete before letting h/w +		/* Force memory writes to complete before letting h/w  		 * know there are new descriptors to fetch.  (Only  		 * applicable for weak-ordered memory model archs,  		 * such as IA-64). 
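[Illustration] The refill path above re-writes pkt_addr in every descriptor handed back to hardware (write-back clobbers it) and only then bumps the tail register, with a write barrier in between. Below is a compact user-space model of that producer sequence; the structures are illustrative and the release fence stands in for the driver's wmb() before writel(tail).

/* Rx refill producer: republish the DMA address in each descriptor
 * being returned, then make those stores visible before advancing the
 * tail index the device polls.
 */
#include <stdatomic.h>
#include <stdint.h>

struct rx_desc {
	uint64_t pkt_addr;	/* where hardware should DMA the packet */
};

struct rx_ring {
	struct rx_desc *desc;
	uint64_t *buf_dma;	/* per-slot buffer addresses we track */
	uint16_t count;
	uint16_t next_to_use;
	_Atomic uint16_t tail;	/* stand-in for the RDT tail register */
};

static void alloc_rx_buffers(struct rx_ring *ring, uint16_t cleaned)
{
	uint16_t i = ring->next_to_use;

	while (cleaned--) {
		/* write-back erased this, so always re-write it */
		ring->desc[i].pkt_addr = ring->buf_dma[i];
		if (++i == ring->count)
			i = 0;
	}

	if (i != ring->next_to_use) {
		ring->next_to_use = i;
		/* descriptor stores must be visible before the tail bump */
		atomic_thread_fence(memory_order_release);
		atomic_store_explicit(&ring->tail, i, memory_order_relaxed);
	}
}

int main(void)
{
	struct rx_desc d[4] = { 0 };
	uint64_t dma[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	struct rx_ring r = { d, dma, 4, 0, 0 };

	alloc_rx_buffers(&r, 3);
	return atomic_load(&r.tail) == 3 ? 0 : 1;
}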
@@ -6826,7 +6891,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)  	struct igb_adapter *adapter = netdev_priv(netdev);  	struct e1000_hw *hw = &adapter->hw;  	u32 ctrl, rctl; -	bool enable = !!(features & NETIF_F_HW_VLAN_RX); +	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);  	if (enable) {  		/* enable VLAN tag insert/strip */ @@ -6848,7 +6913,8 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)  	igb_rlpml_set(adapter);  } -static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int igb_vlan_rx_add_vid(struct net_device *netdev, +			       __be16 proto, u16 vid)  {  	struct igb_adapter *adapter = netdev_priv(netdev);  	struct e1000_hw *hw = &adapter->hw; @@ -6865,7 +6931,8 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)  	return 0;  } -static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int igb_vlan_rx_kill_vid(struct net_device *netdev, +				__be16 proto, u16 vid)  {  	struct igb_adapter *adapter = netdev_priv(netdev);  	struct e1000_hw *hw = &adapter->hw; @@ -6891,7 +6958,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)  	igb_vlan_mode(adapter->netdev, adapter->netdev->features);  	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) -		igb_vlan_rx_add_vid(adapter->netdev, vid); +		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);  }  int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) @@ -6902,15 +6969,24 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)  	mac->autoneg = 0;  	/* Make sure dplx is at most 1 bit and lsb of speed is not set -	 * for the switch() below to work */ +	 * for the switch() below to work +	 */  	if ((spd & 1) || (dplx & ~1))  		goto err_inval; -	/* Fiber NIC's only allow 1000 Gbps Full duplex */ -	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && -	    spd != SPEED_1000 && -	    dplx != DUPLEX_FULL) -		goto err_inval; +	/* Fiber NIC's only allow 1000 gbps Full duplex +	 * and 100Mbps Full duplex for 100baseFx sfp +	 */ +	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { +		switch (spd + dplx) { +		case SPEED_10 + DUPLEX_HALF: +		case SPEED_10 + DUPLEX_FULL: +		case SPEED_100 + DUPLEX_HALF: +			goto err_inval; +		default: +			break; +		} +	}  	switch (spd + dplx) {  	case SPEED_10 + DUPLEX_HALF: @@ -7009,7 +7085,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,  		igb_power_up_link(adapter);  	/* Release control of h/w to f/w.  If f/w is AMT enabled, this -	 * would have already happened in close and is redundant. */ +	 * would have already happened in close and is redundant. +	 */  	igb_release_hw_control(adapter);  	pci_disable_device(pdev); @@ -7071,7 +7148,8 @@ static int igb_resume(struct device *dev)  	igb_reset(adapter);  	/* let the f/w know that the h/w is now under the control of the -	 * driver. */ +	 * driver. +	 */  	igb_get_hw_control(adapter);  	wr32(E1000_WUS, ~0); @@ -7207,8 +7285,7 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)  }  #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs +/* Polling 'interrupt' - used by things like netconsole to send skbs   * without having to re-enable interrupts. It's not called while   * the interrupt routine is executing.   
*/ @@ -7231,13 +7308,13 @@ static void igb_netpoll(struct net_device *netdev)  #endif /* CONFIG_NET_POLL_CONTROLLER */  /** - * igb_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state + *  igb_io_error_detected - called when PCI error is detected + *  @pdev: Pointer to PCI device + *  @state: The current pci connection state   * - * This function is called after a PCI bus error affecting - * this device has been detected. - */ + *  This function is called after a PCI bus error affecting + *  this device has been detected. + **/  static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,  					      pci_channel_state_t state)  { @@ -7258,12 +7335,12 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,  }  /** - * igb_io_slot_reset - called after the pci bus has been reset. - * @pdev: Pointer to PCI device + *  igb_io_slot_reset - called after the pci bus has been reset. + *  @pdev: Pointer to PCI device   * - * Restart the card from scratch, as if from a cold-boot. Implementation - * resembles the first-half of the igb_resume routine. - */ + *  Restart the card from scratch, as if from a cold-boot. Implementation + *  resembles the first-half of the igb_resume routine. + **/  static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)  {  	struct net_device *netdev = pci_get_drvdata(pdev); @@ -7291,8 +7368,9 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)  	err = pci_cleanup_aer_uncorrect_error_status(pdev);  	if (err) { -		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " -		        "failed 0x%0x\n", err); +		dev_err(&pdev->dev, +			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", +			err);  		/* non-fatal, continue */  	} @@ -7300,12 +7378,12 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)  }  /** - * igb_io_resume - called when traffic can start flowing again. - * @pdev: Pointer to PCI device + *  igb_io_resume - called when traffic can start flowing again. + *  @pdev: Pointer to PCI device   * - * This callback is called when the error recovery driver tells us that - * its OK to resume normal operation. Implementation resembles the - * second-half of the igb_resume routine. + *  This callback is called when the error recovery driver tells us that + *  its OK to resume normal operation. Implementation resembles the + *  second-half of the igb_resume routine.   */  static void igb_io_resume(struct pci_dev *pdev)  { @@ -7322,12 +7400,13 @@ static void igb_io_resume(struct pci_dev *pdev)  	netif_device_attach(netdev);  	/* let the f/w know that the h/w is now under the control of the -	 * driver. */ +	 * driver. +	 */  	igb_get_hw_control(adapter);  }  static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, -                             u8 qsel) +			     u8 qsel)  {  	u32 rar_low, rar_high;  	struct e1000_hw *hw = &adapter->hw; @@ -7336,7 +7415,7 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,  	 * from network order (big endian) to little endian  	 */  	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | -	          ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); +		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));  	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));  	/* Indicate to hardware the Address is Valid. 
*/ @@ -7354,11 +7433,12 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,  }  static int igb_set_vf_mac(struct igb_adapter *adapter, -                          int vf, unsigned char *mac_addr) +			  int vf, unsigned char *mac_addr)  {  	struct e1000_hw *hw = &adapter->hw;  	/* VF MAC addresses start at end of receive addresses and moves -	 * torwards the first, as a result a collision should not be possible */ +	 * towards the first, as a result a collision should not be possible +	 */  	int rar_entry = hw->mac.rar_entry_count - (vf + 1);  	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); @@ -7375,13 +7455,13 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)  		return -EINVAL;  	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;  	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); -	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" -				      " change effective."); +	dev_info(&adapter->pdev->dev, +		 "Reload the VF driver to make this change effective.");  	if (test_bit(__IGB_DOWN, &adapter->state)) { -		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," -			 " but the PF device is not up.\n"); -		dev_warn(&adapter->pdev->dev, "Bring the PF device up before" -			 " attempting to use the VF device.\n"); +		dev_warn(&adapter->pdev->dev, +			 "The VF MAC address has been set, but the PF device is not up.\n"); +		dev_warn(&adapter->pdev->dev, +			 "Bring the PF device up before attempting to use the VF device.\n");  	}  	return igb_set_vf_mac(adapter, vf, mac);  } @@ -7408,19 +7488,19 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,  		/* Calculate the rate factor values to set */  		rf_int = link_speed / tx_rate;  		rf_dec = (link_speed - (rf_int * tx_rate)); -		rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; +		rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / +			 tx_rate;  		bcnrc_val = E1000_RTTBCNRC_RS_ENA; -		bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) & -		               E1000_RTTBCNRC_RF_INT_MASK); +		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & +			      E1000_RTTBCNRC_RF_INT_MASK);  		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);  	} else {  		bcnrc_val = 0;  	}  	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ -	/* -	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM +	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM  	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.  	 */  	wr32(E1000_RTTBCNRM, 0x14); @@ -7442,8 +7522,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)  		reset_rate = true;  		adapter->vf_rate_link_speed = 0;  		dev_info(&adapter->pdev->dev, -		         "Link speed has been changed. VF Transmit " -		         "rate is disabled\n"); +			 "Link speed has been changed. 
VF Transmit rate is disabled\n");  	}  	for (i = 0; i < adapter->vfs_allocated_count; i++) { @@ -7451,8 +7530,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)  			adapter->vf_data[i].tx_rate = 0;  		igb_set_vf_rate_limit(&adapter->hw, i, -		                      adapter->vf_data[i].tx_rate, -		                      actual_link_speed); +				      adapter->vf_data[i].tx_rate, +				      actual_link_speed);  	}  } @@ -7478,6 +7557,33 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)  	return 0;  } +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, +				   bool setting) +{ +	struct igb_adapter *adapter = netdev_priv(netdev); +	struct e1000_hw *hw = &adapter->hw; +	u32 reg_val, reg_offset; + +	if (!adapter->vfs_allocated_count) +		return -EOPNOTSUPP; + +	if (vf >= adapter->vfs_allocated_count) +		return -EINVAL; + +	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; +	reg_val = rd32(reg_offset); +	if (setting) +		reg_val |= ((1 << vf) | +			    (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); +	else +		reg_val &= ~((1 << vf) | +			     (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); +	wr32(reg_offset, reg_val); + +	adapter->vf_data[vf].spoofchk_enabled = setting; +	return E1000_SUCCESS; +} +  static int igb_ndo_get_vf_config(struct net_device *netdev,  				 int vf, struct ifla_vf_info *ivi)  { @@ -7489,6 +7595,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,  	ivi->tx_rate = adapter->vf_data[vf].tx_rate;  	ivi->vlan = adapter->vf_data[vf].pf_vlan;  	ivi->qos = adapter->vf_data[vf].pf_qos; +	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;  	return 0;  } @@ -7501,6 +7608,7 @@ static void igb_vmm_control(struct igb_adapter *adapter)  	case e1000_82575:  	case e1000_i210:  	case e1000_i211: +	case e1000_i354:  	default:  		/* replication is not supported for 82575 */  		return; @@ -7523,7 +7631,7 @@ static void igb_vmm_control(struct igb_adapter *adapter)  		igb_vmdq_set_loopback_pf(hw, true);  		igb_vmdq_set_replication_pf(hw, true);  		igb_vmdq_set_anti_spoofing_pf(hw, true, -						adapter->vfs_allocated_count); +					      adapter->vfs_allocated_count);  	} else {  		igb_vmdq_set_loopback_pf(hw, false);  		igb_vmdq_set_replication_pf(hw, false); @@ -7543,8 +7651,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)  			/* force threshold to 0. */  			wr32(E1000_DMCTXTH, 0); -			/* -			 * DMA Coalescing high water mark needs to be greater +			/* DMA Coalescing high water mark needs to be greater  			 * than the Rx threshold. Set hwm to PBA - max frame  			 * size in 16B units, capping it at PBA - 6KB.  			 */ @@ -7557,8 +7664,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)  				& E1000_FCRTC_RTH_COAL_MASK);  			wr32(E1000_FCRTC, reg); -			/* -			 * Set the DMA Coalescing Rx threshold to PBA - 2 * max +			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max  			 * frame size, capping it at PBA - 10KB.  			 
*/  			dmac_thr = pba - adapter->max_frame_size / 512; @@ -7576,11 +7682,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)  			reg |= (1000 >> 5);  			/* Disable BMC-to-OS Watchdog Enable */ -			reg &= ~E1000_DMACR_DC_BMC2OSW_EN; +			if (hw->mac.type != e1000_i354) +				reg &= ~E1000_DMACR_DC_BMC2OSW_EN; +  			wr32(E1000_DMACR, reg); -			/* -			 * no lower threshold to disable +			/* no lower threshold to disable  			 * coalescing(smart fifb)-UTRESH=0  			 */  			wr32(E1000_DMCRTRH, 0); @@ -7589,15 +7696,13 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)  			wr32(E1000_DMCTLX, reg); -			/* -			 * free space in tx packet buffer to wake from +			/* free space in tx packet buffer to wake from  			 * DMA coal  			 */  			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -  			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); -			/* -			 * make low power state decision controlled +			/* make low power state decision controlled  			 * by DMA coal  			 */  			reg = rd32(E1000_PCIEMISC); @@ -7611,7 +7716,8 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)  	}  } -/*  igb_read_i2c_byte - Reads 8 bit word over I2C +/** + *  igb_read_i2c_byte - Reads 8 bit word over I2C   *  @hw: pointer to hardware structure   *  @byte_offset: byte offset to read   *  @dev_addr: device address @@ -7619,9 +7725,9 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)   *   *  Performs byte read operation over I2C interface at   *  a specified device address. - */ + **/  s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, -				u8 dev_addr, u8 *data) +		      u8 dev_addr, u8 *data)  {  	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);  	struct i2c_client *this_client = adapter->i2c_client; @@ -7648,7 +7754,8 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,  	}  } -/*  igb_write_i2c_byte - Writes 8 bit word over I2C +/** + *  igb_write_i2c_byte - Writes 8 bit word over I2C   *  @hw: pointer to hardware structure   *  @byte_offset: byte offset to write   *  @dev_addr: device address @@ -7656,9 +7763,9 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,   *   *  Performs byte write operation over I2C interface at   *  a specified device address. - */ + **/  s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, -				 u8 dev_addr, u8 data) +		       u8 dev_addr, u8 data)  {  	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);  	struct i2c_client *this_client = adapter->i2c_client; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 0a237507ee8..7e8c477b0ab 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1,5 +1,4 @@ -/* - * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 +/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580   *   * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>   * @@ -27,8 +26,7 @@  #define INCVALUE_MASK		0x7fffffff  #define ISGN			0x80000000 -/* - * The 82580 timesync updates the system timer every 8ns by 8ns, +/* The 82580 timesync updates the system timer every 8ns by 8ns,   * and this update value cannot be reprogrammed.   
*   * Neither the 82576 nor the 82580 offer registers wide enough to hold @@ -77,10 +75,7 @@  #define INCVALUE_82576			(16 << IGB_82576_TSYNC_SHIFT)  #define IGB_NBITS_82580			40 -/* - * SYSTIM read access for the 82576 - */ - +/* SYSTIM read access for the 82576 */  static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)  {  	struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); @@ -97,10 +92,7 @@ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)  	return val;  } -/* - * SYSTIM read access for the 82580 - */ - +/* SYSTIM read access for the 82580 */  static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)  {  	struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); @@ -108,8 +100,7 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)  	u64 val;  	u32 lo, hi, jk; -	/* -	 * The timestamp latches on lowest register read. For the 82580 +	/* The timestamp latches on lowest register read. For the 82580  	 * the lowest register is SYSTIMR instead of SYSTIML.  However we only  	 * need to provide nanosecond resolution, so we just ignore it.  	 */ @@ -123,17 +114,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)  	return val;  } -/* - * SYSTIM read access for I210/I211 - */ - +/* SYSTIM read access for I210/I211 */  static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)  {  	struct e1000_hw *hw = &adapter->hw;  	u32 sec, nsec, jk; -	/* -	 * The timestamp latches on lowest register read. For I210/I211, the +	/* The timestamp latches on lowest register read. For I210/I211, the  	 * lowest register is SYSTIMR. Since we only need to provide nanosecond  	 * resolution, we can ignore it.  	 */ @@ -150,8 +137,7 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter,  {  	struct e1000_hw *hw = &adapter->hw; -	/* -	 * Writing the SYSTIMR register is not necessary as it only provides +	/* Writing the SYSTIMR register is not necessary as it only provides  	 * sub-nanosecond resolution.  	 */  	wr32(E1000_SYSTIML, ts->tv_nsec); @@ -185,6 +171,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,  	switch (adapter->hw.mac.type) {  	case e1000_82576:  	case e1000_82580: +	case e1000_i354:  	case e1000_i350:  		spin_lock_irqsave(&adapter->tmreg_lock, flags); @@ -207,10 +194,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,  	}  } -/* - * PTP clock operations - */ - +/* PTP clock operations */  static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)  {  	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, @@ -387,7 +371,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,   *   * This work function polls the TSYNCTXCTL valid bit to determine when a   * timestamp has been taken for the current stored skb. - */ + **/  void igb_ptp_tx_work(struct work_struct *work)  {  	struct igb_adapter *adapter = container_of(work, struct igb_adapter, @@ -437,7 +421,7 @@ static void igb_ptp_overflow_check(struct work_struct *work)   * dropped an Rx packet that was timestamped when the ring is full. The   * particular error is rare but leaves the device in a state unable to timestamp   * any future packets. 
- */ + **/  void igb_ptp_rx_hang(struct igb_adapter *adapter)  {  	struct e1000_hw *hw = &adapter->hw; @@ -481,7 +465,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)   * If we were asked to do hardware stamping and such a time stamp is   * available, then it must have been for this skb here because we only   * allow only one such packet into the queue. - */ + **/  void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)  {  	struct e1000_hw *hw = &adapter->hw; @@ -506,15 +490,14 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)   * This function is meant to retrieve a timestamp from the first buffer of an   * incoming frame.  The value is stored in little endian format starting on   * byte 8. - */ + **/  void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,  			 unsigned char *va,  			 struct sk_buff *skb)  {  	__le64 *regval = (__le64 *)va; -	/* -	 * The timestamp is recorded in little endian format. +	/* The timestamp is recorded in little endian format.  	 * DWORD: 0        1        2        3  	 * Field: Reserved Reserved SYSTIML  SYSTIMH  	 */ @@ -529,7 +512,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,   *   * This function is meant to retrieve a timestamp from the internal registers   * of the adapter and store it in the skb. - */ + **/  void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,  			 struct sk_buff *skb)  { @@ -537,8 +520,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,  	struct e1000_hw *hw = &adapter->hw;  	u64 regval; -	/* -	 * If this bit is set, then the RX registers contain the time stamp. No +	/* If this bit is set, then the RX registers contain the time stamp. No  	 * other packet will be time stamped until we read these registers, so  	 * read the registers to make them available again. Because only one  	 * packet can be time stamped at a time, we know that the register @@ -574,7 +556,6 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,   * type has to be specified. Matching the kind of event packet is   * not supported, with the exception of "all V2 events regardless of   * level 2 or 4". - *   **/  int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,  			   struct ifreq *ifr, int cmd) @@ -655,10 +636,9 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,  		return 0;  	} -	/* -	 * Per-packet timestamping only works if all packets are +	/* Per-packet timestamping only works if all packets are  	 * timestamped, so enable timestamping in all packets as -	 * long as one rx filter was configured. +	 * long as one Rx filter was configured.  	 */  	if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {  		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; @@ -756,6 +736,7 @@ void igb_ptp_init(struct igb_adapter *adapter)  		wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);  		break;  	case e1000_82580: +	case e1000_i354:  	case e1000_i350:  		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);  		adapter->ptp_caps.owner = THIS_MODULE; @@ -844,6 +825,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)  	switch (adapter->hw.mac.type) {  	case e1000_82576:  	case e1000_82580: +	case e1000_i354:  	case e1000_i350:  		cancel_delayed_work_sync(&adapter->ptp_overflow_work);  		break; @@ -888,6 +870,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)  		wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);  		break;  	case e1000_82580: +	case e1000_i354:  	case e1000_i350:  	case e1000_i210:  	case e1000_i211:  |
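A worked example of the RAL/RAH packing performed by igb_rar_set_qsel() in the igb_main.c hunks above: the MAC address bytes arrive in network order and are folded into two little-endian register words. The address value and the standalone program are illustrative only; the driver additionally ORs in the address-valid bit and the queue-select field, which are omitted here.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint32_t rar_low, rar_high;

	/* Same byte packing as igb_rar_set_qsel(): network order in,
	 * little-endian register words out.
	 */
	rar_low = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		  ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	printf("RAL = 0x%08x, RAH = 0x%08x\n", rar_low, rar_high);
	/* prints RAL = 0xaa211b00, RAH = 0x0000ccbb */
	return 0;
}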
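The VF rate limit programmed by igb_set_vf_rate_limit() above is a fixed-point divisor of the current link speed written to RTTBCNRC. The arithmetic is sketched below as a standalone program; the shift of 14 is assumed here to match E1000_RTTBCNRC_RF_INT_SHIFT in the driver headers, and the 1000/300 Mbps figures are made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int link_speed = 1000;	/* current link, Mbps (illustrative) */
	unsigned int tx_rate = 300;	/* requested VF limit, Mbps */
	unsigned int rf_int, rf_dec;

	rf_int = link_speed / tx_rate;			/* 3 */
	rf_dec = link_speed - rf_int * tx_rate;		/* 100 */
	rf_dec = (rf_dec * (1 << 14)) / tx_rate;	/* 5461: 14-bit fraction */

	/* rate factor = rf_int + rf_dec / 2^14 ~= 3.333, i.e. the VF is
	 * throttled to roughly link_speed / 3.333 ~= 300 Mbps.
	 */
	printf("rf_int = %u, rf_dec = %u\n", rf_int, rf_dec);
	return 0;
}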
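The new igb_ndo_set_vf_spoofchk() above toggles two bits per VF in DTXSWC (82576) or TXSWC (later parts): bit vf gates MAC anti-spoofing and bit vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT gates VLAN anti-spoofing, so both checks are enabled or disabled together. A small sketch of the mask construction follows; the shift value of 8 is an assumption for illustration, the driver uses the header constant.

#include <linux/types.h>

/* Assumed here for illustration; the driver uses
 * E1000_DTXSWC_VLAN_SPOOF_SHIFT from its register headers.
 */
#define VLAN_SPOOF_SHIFT	8

static u32 vf_spoofchk_mask(u32 reg_val, int vf, bool enable)
{
	u32 mask = (1 << vf) | (1 << (vf + VLAN_SPOOF_SHIFT));

	return enable ? (reg_val | mask) : (reg_val & ~mask);
}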