| field | value | date |
|---|---|---|
| author | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2011-04-07 07:42:33 -0700 |
| committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2011-08-10 20:03:27 -0700 |
| commit | dee1ad47f2ee75f5146d83ca757c1b7861c34c3b (patch) | |
| tree | 47cbdefe3d0f9b729724e378ad6a96eaddfd5fbc /drivers/net/ethernet/intel/ixgbe | |
| parent | f7917c009c28c941ba151ee66f04dc7f6a2e1e0b (diff) | |
intel: Move the Intel wired LAN drivers
Moves the Intel wired LAN drivers into drivers/net/ethernet/intel/ and makes
the necessary Kconfig and Makefile changes.
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
25 files changed, 28510 insertions, 0 deletions
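The diffstat above is limited to drivers/net/ethernet/intel/ixgbe, so the Kconfig and Makefile hunks that wire the relocated driver into the build are not shown in this listing. As a rough illustrative sketch only (the parent-directory paths and the NET_VENDOR_INTEL guard symbol below are assumptions, not hunks taken from this commit), the build wiring for a moved driver typically looks like:

```make
# drivers/net/ethernet/intel/Makefile (illustrative sketch, not the actual hunk)
# Kbuild descends into a driver directory when its config symbol is y or m.
obj-$(CONFIG_IXGBE) += ixgbe/

# drivers/net/ethernet/Makefile (illustrative sketch)
# The vendor directory is entered only when its guard symbol is enabled.
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
```

Because Kbuild only descends into directories whose obj-$(CONFIG_...) entry evaluates to y or m, moving the sources also means moving or adding these per-directory entries along with the driver's Kconfig block.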
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
new file mode 100644
index 00000000000..7d7387fbdec
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -0,0 +1,42 @@
+################################################################################
+#
+# Intel 10 Gigabit PCI Express Linux driver
+# Copyright(c) 1999 - 2010 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_IXGBE) += ixgbe.o
+
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+              ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+              ixgbe_mbx.o ixgbe_x540.o
+
+ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
+                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o
+
+ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
new file mode 100644
index 00000000000..e04a8e49e6d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -0,0 +1,617 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_H_ +#define _IXGBE_H_ + +#include <linux/bitops.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/cpumask.h> +#include <linux/aer.h> +#include <linux/if_vlan.h> + +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_dcb.h" +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#define IXGBE_FCOE +#include "ixgbe_fcoe.h" +#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ +#ifdef CONFIG_IXGBE_DCA +#include <linux/dca.h> +#endif + +/* common prefix used by pr_<> macros */ +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/* TX/RX descriptor defines */ +#define IXGBE_DEFAULT_TXD		    512 +#define IXGBE_MAX_TXD			   4096 +#define IXGBE_MIN_TXD			     64 + +#define IXGBE_DEFAULT_RXD		    512 +#define IXGBE_MAX_RXD			   4096 +#define IXGBE_MIN_RXD			     64 + +/* flow control */ +#define IXGBE_MIN_FCRTL			   0x40 +#define IXGBE_MAX_FCRTL			0x7FF80 +#define IXGBE_MIN_FCRTH			  0x600 +#define IXGBE_MAX_FCRTH			0x7FFF0 +#define IXGBE_DEFAULT_FCPAUSE		 0xFFFF +#define IXGBE_MIN_FCPAUSE		      0 +#define IXGBE_MAX_FCPAUSE		 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define IXGBE_RXBUFFER_512   512    /* Used for packet split */ +#define IXGBE_RXBUFFER_2048  2048 +#define IXGBE_RXBUFFER_4096  4096 +#define IXGBE_RXBUFFER_8192  8192 +#define IXGBE_MAX_RXBUFFER   16384  /* largest size for a single descriptor */ + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we + * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, + * this adds up to 512 bytes of extra data meaning the smallest allocation + * we could have is 1K. + * i.e. RXBUFFER_512 --> size-1024 slab + */ +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */ + +#define IXGBE_TX_FLAGS_CSUM		(u32)(1) +#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1) +#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2) +#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3) +#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 4) +#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 5) +#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000 +#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK   0x0000e000 +#define IXGBE_TX_FLAGS_VLAN_SHIFT	16 + +#define IXGBE_MAX_RSC_INT_RATE          162760 + +#define IXGBE_MAX_VF_MC_ENTRIES         30 +#define IXGBE_MAX_VF_FUNCTIONS          64 +#define IXGBE_MAX_VFTA_ENTRIES          128 +#define MAX_EMULATION_MAC_ADDRS         16 +#define IXGBE_MAX_PF_MACVLANS           15 +#define VMDQ_P(p)   ((p) + adapter->num_vfs) + +struct vf_data_storage { +	unsigned char vf_mac_addresses[ETH_ALEN]; +	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; +	u16 num_vf_mc_hashes; +	u16 default_vf_vlan_id; +	u16 vlans_enabled; +	bool clear_to_send; +	bool pf_set_mac; +	u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ +	u16 pf_qos; +	u16 tx_rate; +}; + +struct vf_macvlans { +	struct list_head l; +	int vf; +	int rar_entry; +	bool free; +	bool is_macvlan; +	u8 vf_macvlan[ETH_ALEN]; +}; + +#define IXGBE_MAX_TXD_PWR	14 +#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ixgbe_tx_buffer { +	struct sk_buff *skb; +	dma_addr_t dma; +	unsigned long time_stamp; +	u16 length; +	u16 next_to_watch; +	unsigned int bytecount; +	u16 gso_segs; +	u8 mapped_as_page; +}; + +struct ixgbe_rx_buffer { +	struct sk_buff *skb; +	dma_addr_t dma; +	struct page *page; +	dma_addr_t page_dma; +	unsigned int page_offset; +}; + +struct ixgbe_queue_stats { +	u64 packets; +	u64 bytes; +}; + +struct ixgbe_tx_queue_stats { +	u64 restart_queue; +	u64 tx_busy; +	u64 completed; +	u64 tx_done_old; +}; + +struct ixgbe_rx_queue_stats { +	u64 rsc_count; +	u64 rsc_flush; +	u64 non_eop_descs; +	u64 alloc_rx_page_failed; +	u64 alloc_rx_buff_failed; +}; + +enum ixbge_ring_state_t { +	__IXGBE_TX_FDIR_INIT_DONE, +	__IXGBE_TX_DETECT_HANG, +	__IXGBE_HANG_CHECK_ARMED, +	__IXGBE_RX_PS_ENABLED, +	__IXGBE_RX_RSC_ENABLED, +}; + +#define ring_is_ps_enabled(ring) \ +	test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) +#define set_ring_ps_enabled(ring) \ +	set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) +#define clear_ring_ps_enabled(ring) \ +	clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ +	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ +	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ +	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define ring_is_rsc_enabled(ring) \ +	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define set_ring_rsc_enabled(ring) \ +	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ +	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +struct ixgbe_ring { +	void *desc;			/* descriptor ring memory */ +	struct device *dev;             /* device for DMA mapping */ +	struct net_device *netdev;      /* netdev ring belongs to */ +	union { +		struct ixgbe_tx_buffer *tx_buffer_info; +		struct ixgbe_rx_buffer *rx_buffer_info; +	}; +	unsigned long state; +	u8 __iomem *tail; + +	u16 count;			/* amount of descriptors */ +	u16 rx_buf_len; + +	u8 queue_index; /* needed for multiqueue queue management */ +	u8 reg_idx;			/* holds the special value that gets +					 * the hardware register offset +					 * associated with this ring, which is +					 * different for DCB and RSS modes +					 */ +	u8 atr_sample_rate; +	u8 atr_count; + +	u16 next_to_use; +	u16 next_to_clean; + +	u8 dcb_tc; +	struct ixgbe_queue_stats stats; +	struct u64_stats_sync syncp; +	union { +		struct ixgbe_tx_queue_stats tx_stats; +		struct ixgbe_rx_queue_stats rx_stats; +	}; +	int numa_node; +	unsigned int size;		/* length in bytes */ +	dma_addr_t dma;			/* phys. 
address of descriptor ring */ +	struct rcu_head rcu; +	struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */ +} ____cacheline_internodealigned_in_smp; + +enum ixgbe_ring_f_enum { +	RING_F_NONE = 0, +	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */ +	RING_F_RSS, +	RING_F_FDIR, +#ifdef IXGBE_FCOE +	RING_F_FCOE, +#endif /* IXGBE_FCOE */ + +	RING_F_ARRAY_SIZE      /* must be last in enum set */ +}; + +#define IXGBE_MAX_RSS_INDICES  16 +#define IXGBE_MAX_VMDQ_INDICES 64 +#define IXGBE_MAX_FDIR_INDICES 64 +#ifdef IXGBE_FCOE +#define IXGBE_MAX_FCOE_INDICES  8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#else +#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES +#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES +#endif /* IXGBE_FCOE */ +struct ixgbe_ring_feature { +	int indices; +	int mask; +} ____cacheline_internodealigned_in_smp; + +struct ixgbe_ring_container { +#if MAX_RX_QUEUES > MAX_TX_QUEUES +	DECLARE_BITMAP(idx, MAX_RX_QUEUES); +#else +	DECLARE_BITMAP(idx, MAX_TX_QUEUES); +#endif +	unsigned int total_bytes;	/* total bytes processed this int */ +	unsigned int total_packets;	/* total packets processed this int */ +	u16 work_limit;			/* total work allowed per interrupt */ +	u8 count;			/* total number of rings in vector */ +	u8 itr;				/* current ITR setting for ring */ +}; + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ +                              ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct ixgbe_q_vector { +	struct ixgbe_adapter *adapter; +	unsigned int v_idx; /* index of q_vector within array, also used for +	                     * finding the bit in EICR and friends that +	                     * represents the vector for this ring */ +#ifdef CONFIG_IXGBE_DCA +	int cpu;	    /* CPU for DCA */ +#endif +	struct napi_struct napi; +	struct ixgbe_ring_container rx, tx; +	u32 eitr; +	cpumask_var_t affinity_mask; +	char name[IFNAMSIZ + 9]; +}; + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways.  The lowest value + * supported by all of the ixgbe hardware is 8. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ +	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) +{ +	u16 ntc = ring->next_to_clean; +	u16 ntu = ring->next_to_use; + +	return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +#define IXGBE_RX_DESC_ADV(R, i)	    \ +	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) +#define IXGBE_TX_DESC_ADV(R, i)	    \ +	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) +#define IXGBE_TX_CTXTDESC_ADV(R, i)	    \ +	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) + +#define IXGBE_MAX_JUMBO_FRAME_SIZE        16128 +#ifdef IXGBE_FCOE +/* Use 3K as the baby jumbo frame size for FCoE */ +#define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072 +#endif /* IXGBE_FCOE */ + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +#define MAX_MSIX_VECTORS_82599 64 +#define MAX_MSIX_Q_VECTORS_82599 64 +#define MAX_MSIX_VECTORS_82598 18 +#define MAX_MSIX_Q_VECTORS_82598 16 + +#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599 +#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 + +#define MIN_MSIX_Q_VECTORS 2 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* board specific private data structure */ +struct ixgbe_adapter { +	unsigned long state; + +	/* Some features need tri-state capability, +	 * thus the additional *_CAPABLE flags. +	 */ +	u32 flags; +#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1) +#define IXGBE_FLAG_MSI_CAPABLE                  (u32)(1 << 1) +#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 2) +#define IXGBE_FLAG_MSIX_CAPABLE                 (u32)(1 << 3) +#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 4) +#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 6) +#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 7) +#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 8) +#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 9) +#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 10) +#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 11) +#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 12) +#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 13) +#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 14) +#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 16) +#define IXGBE_FLAG_RSS_CAPABLE                  (u32)(1 << 17) +#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 18) +#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20) +#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22) +#define IXGBE_FLAG_NEED_LINK_CONFIG             (u32)(1 << 23) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 24) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 25) +#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 26) +#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 27) +#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 28) +#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 29) + +	u32 flags2; +#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1) +#define IXGBE_FLAG2_RSC_ENABLED                 (u32)(1 << 1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE         (u32)(1 << 2) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT           (u32)(1 << 3) +#define IXGBE_FLAG2_SEARCH_FOR_SFP              (u32)(1 << 4) +#define IXGBE_FLAG2_SFP_NEEDS_RESET             (u32)(1 << 5) +#define IXGBE_FLAG2_RESET_REQUESTED             (u32)(1 << 6) +#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7) + +	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +	u16 bd_number; +	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + +	/* DCB parameters */ +	struct ieee_pfc *ixgbe_ieee_pfc; +	struct 
ieee_ets *ixgbe_ieee_ets; +	struct ixgbe_dcb_config dcb_cfg; +	struct ixgbe_dcb_config temp_dcb_cfg; +	u8 dcb_set_bitmap; +	u8 dcbx_cap; +	enum ixgbe_fc_mode last_lfc_mode; + +	/* Interrupt Throttle Rate */ +	u32 rx_itr_setting; +	u32 tx_itr_setting; +	u16 eitr_low; +	u16 eitr_high; + +	/* Work limits */ +	u16 tx_work_limit; + +	/* TX */ +	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; +	int num_tx_queues; +	u32 tx_timeout_count; +	bool detect_tx_hung; + +	u64 restart_queue; +	u64 lsc_int; + +	/* RX */ +	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp; +	int num_rx_queues; +	int num_rx_pools;		/* == num_rx_queues in 82598 */ +	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */ +	u64 hw_csum_rx_error; +	u64 hw_rx_no_dma_resources; +	u64 non_eop_descs; +	int num_msix_vectors; +	int max_msix_q_vectors;         /* true count of q_vectors for device */ +	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; +	struct msix_entry *msix_entries; + +	u32 alloc_rx_page_failed; +	u32 alloc_rx_buff_failed; + +/* default to trying for four seconds */ +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) + +	/* OS defined structs */ +	struct net_device *netdev; +	struct pci_dev *pdev; + +	u32 test_icr; +	struct ixgbe_ring test_tx_ring; +	struct ixgbe_ring test_rx_ring; + +	/* structs defined in ixgbe_hw.h */ +	struct ixgbe_hw hw; +	u16 msg_enable; +	struct ixgbe_hw_stats stats; + +	/* Interrupt Throttle Rate */ +	u32 rx_eitr_param; +	u32 tx_eitr_param; + +	u64 tx_busy; +	unsigned int tx_ring_count; +	unsigned int rx_ring_count; + +	u32 link_speed; +	bool link_up; +	unsigned long link_check_timeout; + +	struct work_struct service_task; +	struct timer_list service_timer; +	u32 fdir_pballoc; +	u32 atr_sample_rate; +	unsigned long fdir_overflow; /* number of times ATR was backed off */ +	spinlock_t fdir_perfect_lock; +#ifdef IXGBE_FCOE +	struct ixgbe_fcoe fcoe; +#endif /* IXGBE_FCOE */ +	u64 rsc_total_count; +	u64 rsc_total_flush; +	u32 wol; +	u16 eeprom_version; + +	int node; +	u32 led_reg; +	u32 interrupt_event; +	char lsc_int_name[IFNAMSIZ + 9]; + +	/* SR-IOV */ +	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); +	unsigned int num_vfs; +	struct vf_data_storage *vfinfo; +	int vf_rate_link_speed; +	struct vf_macvlans vf_mvs; +	struct vf_macvlans *mv_list; +	bool antispoofing_enabled; + +	struct hlist_head fdir_filter_list; +	union ixgbe_atr_input fdir_mask; +	int fdir_filter_count; +}; + +struct ixgbe_fdir_filter { +	struct hlist_node fdir_node; +	union ixgbe_atr_input filter; +	u16 sw_idx; +	u16 action; +}; + +enum ixbge_state_t { +	__IXGBE_TESTING, +	__IXGBE_RESETTING, +	__IXGBE_DOWN, +	__IXGBE_SERVICE_SCHED, +	__IXGBE_IN_SFP_INIT, +}; + +struct ixgbe_rsc_cb { +	dma_addr_t dma; +	u16 skb_cnt; +	bool delay_unmap; +}; +#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) + +enum ixgbe_boards { +	board_82598, +	board_82599, +	board_X540, +}; + +extern struct ixgbe_info ixgbe_82598_info; +extern struct ixgbe_info ixgbe_82599_info; +extern struct ixgbe_info ixgbe_X540_info; +#ifdef CONFIG_IXGBE_DCB +extern const struct dcbnl_rtnl_ops dcbnl_ops; +extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, +                              struct ixgbe_dcb_config *dst_dcb_cfg, +                              int tc_max); +#endif + +extern char ixgbe_driver_name[]; +extern const char ixgbe_driver_version[]; + +extern int ixgbe_up(struct ixgbe_adapter *adapter); +extern void ixgbe_down(struct ixgbe_adapter *adapter); +extern void 
ixgbe_reinit_locked(struct ixgbe_adapter *adapter); +extern void ixgbe_reset(struct ixgbe_adapter *adapter); +extern void ixgbe_set_ethtool_ops(struct net_device *netdev); +extern int ixgbe_setup_rx_resources(struct ixgbe_ring *); +extern int ixgbe_setup_tx_resources(struct ixgbe_ring *); +extern void ixgbe_free_rx_resources(struct ixgbe_ring *); +extern void ixgbe_free_tx_resources(struct ixgbe_ring *); +extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); +extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); +extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, +				   struct ixgbe_ring *); +extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); +extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); +extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); +extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, +					 struct ixgbe_adapter *, +					 struct ixgbe_ring *); +extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, +                                             struct ixgbe_tx_buffer *); +extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); +extern void ixgbe_write_eitr(struct ixgbe_q_vector *); +extern int ethtool_ioctl(struct ifreq *ifr); +extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); +extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, +						 union ixgbe_atr_hash_dword input, +						 union ixgbe_atr_hash_dword common, +                                                 u8 queue); +extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, +					   union ixgbe_atr_input *input_mask); +extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, +						 union ixgbe_atr_input *input, +						 u16 soft_id, u8 queue); +extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, +						 union ixgbe_atr_input *input, +						 u16 soft_id); +extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, +						 union ixgbe_atr_input *mask); +extern void ixgbe_set_rx_mode(struct net_device *netdev); +extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); +extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); +extern void ixgbe_do_reset(struct net_device *netdev); +#ifdef IXGBE_FCOE +extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); +extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, +                     u32 tx_flags, u8 *hdr_len); +extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); +extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, +			  union ixgbe_adv_rx_desc *rx_desc, +			  struct sk_buff *skb, +			  u32 staterr); +extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, +                              struct scatterlist *sgl, unsigned int sgc); +extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, +				 struct scatterlist *sgl, unsigned int sgc); +extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +extern int ixgbe_fcoe_enable(struct net_device *netdev); +extern int ixgbe_fcoe_disable(struct net_device *netdev); +#ifdef CONFIG_IXGBE_DCB +extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); +extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); +#endif /* CONFIG_IXGBE_DCB */ +extern int 
ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +#endif /* IXGBE_FCOE */ + +#endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c new file mode 100644 index 00000000000..0d4e3826449 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -0,0 +1,1353 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe.h" +#include "ixgbe_phy.h" + +#define IXGBE_82598_MAX_TX_QUEUES 32 +#define IXGBE_82598_MAX_RX_QUEUES 64 +#define IXGBE_82598_RAR_ENTRIES   16 +#define IXGBE_82598_MC_TBL_SIZE  128 +#define IXGBE_82598_VFT_TBL_SIZE 128 +#define IXGBE_82598_RX_PB_SIZE	 512 + +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, +                                         ixgbe_link_speed speed, +                                         bool autoneg, +                                         bool autoneg_wait_to_complete); +static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, +                                       u8 *eeprom_data); + +/** + *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout + *  @hw: pointer to the HW structure + * + *  The defaults for 82598 should be in the range of 50us to 50ms, + *  however the hardware default for these parts is 500us to 1ms which is less + *  than the 10ms recommended by the pci-e spec.  To address this we need to + *  increase the value to either 10ms to 250ms for capability version 1 config, + *  or 16ms to 55ms for version 2. 
+ **/ +static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) +{ +	struct ixgbe_adapter *adapter = hw->back; +	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); +	u16 pcie_devctl2; + +	/* only take action if timeout value is defaulted to 0 */ +	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) +		goto out; + +	/* +	 * if capababilities version is type 1 we can write the +	 * timeout of 10ms to 250ms through the GCR register +	 */ +	if (!(gcr & IXGBE_GCR_CAP_VER2)) { +		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; +		goto out; +	} + +	/* +	 * for version 2 capabilities we need to write the config space +	 * directly in order to set the completion timeout value for +	 * 16ms to 55ms +	 */ +	pci_read_config_word(adapter->pdev, +	                     IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2); +	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; +	pci_write_config_word(adapter->pdev, +	                      IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); +out: +	/* disable completion timeout resend */ +	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; +	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); +} + +/** + *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count + *  @hw: pointer to hardware structure + * + *  Read PCIe configuration space, and get the MSI-X vector count from + *  the capabilities table. + **/ +static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw) +{ +	struct ixgbe_adapter *adapter = hw->back; +	u16 msix_count; +	pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS, +	                     &msix_count); +	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + +	/* MSI-X count is zero-based in HW, so increment to give proper value */ +	msix_count++; + +	return msix_count; +} + +/** + */ +static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) +{ +	struct ixgbe_mac_info *mac = &hw->mac; + +	/* Call PHY identify routine to get the phy type */ +	ixgbe_identify_phy_generic(hw); + +	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; +	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; +	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; +	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; +	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; +	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); + +	return 0; +} + +/** + *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init + *  @hw: pointer to hardware structure + * + *  Initialize any function pointers that were not able to be + *  set during get_invariants because the PHY/SFP type was + *  not known.  Perform the SFP init if necessary. 
+ * + **/ +static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +{ +	struct ixgbe_mac_info *mac = &hw->mac; +	struct ixgbe_phy_info *phy = &hw->phy; +	s32 ret_val = 0; +	u16 list_offset, data_offset; + +	/* Identify the PHY */ +	phy->ops.identify(hw); + +	/* Overwrite the link function pointers if copper PHY */ +	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { +		mac->ops.setup_link = &ixgbe_setup_copper_link_82598; +		mac->ops.get_link_capabilities = +			&ixgbe_get_copper_link_capabilities_generic; +	} + +	switch (hw->phy.type) { +	case ixgbe_phy_tn: +		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; +		phy->ops.check_link = &ixgbe_check_phy_link_tnx; +		phy->ops.get_firmware_version = +		             &ixgbe_get_phy_firmware_version_tnx; +		break; +	case ixgbe_phy_nl: +		phy->ops.reset = &ixgbe_reset_phy_nl; + +		/* Call SFP+ identify routine to get the SFP+ module type */ +		ret_val = phy->ops.identify_sfp(hw); +		if (ret_val != 0) +			goto out; +		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { +			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; +			goto out; +		} + +		/* Check to see if SFP+ module is supported */ +		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, +		                                            &list_offset, +		                                            &data_offset); +		if (ret_val != 0) { +			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; +			goto out; +		} +		break; +	default: +		break; +	} + +out: +	return ret_val; +} + +/** + *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx + *  @hw: pointer to hardware structure + * + *  Starts the hardware using the generic start_hw function. + *  Disables relaxed ordering Then set pcie completion timeout + * + **/ +static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +{ +	u32 regval; +	u32 i; +	s32 ret_val = 0; + +	ret_val = ixgbe_start_hw_generic(hw); + +	/* Disable relaxed ordering */ +	for (i = 0; ((i < hw->mac.max_tx_queues) && +	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { +		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); +		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; +		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); +	} + +	for (i = 0; ((i < hw->mac.max_rx_queues) && +	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { +		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | +			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN); +		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); +	} + +	hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE; + +	/* set the completion timeout for interface */ +	if (ret_val == 0) +		ixgbe_set_pcie_completion_timeout(hw); + +	return ret_val; +} + +/** + *  ixgbe_get_link_capabilities_82598 - Determines link capabilities + *  @hw: pointer to hardware structure + *  @speed: pointer to link speed + *  @autoneg: boolean auto-negotiation value + * + *  Determines the link capabilities by reading the AUTOC register. + **/ +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, +                                             ixgbe_link_speed *speed, +                                             bool *autoneg) +{ +	s32 status = 0; +	u32 autoc = 0; + +	/* +	 * Determine link capabilities based on the stored value of AUTOC, +	 * which represents EEPROM defaults.  If AUTOC value has not been +	 * stored, use the current register value. 
+	 */ +	if (hw->mac.orig_link_settings_stored) +		autoc = hw->mac.orig_autoc; +	else +		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + +	switch (autoc & IXGBE_AUTOC_LMS_MASK) { +	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: +		*speed = IXGBE_LINK_SPEED_1GB_FULL; +		*autoneg = false; +		break; + +	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: +		*speed = IXGBE_LINK_SPEED_10GB_FULL; +		*autoneg = false; +		break; + +	case IXGBE_AUTOC_LMS_1G_AN: +		*speed = IXGBE_LINK_SPEED_1GB_FULL; +		*autoneg = true; +		break; + +	case IXGBE_AUTOC_LMS_KX4_AN: +	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: +		*speed = IXGBE_LINK_SPEED_UNKNOWN; +		if (autoc & IXGBE_AUTOC_KX4_SUPP) +			*speed |= IXGBE_LINK_SPEED_10GB_FULL; +		if (autoc & IXGBE_AUTOC_KX_SUPP) +			*speed |= IXGBE_LINK_SPEED_1GB_FULL; +		*autoneg = true; +		break; + +	default: +		status = IXGBE_ERR_LINK_SETUP; +		break; +	} + +	return status; +} + +/** + *  ixgbe_get_media_type_82598 - Determines media type + *  @hw: pointer to hardware structure + * + *  Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +{ +	enum ixgbe_media_type media_type; + +	/* Detect if there is a copper PHY attached. */ +	switch (hw->phy.type) { +	case ixgbe_phy_cu_unknown: +	case ixgbe_phy_tn: +	case ixgbe_phy_aq: +		media_type = ixgbe_media_type_copper; +		goto out; +	default: +		break; +	} + +	/* Media type for I82598 is based on device ID */ +	switch (hw->device_id) { +	case IXGBE_DEV_ID_82598: +	case IXGBE_DEV_ID_82598_BX: +		/* Default device ID is mezzanine card KX/KX4 */ +		media_type = ixgbe_media_type_backplane; +		break; +	case IXGBE_DEV_ID_82598AF_DUAL_PORT: +	case IXGBE_DEV_ID_82598AF_SINGLE_PORT: +	case IXGBE_DEV_ID_82598_DA_DUAL_PORT: +	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: +	case IXGBE_DEV_ID_82598EB_XF_LR: +	case IXGBE_DEV_ID_82598EB_SFP_LOM: +		media_type = ixgbe_media_type_fiber; +		break; +	case IXGBE_DEV_ID_82598EB_CX4: +	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: +		media_type = ixgbe_media_type_cx4; +		break; +	case IXGBE_DEV_ID_82598AT: +	case IXGBE_DEV_ID_82598AT2: +		media_type = ixgbe_media_type_copper; +		break; +	default: +		media_type = ixgbe_media_type_unknown; +		break; +	} +out: +	return media_type; +} + +/** + *  ixgbe_fc_enable_82598 - Enable flow control + *  @hw: pointer to hardware structure + *  @packetbuf_num: packet buffer number (0-7) + * + *  Enable flow control according to the current settings. + **/ +static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) +{ +	s32 ret_val = 0; +	u32 fctrl_reg; +	u32 rmcs_reg; +	u32 reg; +	u32 rx_pba_size; +	u32 link_speed = 0; +	bool link_up; + +#ifdef CONFIG_DCB +	if (hw->fc.requested_mode == ixgbe_fc_pfc) +		goto out; + +#endif /* CONFIG_DCB */ +	/* +	 * On 82598 having Rx FC on causes resets while doing 1G +	 * so if it's on turn it off once we know link_speed. For +	 * more details see 82598 Specification update. 
+	 */ +	hw->mac.ops.check_link(hw, &link_speed, &link_up, false); +	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { +		switch (hw->fc.requested_mode) { +		case ixgbe_fc_full: +			hw->fc.requested_mode = ixgbe_fc_tx_pause; +			break; +		case ixgbe_fc_rx_pause: +			hw->fc.requested_mode = ixgbe_fc_none; +			break; +		default: +			/* no change */ +			break; +		} +	} + +	/* Negotiate the fc mode to use */ +	ret_val = ixgbe_fc_autoneg(hw); +	if (ret_val == IXGBE_ERR_FLOW_CONTROL) +		goto out; + +	/* Disable any previous flow control settings */ +	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); +	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + +	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); +	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + +	/* +	 * The possible values of fc.current_mode are: +	 * 0: Flow control is completely disabled +	 * 1: Rx flow control is enabled (we can receive pause frames, +	 *    but not send pause frames). +	 * 2: Tx flow control is enabled (we can send pause frames but +	 *     we do not support receiving pause frames). +	 * 3: Both Rx and Tx flow control (symmetric) are enabled. +#ifdef CONFIG_DCB +	 * 4: Priority Flow Control is enabled. +#endif +	 * other: Invalid. +	 */ +	switch (hw->fc.current_mode) { +	case ixgbe_fc_none: +		/* +		 * Flow control is disabled by software override or autoneg. +		 * The code below will actually disable it in the HW. +		 */ +		break; +	case ixgbe_fc_rx_pause: +		/* +		 * Rx Flow control is enabled and Tx Flow control is +		 * disabled by software override. Since there really +		 * isn't a way to advertise that we are capable of RX +		 * Pause ONLY, we will advertise that we support both +		 * symmetric and asymmetric Rx PAUSE.  Later, we will +		 * disable the adapter's ability to send PAUSE frames. +		 */ +		fctrl_reg |= IXGBE_FCTRL_RFCE; +		break; +	case ixgbe_fc_tx_pause: +		/* +		 * Tx Flow control is enabled, and Rx Flow control is +		 * disabled by software override. +		 */ +		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; +		break; +	case ixgbe_fc_full: +		/* Flow control (both Rx and Tx) is enabled by SW override. */ +		fctrl_reg |= IXGBE_FCTRL_RFCE; +		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; +		break; +#ifdef CONFIG_DCB +	case ixgbe_fc_pfc: +		goto out; +		break; +#endif /* CONFIG_DCB */ +	default: +		hw_dbg(hw, "Flow control param set incorrectly\n"); +		ret_val = IXGBE_ERR_CONFIG; +		goto out; +		break; +	} + +	/* Set 802.3x based flow control settings. */ +	fctrl_reg |= IXGBE_FCTRL_DPF; +	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); +	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + +	/* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ +	if (hw->fc.current_mode & ixgbe_fc_tx_pause) { +		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); +		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + +		reg = (rx_pba_size - hw->fc.low_water) << 6; +		if (hw->fc.send_xon) +			reg |= IXGBE_FCRTL_XONE; + +		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); + +		reg = (rx_pba_size - hw->fc.high_water) << 6; +		reg |= IXGBE_FCRTH_FCEN; + +		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); +	} + +	/* Configure pause time (2 TCs per register) */ +	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); +	if ((packetbuf_num & 1) == 0) +		reg = (reg & 0xFFFF0000) | hw->fc.pause_time; +	else +		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); +	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); + +	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + +out: +	return ret_val; +} + +/** + *  ixgbe_start_mac_link_82598 - Configures MAC link settings + *  @hw: pointer to hardware structure + * + *  Configures link settings based on values in the ixgbe_hw struct. + *  Restarts the link.  Performs autonegotiation if needed. + **/ +static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, +                                      bool autoneg_wait_to_complete) +{ +	u32 autoc_reg; +	u32 links_reg; +	u32 i; +	s32 status = 0; + +	/* Restart link */ +	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	autoc_reg |= IXGBE_AUTOC_AN_RESTART; +	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + +	/* Only poll for autoneg to complete if specified to do so */ +	if (autoneg_wait_to_complete) { +		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == +		     IXGBE_AUTOC_LMS_KX4_AN || +		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) == +		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { +			links_reg = 0; /* Just in case Autoneg time = 0 */ +			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { +				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); +				if (links_reg & IXGBE_LINKS_KX_AN_COMP) +					break; +				msleep(100); +			} +			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { +				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; +				hw_dbg(hw, "Autonegotiation did not complete.\n"); +			} +		} +	} + +	/* Add delay to filter out noises during initial link setup */ +	msleep(50); + +	return status; +} + +/** + *  ixgbe_validate_link_ready - Function looks for phy link + *  @hw: pointer to hardware structure + * + *  Function indicates success when phy link is available. If phy is not ready + *  within 5 seconds of MAC indicating link, the function returns error. 
+ **/ +static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +{ +	u32 timeout; +	u16 an_reg; + +	if (hw->device_id != IXGBE_DEV_ID_82598AT2) +		return 0; + +	for (timeout = 0; +	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { +		hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); + +		if ((an_reg & MDIO_AN_STAT1_COMPLETE) && +		    (an_reg & MDIO_STAT1_LSTATUS)) +			break; + +		msleep(100); +	} + +	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { +		hw_dbg(hw, "Link was indicated but link is down\n"); +		return IXGBE_ERR_LINK_SETUP; +	} + +	return 0; +} + +/** + *  ixgbe_check_mac_link_82598 - Get link/speed status + *  @hw: pointer to hardware structure + *  @speed: pointer to link speed + *  @link_up: true is link is up, false otherwise + *  @link_up_wait_to_complete: bool used to wait for link up or not + * + *  Reads the links register to determine if link is up and the current speed + **/ +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, +                                      ixgbe_link_speed *speed, bool *link_up, +                                      bool link_up_wait_to_complete) +{ +	u32 links_reg; +	u32 i; +	u16 link_reg, adapt_comp_reg; + +	/* +	 * SERDES PHY requires us to read link status from register 0xC79F. +	 * Bit 0 set indicates link is up/ready; clear indicates link down. +	 * 0xC00C is read to check that the XAUI lanes are active.  Bit 0 +	 * clear indicates active; set indicates inactive. +	 */ +	if (hw->phy.type == ixgbe_phy_nl) { +		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); +		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); +		hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, +		                     &adapt_comp_reg); +		if (link_up_wait_to_complete) { +			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { +				if ((link_reg & 1) && +				    ((adapt_comp_reg & 1) == 0)) { +					*link_up = true; +					break; +				} else { +					*link_up = false; +				} +				msleep(100); +				hw->phy.ops.read_reg(hw, 0xC79F, +				                     MDIO_MMD_PMAPMD, +				                     &link_reg); +				hw->phy.ops.read_reg(hw, 0xC00C, +				                     MDIO_MMD_PMAPMD, +				                     &adapt_comp_reg); +			} +		} else { +			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) +				*link_up = true; +			else +				*link_up = false; +		} + +		if (*link_up == false) +			goto out; +	} + +	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); +	if (link_up_wait_to_complete) { +		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { +			if (links_reg & IXGBE_LINKS_UP) { +				*link_up = true; +				break; +			} else { +				*link_up = false; +			} +			msleep(100); +			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); +		} +	} else { +		if (links_reg & IXGBE_LINKS_UP) +			*link_up = true; +		else +			*link_up = false; +	} + +	if (links_reg & IXGBE_LINKS_SPEED) +		*speed = IXGBE_LINK_SPEED_10GB_FULL; +	else +		*speed = IXGBE_LINK_SPEED_1GB_FULL; + +	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && +	    (ixgbe_validate_link_ready(hw) != 0)) +		*link_up = false; + +	/* if link is down, zero out the current_mode */ +	if (*link_up == false) { +		hw->fc.current_mode = ixgbe_fc_none; +		hw->fc.fc_was_autonegged = false; +	} +out: +	return 0; +} + +/** + *  ixgbe_setup_mac_link_82598 - Set MAC link speed + *  @hw: pointer to hardware structure + *  @speed: new link speed + *  @autoneg: true if auto-negotiation enabled + *  @autoneg_wait_to_complete: true when waiting for completion is needed + * + *  Set the link speed 
in the AUTOC register and restarts link. + **/ +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, +                                           ixgbe_link_speed speed, bool autoneg, +                                           bool autoneg_wait_to_complete) +{ +	s32              status            = 0; +	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; +	u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	u32              autoc             = curr_autoc; +	u32              link_mode         = autoc & IXGBE_AUTOC_LMS_MASK; + +	/* Check to see if speed passed in is supported. */ +	ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); +	speed &= link_capabilities; + +	if (speed == IXGBE_LINK_SPEED_UNKNOWN) +		status = IXGBE_ERR_LINK_SETUP; + +	/* Set KX4/KX support according to speed requested */ +	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || +	         link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { +		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; +		if (speed & IXGBE_LINK_SPEED_10GB_FULL) +			autoc |= IXGBE_AUTOC_KX4_SUPP; +		if (speed & IXGBE_LINK_SPEED_1GB_FULL) +			autoc |= IXGBE_AUTOC_KX_SUPP; +		if (autoc != curr_autoc) +			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); +	} + +	if (status == 0) { +		/* +		 * Setup and restart the link based on the new values in +		 * ixgbe_hw This will write the AUTOC register based on the new +		 * stored values +		 */ +		status = ixgbe_start_mac_link_82598(hw, +						    autoneg_wait_to_complete); +	} + +	return status; +} + + +/** + *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field + *  @hw: pointer to hardware structure + *  @speed: new link speed + *  @autoneg: true if autonegotiation enabled + *  @autoneg_wait_to_complete: true if waiting is needed to complete + * + *  Sets the link speed in the AUTOC register in the MAC and restarts link. + **/ +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, +                                               ixgbe_link_speed speed, +                                               bool autoneg, +                                               bool autoneg_wait_to_complete) +{ +	s32 status; + +	/* Setup the PHY according to input speed */ +	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, +	                                      autoneg_wait_to_complete); +	/* Set up MAC */ +	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + +	return status; +} + +/** + *  ixgbe_reset_hw_82598 - Performs hardware reset + *  @hw: pointer to hardware structure + * + *  Resets the hardware by resetting the transmit and receive units, masks and + *  clears all interrupts, performing a PHY reset, and performing a link (MAC) + *  reset. + **/ +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +{ +	s32 status = 0; +	s32 phy_status = 0; +	u32 ctrl; +	u32 gheccr; +	u32 i; +	u32 autoc; +	u8  analog_val; + +	/* Call adapter stop to disable tx/rx and clear interrupts */ +	hw->mac.ops.stop_adapter(hw); + +	/* +	 * Power up the Atlas Tx lanes if they are currently powered down. +	 * Atlas Tx lanes are powered down for MAC loopback tests, but +	 * they are not automatically restored on reset. 
+	 */ +	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); +	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { +		/* Enable Tx Atlas so packets can be transmitted again */ +		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, +		                             &analog_val); +		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; +		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, +		                              analog_val); + +		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, +		                             &analog_val); +		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; +		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, +		                              analog_val); + +		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, +		                             &analog_val); +		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; +		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, +		                              analog_val); + +		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, +		                             &analog_val); +		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; +		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, +		                              analog_val); +	} + +	/* Reset PHY */ +	if (hw->phy.reset_disable == false) { +		/* PHY ops must be identified and initialized prior to reset */ + +		/* Init PHY and function pointers, perform SFP setup */ +		phy_status = hw->phy.ops.init(hw); +		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) +			goto reset_hw_out; +		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) +			goto no_phy_reset; + +		hw->phy.ops.reset(hw); +	} + +no_phy_reset: +	/* +	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master +	 * access and verify no pending requests before reset +	 */ +	ixgbe_disable_pcie_master(hw); + +mac_reset_top: +	/* +	 * Issue global reset to the MAC.  This needs to be a SW reset. +	 * If link reset is used, it might reset the MAC when mng is using it +	 */ +	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); +	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); +	IXGBE_WRITE_FLUSH(hw); + +	/* Poll for reset bit to self-clear indicating reset is complete */ +	for (i = 0; i < 10; i++) { +		udelay(1); +		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); +		if (!(ctrl & IXGBE_CTRL_RST)) +			break; +	} +	if (ctrl & IXGBE_CTRL_RST) { +		status = IXGBE_ERR_RESET_FAILED; +		hw_dbg(hw, "Reset polling failed to complete.\n"); +	} + +	/* +	 * Double resets are required for recovery from certain error +	 * conditions.  Between resets, it is necessary to stall to allow time +	 * for any pending HW events to complete.  We use 1usec since that is +	 * what is needed for ixgbe_disable_pcie_master().  The second reset +	 * then clears out any effects of those events. +	 */ +	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { +		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; +		udelay(1); +		goto mac_reset_top; +	} + +	msleep(50); + +	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); +	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); +	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); + +	/* +	 * Store the original AUTOC value if it has not been +	 * stored off yet.  Otherwise restore the stored original +	 * AUTOC value since the reset operation sets back to deaults. 
+	 */ +	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	if (hw->mac.orig_link_settings_stored == false) { +		hw->mac.orig_autoc = autoc; +		hw->mac.orig_link_settings_stored = true; +	} else if (autoc != hw->mac.orig_autoc) { +		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); +	} + +	/* Store the permanent mac address */ +	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + +	/* +	 * Store MAC address from RAR0, clear receive address registers, and +	 * clear the multicast table +	 */ +	hw->mac.ops.init_rx_addrs(hw); + +reset_hw_out: +	if (phy_status) +		status = phy_status; + +	return status; +} + +/** + *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address + *  @hw: pointer to hardware struct + *  @rar: receive address register index to associate with a VMDq index + *  @vmdq: VMDq set index + **/ +static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ +	u32 rar_high; +	u32 rar_entries = hw->mac.num_rar_entries; + +	/* Make sure we are using a valid rar index range */ +	if (rar >= rar_entries) { +		hw_dbg(hw, "RAR index %d is out of range.\n", rar); +		return IXGBE_ERR_INVALID_ARGUMENT; +	} + +	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); +	rar_high &= ~IXGBE_RAH_VIND_MASK; +	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); +	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); +	return 0; +} + +/** + *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address + *  @hw: pointer to hardware struct + *  @rar: receive address register index to associate with a VMDq index + *  @vmdq: VMDq clear index (not used in 82598, but elsewhere) + **/ +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ +	u32 rar_high; +	u32 rar_entries = hw->mac.num_rar_entries; + + +	/* Make sure we are using a valid rar index range */ +	if (rar >= rar_entries) { +		hw_dbg(hw, "RAR index %d is out of range.\n", rar); +		return IXGBE_ERR_INVALID_ARGUMENT; +	} + +	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); +	if (rar_high & IXGBE_RAH_VIND_MASK) { +		rar_high &= ~IXGBE_RAH_VIND_MASK; +		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); +	} + +	return 0; +} + +/** + *  ixgbe_set_vfta_82598 - Set VLAN filter table + *  @hw: pointer to hardware structure + *  @vlan: VLAN id to write to VLAN filter + *  @vind: VMDq output index that maps queue to VLAN id in VFTA + *  @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + *  Turn on/off specified VLAN in the VLAN filter table. 
+ **/ +static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, +				bool vlan_on) +{ +	u32 regindex; +	u32 bitindex; +	u32 bits; +	u32 vftabyte; + +	if (vlan > 4095) +		return IXGBE_ERR_PARAM; + +	/* Determine 32-bit word position in array */ +	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */ + +	/* Determine the location of the (VMD) queue index */ +	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ +	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */ + +	/* Set the nibble for VMD queue index */ +	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); +	bits &= (~(0x0F << bitindex)); +	bits |= (vind << bitindex); +	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); + +	/* Determine the location of the bit for this VLAN id */ +	bitindex = vlan & 0x1F;   /* lower five bits */ + +	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); +	if (vlan_on) +		/* Turn on this VLAN id */ +		bits |= (1 << bitindex); +	else +		/* Turn off this VLAN id */ +		bits &= ~(1 << bitindex); +	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); + +	return 0; +} + +/** + *  ixgbe_clear_vfta_82598 - Clear VLAN filter table + *  @hw: pointer to hardware structure + * + *  Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +{ +	u32 offset; +	u32 vlanbyte; + +	for (offset = 0; offset < hw->mac.vft_size; offset++) +		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + +	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) +		for (offset = 0; offset < hw->mac.vft_size; offset++) +			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), +			                0); + +	return 0; +} + +/** + *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register + *  @hw: pointer to hardware structure + *  @reg: analog register to read + *  @val: read value + * + *  Performs read operation to Atlas analog register specified. + **/ +static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ +	u32  atlas_ctl; + +	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, +	                IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); +	IXGBE_WRITE_FLUSH(hw); +	udelay(10); +	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); +	*val = (u8)atlas_ctl; + +	return 0; +} + +/** + *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register + *  @hw: pointer to hardware structure + *  @reg: atlas register to write + *  @val: value to write + * + *  Performs write operation to Atlas analog register specified. + **/ +static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +{ +	u32  atlas_ctl; + +	atlas_ctl = (reg << 8) | val; +	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); +	IXGBE_WRITE_FLUSH(hw); +	udelay(10); + +	return 0; +} + +/** + *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + *  @hw: pointer to hardware structure + *  @byte_offset: EEPROM byte offset to read + *  @eeprom_data: value read + * + *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + **/ +static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, +				       u8 *eeprom_data) +{ +	s32 status = 0; +	u16 sfp_addr = 0; +	u16 sfp_data = 0; +	u16 sfp_stat = 0; +	u32 i; + +	if (hw->phy.type == ixgbe_phy_nl) { +		/* +		 * phy SDA/SCL registers are at addresses 0xC30A to +		 * 0xC30D.  These registers are used to talk to the SFP+ +		 * module's EEPROM through the SDA/SCL (I2C) interface. 
+		 */ +		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset; +		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); +		hw->phy.ops.write_reg(hw, +		                      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, +		                      MDIO_MMD_PMAPMD, +		                      sfp_addr); + +		/* Poll status */ +		for (i = 0; i < 100; i++) { +			hw->phy.ops.read_reg(hw, +			                     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, +			                     MDIO_MMD_PMAPMD, +			                     &sfp_stat); +			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; +			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) +				break; +			usleep_range(10000, 20000); +		} + +		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { +			hw_dbg(hw, "EEPROM read did not pass.\n"); +			status = IXGBE_ERR_SFP_NOT_PRESENT; +			goto out; +		} + +		/* Read data */ +		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, +		                     MDIO_MMD_PMAPMD, &sfp_data); + +		*eeprom_data = (u8)(sfp_data >> 8); +	} else { +		status = IXGBE_ERR_PHY; +		goto out; +	} + +out: +	return status; +} + +/** + *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type + *  @hw: pointer to hardware structure + * + *  Determines physical layer capabilities of the current configuration. + **/ +static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) +{ +	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; +	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; +	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; +	u16 ext_ability = 0; + +	hw->phy.ops.identify(hw); + +	/* Copper PHY must be checked before AUTOC LMS to determine correct +	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ +	switch (hw->phy.type) { +	case ixgbe_phy_tn: +	case ixgbe_phy_aq: +	case ixgbe_phy_cu_unknown: +		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, +		MDIO_MMD_PMAPMD, &ext_ability); +		if (ext_ability & MDIO_PMA_EXTABLE_10GBT) +			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; +		if (ext_ability & MDIO_PMA_EXTABLE_1000BT) +			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; +		if (ext_ability & MDIO_PMA_EXTABLE_100BTX) +			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; +		goto out; +	default: +		break; +	} + +	switch (autoc & IXGBE_AUTOC_LMS_MASK) { +	case IXGBE_AUTOC_LMS_1G_AN: +	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: +		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) +			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; +		else +			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; +		break; +	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: +		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; +		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; +		else /* XAUI */ +			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; +		break; +	case IXGBE_AUTOC_LMS_KX4_AN: +	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: +		if (autoc & IXGBE_AUTOC_KX_SUPP) +			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; +		if (autoc & IXGBE_AUTOC_KX4_SUPP) +			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; +		break; +	default: +		break; +	} + +	if (hw->phy.type == ixgbe_phy_nl) { +		hw->phy.ops.identify_sfp(hw); + +		switch (hw->phy.sfp_type) { +		case ixgbe_sfp_type_da_cu: +			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; +			break; +		case ixgbe_sfp_type_sr: +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; +			break; +		case ixgbe_sfp_type_lr: +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 
+			break; +		default: +			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; +			break; +		} +	} + +	switch (hw->device_id) { +	case IXGBE_DEV_ID_82598_DA_DUAL_PORT: +		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; +		break; +	case IXGBE_DEV_ID_82598AF_DUAL_PORT: +	case IXGBE_DEV_ID_82598AF_SINGLE_PORT: +	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: +		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; +		break; +	case IXGBE_DEV_ID_82598EB_XF_LR: +		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; +		break; +	default: +		break; +	} + +out: +	return physical_layer; +} + +/** + *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple + *  port devices. + *  @hw: pointer to the HW structure + * + *  Calls common function and corrects issue with some single port devices + *  that enable LAN1 but not LAN0. + **/ +static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) +{ +	struct ixgbe_bus_info *bus = &hw->bus; +	u16 pci_gen = 0; +	u16 pci_ctrl2 = 0; + +	ixgbe_set_lan_id_multi_port_pcie(hw); + +	/* check if LAN0 is disabled */ +	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); +	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { + +		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); + +		/* if LAN0 is completely disabled force function to 0 */ +		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && +		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && +		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { + +			bus->func = 0; +		} +	} +} + +/** + * ixgbe_set_rxpba_82598 - Configure packet buffers + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure packet buffers. + */ +static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, +				  int strategy) +{ +	u32 rxpktsize = IXGBE_RXPBSIZE_64KB; +	u8  i = 0; + +	if (!num_pb) +		return; + +	/* Setup Rx packet buffer sizes */ +	switch (strategy) { +	case PBA_STRATEGY_WEIGHTED: +		/* Setup the first four at 80KB */ +		rxpktsize = IXGBE_RXPBSIZE_80KB; +		for (; i < 4; i++) +			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); +		/* Setup the last four at 48KB...don't re-init i */ +		rxpktsize = IXGBE_RXPBSIZE_48KB; +		/* Fall Through */ +	case PBA_STRATEGY_EQUAL: +	default: +		/* Divide the remaining Rx packet buffer evenly among the TCs */ +		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) +			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); +		break; +	} + +	/* Setup Tx packet buffer sizes */ +	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) +		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); + +	return; +} + +static struct ixgbe_mac_operations mac_ops_82598 = { +	.init_hw		= &ixgbe_init_hw_generic, +	.reset_hw		= &ixgbe_reset_hw_82598, +	.start_hw		= &ixgbe_start_hw_82598, +	.clear_hw_cntrs		= &ixgbe_clear_hw_cntrs_generic, +	.get_media_type		= &ixgbe_get_media_type_82598, +	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, +	.enable_rx_dma          = &ixgbe_enable_rx_dma_generic, +	.get_mac_addr		= &ixgbe_get_mac_addr_generic, +	.stop_adapter		= &ixgbe_stop_adapter_generic, +	.get_bus_info           = &ixgbe_get_bus_info_generic, +	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie_82598, +	.read_analog_reg8	= &ixgbe_read_analog_reg8_82598, +	.write_analog_reg8	= &ixgbe_write_analog_reg8_82598, +	.setup_link		= &ixgbe_setup_mac_link_82598, +	.set_rxpba		= &ixgbe_set_rxpba_82598, +	.check_link		= &ixgbe_check_mac_link_82598, +	.get_link_capabilities	= 
&ixgbe_get_link_capabilities_82598, +	.led_on			= &ixgbe_led_on_generic, +	.led_off		= &ixgbe_led_off_generic, +	.blink_led_start	= &ixgbe_blink_led_start_generic, +	.blink_led_stop		= &ixgbe_blink_led_stop_generic, +	.set_rar		= &ixgbe_set_rar_generic, +	.clear_rar		= &ixgbe_clear_rar_generic, +	.set_vmdq		= &ixgbe_set_vmdq_82598, +	.clear_vmdq		= &ixgbe_clear_vmdq_82598, +	.init_rx_addrs		= &ixgbe_init_rx_addrs_generic, +	.update_mc_addr_list	= &ixgbe_update_mc_addr_list_generic, +	.enable_mc		= &ixgbe_enable_mc_generic, +	.disable_mc		= &ixgbe_disable_mc_generic, +	.clear_vfta		= &ixgbe_clear_vfta_82598, +	.set_vfta		= &ixgbe_set_vfta_82598, +	.fc_enable		= &ixgbe_fc_enable_82598, +	.set_fw_drv_ver         = NULL, +	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync, +	.release_swfw_sync      = &ixgbe_release_swfw_sync, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_82598 = { +	.init_params		= &ixgbe_init_eeprom_params_generic, +	.read			= &ixgbe_read_eerd_generic, +	.read_buffer		= &ixgbe_read_eerd_buffer_generic, +	.calc_checksum          = &ixgbe_calc_eeprom_checksum_generic, +	.validate_checksum	= &ixgbe_validate_eeprom_checksum_generic, +	.update_checksum	= &ixgbe_update_eeprom_checksum_generic, +}; + +static struct ixgbe_phy_operations phy_ops_82598 = { +	.identify		= &ixgbe_identify_phy_generic, +	.identify_sfp		= &ixgbe_identify_sfp_module_generic, +	.init			= &ixgbe_init_phy_ops_82598, +	.reset			= &ixgbe_reset_phy_generic, +	.read_reg		= &ixgbe_read_phy_reg_generic, +	.write_reg		= &ixgbe_write_phy_reg_generic, +	.setup_link		= &ixgbe_setup_phy_link_generic, +	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic, +	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_82598, +	.check_overtemp   = &ixgbe_tn_check_overtemp, +}; + +struct ixgbe_info ixgbe_82598_info = { +	.mac			= ixgbe_mac_82598EB, +	.get_invariants		= &ixgbe_get_invariants_82598, +	.mac_ops		= &mac_ops_82598, +	.eeprom_ops		= &eeprom_ops_82598, +	.phy_ops		= &phy_ops_82598, +}; + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c new file mode 100644 index 00000000000..34f30ec79c2 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -0,0 +1,2263 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe.h" +#include "ixgbe_phy.h" +#include "ixgbe_mbx.h" + +#define IXGBE_82599_MAX_TX_QUEUES 128 +#define IXGBE_82599_MAX_RX_QUEUES 128 +#define IXGBE_82599_RAR_ENTRIES   128 +#define IXGBE_82599_MC_TBL_SIZE   128 +#define IXGBE_82599_VFT_TBL_SIZE  128 +#define IXGBE_82599_RX_PB_SIZE	  512 + +static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, +						 ixgbe_link_speed speed, +						 bool autoneg, +						 bool autoneg_wait_to_complete); +static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, +                                           ixgbe_link_speed speed, +                                           bool autoneg, +                                           bool autoneg_wait_to_complete); +static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, +				      bool autoneg_wait_to_complete); +static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, +                               ixgbe_link_speed speed, +                               bool autoneg, +                               bool autoneg_wait_to_complete); +static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, +                                         ixgbe_link_speed speed, +                                         bool autoneg, +                                         bool autoneg_wait_to_complete); +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); + +static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) +{ +	struct ixgbe_mac_info *mac = &hw->mac; + +	/* enable the laser control functions for SFP+ fiber */ +	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { +		mac->ops.disable_tx_laser = +		                       &ixgbe_disable_tx_laser_multispeed_fiber; +		mac->ops.enable_tx_laser = +		                        &ixgbe_enable_tx_laser_multispeed_fiber; +		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; +	} else { +		mac->ops.disable_tx_laser = NULL; +		mac->ops.enable_tx_laser = NULL; +		mac->ops.flap_tx_laser = NULL; +	} + +	if (hw->phy.multispeed_fiber) { +		/* Set up dual speed SFP+ support */ +		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; +	} else { +		if ((mac->ops.get_media_type(hw) == +		     ixgbe_media_type_backplane) && +		    (hw->phy.smart_speed == ixgbe_smart_speed_auto || +		     hw->phy.smart_speed == ixgbe_smart_speed_on) && +		     !ixgbe_verify_lesm_fw_enabled_82599(hw)) +			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; +		else +			mac->ops.setup_link = &ixgbe_setup_mac_link_82599; +	} +} + +static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +{ +	s32 ret_val = 0; +	u32 reg_anlp1 = 0; +	u32 i = 0; +	u16 list_offset, data_offset, data_value; + +	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { +		ixgbe_init_mac_link_ops_82599(hw); + +		hw->phy.ops.reset = NULL; + +		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, +		                                              &data_offset); +		if (ret_val != 0) +			goto setup_sfp_out; + +		/* PHY config will finish before releasing the 
semaphore */ +		ret_val = hw->mac.ops.acquire_swfw_sync(hw, +		                                        IXGBE_GSSR_MAC_CSR_SM); +		if (ret_val != 0) { +			ret_val = IXGBE_ERR_SWFW_SYNC; +			goto setup_sfp_out; +		} + +		hw->eeprom.ops.read(hw, ++data_offset, &data_value); +		while (data_value != 0xffff) { +			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); +			IXGBE_WRITE_FLUSH(hw); +			hw->eeprom.ops.read(hw, ++data_offset, &data_value); +		} + +		/* Release the semaphore */ +		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); +		/* +		 * Delay obtaining semaphore again to allow FW access, +		 * semaphore_delay is in ms usleep_range needs us. +		 */ +		usleep_range(hw->eeprom.semaphore_delay * 1000, +			     hw->eeprom.semaphore_delay * 2000); + +		/* Now restart DSP by setting Restart_AN and clearing LMS */ +		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, +		                IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | +		                IXGBE_AUTOC_AN_RESTART)); + +		/* Wait for AN to leave state 0 */ +		for (i = 0; i < 10; i++) { +			usleep_range(4000, 8000); +			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); +			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) +				break; +		} +		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { +			hw_dbg(hw, "sfp module setup not complete\n"); +			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; +			goto setup_sfp_out; +		} + +		/* Restart DSP by setting Restart_AN and return to SFI mode */ +		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, +		                IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | +		                IXGBE_AUTOC_AN_RESTART)); +	} + +setup_sfp_out: +	return ret_val; +} + +static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) +{ +	struct ixgbe_mac_info *mac = &hw->mac; + +	ixgbe_init_mac_link_ops_82599(hw); + +	mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; +	mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; +	mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; +	mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; +	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; +	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + +	return 0; +} + +/** + *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init + *  @hw: pointer to hardware structure + * + *  Initialize any function pointers that were not able to be + *  set during get_invariants because the PHY/SFP type was + *  not known.  Perform the SFP init if necessary. 
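+ *  For copper media the MAC setup_link and get_link_capabilities
+ *  pointers are overridden with the copper-specific variants below.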
+ * + **/ +static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) +{ +	struct ixgbe_mac_info *mac = &hw->mac; +	struct ixgbe_phy_info *phy = &hw->phy; +	s32 ret_val = 0; + +	/* Identify the PHY or SFP module */ +	ret_val = phy->ops.identify(hw); + +	/* Setup function pointers based on detected SFP module and speeds */ +	ixgbe_init_mac_link_ops_82599(hw); + +	/* If copper media, overwrite with copper function pointers */ +	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { +		mac->ops.setup_link = &ixgbe_setup_copper_link_82599; +		mac->ops.get_link_capabilities = +			&ixgbe_get_copper_link_capabilities_generic; +	} + +	/* Set necessary function pointers based on phy type */ +	switch (hw->phy.type) { +	case ixgbe_phy_tn: +		phy->ops.check_link = &ixgbe_check_phy_link_tnx; +		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; +		phy->ops.get_firmware_version = +		             &ixgbe_get_phy_firmware_version_tnx; +		break; +	case ixgbe_phy_aq: +		phy->ops.get_firmware_version = +			&ixgbe_get_phy_firmware_version_generic; +		break; +	default: +		break; +	} + +	return ret_val; +} + +/** + *  ixgbe_get_link_capabilities_82599 - Determines link capabilities + *  @hw: pointer to hardware structure + *  @speed: pointer to link speed + *  @negotiation: true when autoneg or autotry is enabled + * + *  Determines the link capabilities by reading the AUTOC register. + **/ +static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, +                                             ixgbe_link_speed *speed, +                                             bool *negotiation) +{ +	s32 status = 0; +	u32 autoc = 0; + +	/* Determine 1G link capabilities off of SFP+ type */ +	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || +	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { +		*speed = IXGBE_LINK_SPEED_1GB_FULL; +		*negotiation = true; +		goto out; +	} + +	/* +	 * Determine link capabilities based on the stored value of AUTOC, +	 * which represents EEPROM defaults.  If AUTOC value has not been +	 * stored, use the current register value. 
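+	 * The stored copy (orig_autoc) is captured by ixgbe_reset_hw_82599
+	 * the first time the MAC is reset.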
+	 */ +	if (hw->mac.orig_link_settings_stored) +		autoc = hw->mac.orig_autoc; +	else +		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + +	switch (autoc & IXGBE_AUTOC_LMS_MASK) { +	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: +		*speed = IXGBE_LINK_SPEED_1GB_FULL; +		*negotiation = false; +		break; + +	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: +		*speed = IXGBE_LINK_SPEED_10GB_FULL; +		*negotiation = false; +		break; + +	case IXGBE_AUTOC_LMS_1G_AN: +		*speed = IXGBE_LINK_SPEED_1GB_FULL; +		*negotiation = true; +		break; + +	case IXGBE_AUTOC_LMS_10G_SERIAL: +		*speed = IXGBE_LINK_SPEED_10GB_FULL; +		*negotiation = false; +		break; + +	case IXGBE_AUTOC_LMS_KX4_KX_KR: +	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: +		*speed = IXGBE_LINK_SPEED_UNKNOWN; +		if (autoc & IXGBE_AUTOC_KR_SUPP) +			*speed |= IXGBE_LINK_SPEED_10GB_FULL; +		if (autoc & IXGBE_AUTOC_KX4_SUPP) +			*speed |= IXGBE_LINK_SPEED_10GB_FULL; +		if (autoc & IXGBE_AUTOC_KX_SUPP) +			*speed |= IXGBE_LINK_SPEED_1GB_FULL; +		*negotiation = true; +		break; + +	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: +		*speed = IXGBE_LINK_SPEED_100_FULL; +		if (autoc & IXGBE_AUTOC_KR_SUPP) +			*speed |= IXGBE_LINK_SPEED_10GB_FULL; +		if (autoc & IXGBE_AUTOC_KX4_SUPP) +			*speed |= IXGBE_LINK_SPEED_10GB_FULL; +		if (autoc & IXGBE_AUTOC_KX_SUPP) +			*speed |= IXGBE_LINK_SPEED_1GB_FULL; +		*negotiation = true; +		break; + +	case IXGBE_AUTOC_LMS_SGMII_1G_100M: +		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; +		*negotiation = false; +		break; + +	default: +		status = IXGBE_ERR_LINK_SETUP; +		goto out; +		break; +	} + +	if (hw->phy.multispeed_fiber) { +		*speed |= IXGBE_LINK_SPEED_10GB_FULL | +		          IXGBE_LINK_SPEED_1GB_FULL; +		*negotiation = true; +	} + +out: +	return status; +} + +/** + *  ixgbe_get_media_type_82599 - Get media type + *  @hw: pointer to hardware structure + * + *  Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) +{ +	enum ixgbe_media_type media_type; + +	/* Detect if there is a copper PHY attached. */ +	switch (hw->phy.type) { +	case ixgbe_phy_cu_unknown: +	case ixgbe_phy_tn: +	case ixgbe_phy_aq: +		media_type = ixgbe_media_type_copper; +		goto out; +	default: +		break; +	} + +	switch (hw->device_id) { +	case IXGBE_DEV_ID_82599_KX4: +	case IXGBE_DEV_ID_82599_KX4_MEZZ: +	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: +	case IXGBE_DEV_ID_82599_KR: +	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: +	case IXGBE_DEV_ID_82599_XAUI_LOM: +		/* Default device ID is mezzanine card KX/KX4 */ +		media_type = ixgbe_media_type_backplane; +		break; +	case IXGBE_DEV_ID_82599_SFP: +	case IXGBE_DEV_ID_82599_SFP_FCOE: +	case IXGBE_DEV_ID_82599_SFP_EM: +	case IXGBE_DEV_ID_82599_SFP_SF2: +		media_type = ixgbe_media_type_fiber; +		break; +	case IXGBE_DEV_ID_82599_CX4: +		media_type = ixgbe_media_type_cx4; +		break; +	case IXGBE_DEV_ID_82599_T3_LOM: +		media_type = ixgbe_media_type_copper; +		break; +	case IXGBE_DEV_ID_82599_LS: +		media_type = ixgbe_media_type_fiber_lco; +		break; +	default: +		media_type = ixgbe_media_type_unknown; +		break; +	} +out: +	return media_type; +} + +/** + *  ixgbe_start_mac_link_82599 - Setup MAC link settings + *  @hw: pointer to hardware structure + *  @autoneg_wait_to_complete: true when waiting for completion is needed + * + *  Configures link settings based on values in the ixgbe_hw struct. + *  Restarts the link.  Performs autonegotiation if needed. 
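+ *  When waiting is requested, completion is detected by polling the
+ *  LINKS register for KX_AN_COMP in the KX4/KX/KR link modes.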
+ **/ +static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, +                               bool autoneg_wait_to_complete) +{ +	u32 autoc_reg; +	u32 links_reg; +	u32 i; +	s32 status = 0; + +	/* Restart link */ +	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	autoc_reg |= IXGBE_AUTOC_AN_RESTART; +	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + +	/* Only poll for autoneg to complete if specified to do so */ +	if (autoneg_wait_to_complete) { +		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == +		     IXGBE_AUTOC_LMS_KX4_KX_KR || +		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) == +		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || +		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) == +		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { +			links_reg = 0; /* Just in case Autoneg time = 0 */ +			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { +				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); +				if (links_reg & IXGBE_LINKS_KX_AN_COMP) +					break; +				msleep(100); +			} +			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { +				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; +				hw_dbg(hw, "Autoneg did not complete.\n"); +			} +		} +	} + +	/* Add delay to filter out noises during initial link setup */ +	msleep(50); + +	return status; +} + +/** + *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + *  @hw: pointer to hardware structure + * + *  The base drivers may require better control over SFP+ module + *  PHY states.  This includes selectively shutting down the Tx + *  laser on the PHY, effectively halting physical link. + **/ +static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ +	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + +	/* Disable tx laser; allow 100us to go dark per spec */ +	esdp_reg |= IXGBE_ESDP_SDP3; +	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); +	IXGBE_WRITE_FLUSH(hw); +	udelay(100); +} + +/** + *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser + *  @hw: pointer to hardware structure + * + *  The base drivers may require better control over SFP+ module + *  PHY states.  This includes selectively turning on the Tx + *  laser on the PHY, effectively starting physical link. + **/ +static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ +	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + +	/* Enable tx laser; allow 100ms to light up */ +	esdp_reg &= ~IXGBE_ESDP_SDP3; +	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); +	IXGBE_WRITE_FLUSH(hw); +	msleep(100); +} + +/** + *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + *  @hw: pointer to hardware structure + * + *  When the driver changes the link speeds that it can support, + *  it sets autotry_restart to true to indicate that we need to + *  initiate a new autotry session with the link partner.  To do + *  so, we set the speed then disable and re-enable the tx laser, to + *  alert the link partner that it also needs to restart autotry on its + *  end.  This is consistent with true clause 37 autoneg, which also + *  involves a loss of signal. + **/ +static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ +	if (hw->mac.autotry_restart) { +		ixgbe_disable_tx_laser_multispeed_fiber(hw); +		ixgbe_enable_tx_laser_multispeed_fiber(hw); +		hw->mac.autotry_restart = false; +	} +} + +/** + *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + *  @hw: pointer to hardware structure + *  @speed: new link speed + *  @autoneg: true if autonegotiation enabled + *  @autoneg_wait_to_complete: true when waiting for completion is needed + * + *  Set the link speed in the AUTOC register and restarts link. 
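+ *  Speeds are attempted in software, highest first (10G, then 1G),
+ *  because SFP+ fiber modules do not autonegotiate link speed.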
+ **/ +static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, +                                          ixgbe_link_speed speed, +                                          bool autoneg, +                                          bool autoneg_wait_to_complete) +{ +	s32 status = 0; +	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; +	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; +	u32 speedcnt = 0; +	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); +	u32 i = 0; +	bool link_up = false; +	bool negotiation; + +	/* Mask off requested but non-supported speeds */ +	status = hw->mac.ops.get_link_capabilities(hw, &link_speed, +						   &negotiation); +	if (status != 0) +		return status; + +	speed &= link_speed; + +	/* +	 * Try each speed one by one, highest priority first.  We do this in +	 * software because 10gb fiber doesn't support speed autonegotiation. +	 */ +	if (speed & IXGBE_LINK_SPEED_10GB_FULL) { +		speedcnt++; +		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + +		/* If we already have link at this speed, just jump out */ +		status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +						false); +		if (status != 0) +			return status; + +		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) +			goto out; + +		/* Set the module link speed */ +		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); +		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); +		IXGBE_WRITE_FLUSH(hw); + +		/* Allow module to change analog characteristics (1G->10G) */ +		msleep(40); + +		status = ixgbe_setup_mac_link_82599(hw, +						    IXGBE_LINK_SPEED_10GB_FULL, +						    autoneg, +						    autoneg_wait_to_complete); +		if (status != 0) +			return status; + +		/* Flap the tx laser if it has not already been done */ +		hw->mac.ops.flap_tx_laser(hw); + +		/* +		 * Wait for the controller to acquire link.  Per IEEE 802.3ap, +		 * Section 73.10.2, we may have to wait up to 500ms if KR is +		 * attempted.  82599 uses the same timing for 10g SFI. 
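+		 * The loop below polls check_link once every 100 ms, up to
+		 * five times, before falling back to the next lower speed.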
+		 */ +		for (i = 0; i < 5; i++) { +			/* Wait for the link partner to also set speed */ +			msleep(100); + +			/* If we have link, just jump out */ +			status = hw->mac.ops.check_link(hw, &link_speed, +							&link_up, false); +			if (status != 0) +				return status; + +			if (link_up) +				goto out; +		} +	} + +	if (speed & IXGBE_LINK_SPEED_1GB_FULL) { +		speedcnt++; +		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) +			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + +		/* If we already have link at this speed, just jump out */ +		status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +						false); +		if (status != 0) +			return status; + +		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) +			goto out; + +		/* Set the module link speed */ +		esdp_reg &= ~IXGBE_ESDP_SDP5; +		esdp_reg |= IXGBE_ESDP_SDP5_DIR; +		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); +		IXGBE_WRITE_FLUSH(hw); + +		/* Allow module to change analog characteristics (10G->1G) */ +		msleep(40); + +		status = ixgbe_setup_mac_link_82599(hw, +						    IXGBE_LINK_SPEED_1GB_FULL, +						    autoneg, +						    autoneg_wait_to_complete); +		if (status != 0) +			return status; + +		/* Flap the tx laser if it has not already been done */ +		hw->mac.ops.flap_tx_laser(hw); + +		/* Wait for the link partner to also set speed */ +		msleep(100); + +		/* If we have link, just jump out */ +		status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +						false); +		if (status != 0) +			return status; + +		if (link_up) +			goto out; +	} + +	/* +	 * We didn't get link.  Configure back to the highest speed we tried, +	 * (if there was more than one).  We call ourselves back with just the +	 * single highest speed that the user requested. +	 */ +	if (speedcnt > 1) +		status = ixgbe_setup_mac_link_multispeed_fiber(hw, +		                                               highest_link_speed, +		                                               autoneg, +		                                               autoneg_wait_to_complete); + +out: +	/* Set autoneg_advertised value based on input link speed */ +	hw->phy.autoneg_advertised = 0; + +	if (speed & IXGBE_LINK_SPEED_10GB_FULL) +		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + +	if (speed & IXGBE_LINK_SPEED_1GB_FULL) +		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + +	return status; +} + +/** + *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed + *  @hw: pointer to hardware structure + *  @speed: new link speed + *  @autoneg: true if autonegotiation enabled + *  @autoneg_wait_to_complete: true when waiting for completion is needed + * + *  Implements the Intel SmartSpeed algorithm. + **/ +static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, +				     ixgbe_link_speed speed, bool autoneg, +				     bool autoneg_wait_to_complete) +{ +	s32 status = 0; +	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; +	s32 i, j; +	bool link_up = false; +	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + +	 /* Set autoneg_advertised value based on input link speed */ +	hw->phy.autoneg_advertised = 0; + +	if (speed & IXGBE_LINK_SPEED_10GB_FULL) +		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + +	if (speed & IXGBE_LINK_SPEED_1GB_FULL) +		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + +	if (speed & IXGBE_LINK_SPEED_100_FULL) +		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + +	/* +	 * Implement Intel SmartSpeed algorithm.  
SmartSpeed will reduce the +	 * autoneg advertisement if link is unable to be established at the +	 * highest negotiated rate.  This can sometimes happen due to integrity +	 * issues with the physical media connection. +	 */ + +	/* First, try to get link with full advertisement */ +	hw->phy.smart_speed_active = false; +	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { +		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, +						    autoneg_wait_to_complete); +		if (status != 0) +			goto out; + +		/* +		 * Wait for the controller to acquire link.  Per IEEE 802.3ap, +		 * Section 73.10.2, we may have to wait up to 500ms if KR is +		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per +		 * Table 9 in the AN MAS. +		 */ +		for (i = 0; i < 5; i++) { +			mdelay(100); + +			/* If we have link, just jump out */ +			status = hw->mac.ops.check_link(hw, &link_speed, +							&link_up, false); +			if (status != 0) +				goto out; + +			if (link_up) +				goto out; +		} +	} + +	/* +	 * We didn't get link.  If we advertised KR plus one of KX4/KX +	 * (or BX4/BX), then disable KR and try again. +	 */ +	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || +	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) +		goto out; + +	/* Turn SmartSpeed on to disable KR support */ +	hw->phy.smart_speed_active = true; +	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, +					    autoneg_wait_to_complete); +	if (status != 0) +		goto out; + +	/* +	 * Wait for the controller to acquire link.  600ms will allow for +	 * the AN link_fail_inhibit_timer as well for multiple cycles of +	 * parallel detect, both 10g and 1g. This allows for the maximum +	 * connect attempts as defined in the AN MAS table 73-7. +	 */ +	for (i = 0; i < 6; i++) { +		mdelay(100); + +		/* If we have link, just jump out */ +		status = hw->mac.ops.check_link(hw, &link_speed, +						&link_up, false); +		if (status != 0) +			goto out; + +		if (link_up) +			goto out; +	} + +	/* We didn't get link.  Turn SmartSpeed back off. */ +	hw->phy.smart_speed_active = false; +	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, +					    autoneg_wait_to_complete); + +out: +	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) +		hw_dbg(hw, "Smartspeed has downgraded the link speed from " +		       "the maximum advertised\n"); +	return status; +} + +/** + *  ixgbe_setup_mac_link_82599 - Set MAC link speed + *  @hw: pointer to hardware structure + *  @speed: new link speed + *  @autoneg: true if autonegotiation enabled + *  @autoneg_wait_to_complete: true when waiting for completion is needed + * + *  Set the link speed in the AUTOC register and restarts link. + **/ +static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, +                               ixgbe_link_speed speed, bool autoneg, +                               bool autoneg_wait_to_complete) +{ +	s32 status = 0; +	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); +	u32 start_autoc = autoc; +	u32 orig_autoc = 0; +	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; +	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; +	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; +	u32 links_reg; +	u32 i; +	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + +	/* Check to see if speed passed in is supported. 
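+	 * Any bits not reported by get_link_capabilities are cleared from
+	 * the requested speed before it is applied.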
*/ +	hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); +	if (status != 0) +		goto out; + +	speed &= link_capabilities; + +	if (speed == IXGBE_LINK_SPEED_UNKNOWN) { +		status = IXGBE_ERR_LINK_SETUP; +		goto out; +	} + +	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ +	if (hw->mac.orig_link_settings_stored) +		orig_autoc = hw->mac.orig_autoc; +	else +		orig_autoc = autoc; + +	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || +	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || +	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { +		/* Set KX4/KX/KR support according to speed requested */ +		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); +		if (speed & IXGBE_LINK_SPEED_10GB_FULL) +			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) +				autoc |= IXGBE_AUTOC_KX4_SUPP; +			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && +			    (hw->phy.smart_speed_active == false)) +				autoc |= IXGBE_AUTOC_KR_SUPP; +		if (speed & IXGBE_LINK_SPEED_1GB_FULL) +			autoc |= IXGBE_AUTOC_KX_SUPP; +	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && +	           (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || +	            link_mode == IXGBE_AUTOC_LMS_1G_AN)) { +		/* Switch from 1G SFI to 10G SFI if requested */ +		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && +		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { +			autoc &= ~IXGBE_AUTOC_LMS_MASK; +			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; +		} +	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && +	           (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { +		/* Switch from 10G SFI to 1G SFI if requested */ +		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && +		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { +			autoc &= ~IXGBE_AUTOC_LMS_MASK; +			if (autoneg) +				autoc |= IXGBE_AUTOC_LMS_1G_AN; +			else +				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; +		} +	} + +	if (autoc != start_autoc) { +		/* Restart link */ +		autoc |= IXGBE_AUTOC_AN_RESTART; +		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + +		/* Only poll for autoneg to complete if specified to do so */ +		if (autoneg_wait_to_complete) { +			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || +			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || +			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { +				links_reg = 0; /*Just in case Autoneg time=0*/ +				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { +					links_reg = +					       IXGBE_READ_REG(hw, IXGBE_LINKS); +					if (links_reg & IXGBE_LINKS_KX_AN_COMP) +						break; +					msleep(100); +				} +				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { +					status = +					        IXGBE_ERR_AUTONEG_NOT_COMPLETE; +					hw_dbg(hw, "Autoneg did not " +					       "complete.\n"); +				} +			} +		} + +		/* Add delay to filter out noises during initial link setup */ +		msleep(50); +	} + +out: +	return status; +} + +/** + *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field + *  @hw: pointer to hardware structure + *  @speed: new link speed + *  @autoneg: true if autonegotiation enabled + *  @autoneg_wait_to_complete: true if waiting is needed to complete + * + *  Restarts link on PHY and MAC based on settings passed in. 
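+ *  The external PHY is programmed first, then the MAC link is
+ *  restarted via ixgbe_start_mac_link_82599.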
+ **/ +static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, +                                         ixgbe_link_speed speed, +                                         bool autoneg, +                                         bool autoneg_wait_to_complete) +{ +	s32 status; + +	/* Setup the PHY according to input speed */ +	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, +	                                      autoneg_wait_to_complete); +	/* Set up MAC */ +	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); + +	return status; +} + +/** + *  ixgbe_reset_hw_82599 - Perform hardware reset + *  @hw: pointer to hardware structure + * + *  Resets the hardware by resetting the transmit and receive units, masks + *  and clears all interrupts, perform a PHY reset, and perform a link (MAC) + *  reset. + **/ +static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) +{ +	s32 status = 0; +	u32 ctrl; +	u32 i; +	u32 autoc; +	u32 autoc2; + +	/* Call adapter stop to disable tx/rx and clear interrupts */ +	hw->mac.ops.stop_adapter(hw); + +	/* PHY ops must be identified and initialized prior to reset */ + +	/* Identify PHY and related function pointers */ +	status = hw->phy.ops.init(hw); + +	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) +		goto reset_hw_out; + +	/* Setup SFP module if there is one present. */ +	if (hw->phy.sfp_setup_needed) { +		status = hw->mac.ops.setup_sfp(hw); +		hw->phy.sfp_setup_needed = false; +	} + +	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) +		goto reset_hw_out; + +	/* Reset PHY */ +	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) +		hw->phy.ops.reset(hw); + +	/* +	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master +	 * access and verify no pending requests before reset +	 */ +	ixgbe_disable_pcie_master(hw); + +mac_reset_top: +	/* +	 * Issue global reset to the MAC.  This needs to be a SW reset. +	 * If link reset is used, it might reset the MAC when mng is using it +	 */ +	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); +	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); +	IXGBE_WRITE_FLUSH(hw); + +	/* Poll for reset bit to self-clear indicating reset is complete */ +	for (i = 0; i < 10; i++) { +		udelay(1); +		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); +		if (!(ctrl & IXGBE_CTRL_RST)) +			break; +	} +	if (ctrl & IXGBE_CTRL_RST) { +		status = IXGBE_ERR_RESET_FAILED; +		hw_dbg(hw, "Reset polling failed to complete.\n"); +	} + +	/* +	 * Double resets are required for recovery from certain error +	 * conditions.  Between resets, it is necessary to stall to allow time +	 * for any pending HW events to complete.  We use 1usec since that is +	 * what is needed for ixgbe_disable_pcie_master().  The second reset +	 * then clears out any effects of those events. +	 */ +	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { +		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; +		udelay(1); +		goto mac_reset_top; +	} + +	msleep(50); + +	/* +	 * Store the original AUTOC/AUTOC2 values if they have not been +	 * stored off yet.  Otherwise restore the stored original +	 * values since the reset operation sets back to defaults. 
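+	 * AUTOC is only rewritten when it differs from the stored value,
+	 * which avoids an unnecessary autoneg restart.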
+	 */ +	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); +	if (hw->mac.orig_link_settings_stored == false) { +		hw->mac.orig_autoc = autoc; +		hw->mac.orig_autoc2 = autoc2; +		hw->mac.orig_link_settings_stored = true; +	} else { +		if (autoc != hw->mac.orig_autoc) +			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | +			                IXGBE_AUTOC_AN_RESTART)); + +		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != +		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { +			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; +			autoc2 |= (hw->mac.orig_autoc2 & +			           IXGBE_AUTOC2_UPPER_MASK); +			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); +		} +	} + +	/* Store the permanent mac address */ +	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + +	/* +	 * Store MAC address from RAR0, clear receive address registers, and +	 * clear the multicast table.  Also reset num_rar_entries to 128, +	 * since we modify this value when programming the SAN MAC address. +	 */ +	hw->mac.num_rar_entries = 128; +	hw->mac.ops.init_rx_addrs(hw); + +	/* Store the permanent SAN mac address */ +	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + +	/* Add the SAN MAC address to the RAR only if it's a valid address */ +	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { +		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, +		                    hw->mac.san_addr, 0, IXGBE_RAH_AV); + +		/* Reserve the last RAR for the SAN MAC address */ +		hw->mac.num_rar_entries--; +	} + +	/* Store the alternative WWNN/WWPN prefix */ +	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, +	                               &hw->mac.wwpn_prefix); + +reset_hw_out: +	return status; +} + +/** + *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. + *  @hw: pointer to hardware structure + **/ +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) +{ +	int i; +	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); +	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + +	/* +	 * Before starting reinitialization process, +	 * FDIRCMD.CMD must be zero. +	 */ +	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { +		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & +		      IXGBE_FDIRCMD_CMD_MASK)) +			break; +		udelay(10); +	} +	if (i >= IXGBE_FDIRCMD_CMD_POLL) { +		hw_dbg(hw, "Flow Director previous command isn't complete, " +		       "aborting table re-initialization.\n"); +		return IXGBE_ERR_FDIR_REINIT_FAILED; +	} + +	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); +	IXGBE_WRITE_FLUSH(hw); +	/* +	 * 82599 adapters flow director init flow cannot be restarted, +	 * Workaround 82599 silicon errata by performing the following steps +	 * before re-writing the FDIRCTRL control register with the same value. +	 * - write 1 to bit 8 of FDIRCMD register & +	 * - write 0 to bit 8 of FDIRCMD register +	 */ +	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, +	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | +	                 IXGBE_FDIRCMD_CLEARHT)); +	IXGBE_WRITE_FLUSH(hw); +	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, +	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & +	                 ~IXGBE_FDIRCMD_CLEARHT)); +	IXGBE_WRITE_FLUSH(hw); +	/* +	 * Clear FDIR Hash register to clear any leftover hashes +	 * waiting to be programmed. 
+	 */ +	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); +	IXGBE_WRITE_FLUSH(hw); + +	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); +	IXGBE_WRITE_FLUSH(hw); + +	/* Poll init-done after we write FDIRCTRL register */ +	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { +		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & +		                   IXGBE_FDIRCTRL_INIT_DONE) +			break; +		udelay(10); +	} +	if (i >= IXGBE_FDIR_INIT_DONE_POLL) { +		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); +		return IXGBE_ERR_FDIR_REINIT_FAILED; +	} + +	/* Clear FDIR statistics registers (read to clear) */ +	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); +	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); +	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); +	IXGBE_READ_REG(hw, IXGBE_FDIRMISS); +	IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + +	return 0; +} + +/** + *  ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer + *  @hw: pointer to hardware structure + *  @pballoc: which mode to allocate filters with + **/ +static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc) +{ +	u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT; +	u32 current_rxpbsize = 0; +	int i; + +	/* reserve space for Flow Director filters */ +	switch (pballoc) { +	case IXGBE_FDIR_PBALLOC_256K: +		fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT; +		break; +	case IXGBE_FDIR_PBALLOC_128K: +		fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT; +		break; +	case IXGBE_FDIR_PBALLOC_64K: +		fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT; +		break; +	case IXGBE_FDIR_PBALLOC_NONE: +	default: +		return IXGBE_ERR_PARAM; +	} + +	/* determine current RX packet buffer size */ +	for (i = 0; i < 8; i++) +		current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + +	/* if there is already room for the filters do nothing */ +	if (current_rxpbsize <= fdir_pbsize) +		return 0; + +	if (current_rxpbsize > hw->mac.rx_pb_size) { +		/* +		 * if rxpbsize is greater than max then HW max the Rx buffer +		 * sizes are unconfigured or misconfigured since HW default is +		 * to give the full buffer to each traffic class resulting in +		 * the total size being buffer size 8x actual size +		 * +		 * This assumes no DCB since the RXPBSIZE registers appear to +		 * be unconfigured. +		 */ +		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), fdir_pbsize); +		for (i = 1; i < 8; i++) +			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); +	} else { +		/* +		 * Since the Rx packet buffer appears to have already been +		 * configured we need to shrink each packet buffer by enough +		 * to make room for the filters.  As such we take each rxpbsize +		 * value and multiply it by a fraction representing the size +		 * needed over the size we currently have. +		 * +		 * We need to reduce fdir_pbsize and current_rxpbsize to +		 * 1/1024 of their original values in order to avoid +		 * overflowing the u32 being used to store rxpbsize. 
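+		 * For example, reserving 256 KB (IXGBE_FDIR_PBALLOC_256K) out
+		 * of the 82599's 512 KB packet buffer when all 512 KB are in
+		 * use scales every RXPBSIZE entry to half its current value.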
+		 */ +		fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT; +		current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT; +		for (i = 0; i < 8; i++) { +			u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); +			rxpbsize *= fdir_pbsize; +			rxpbsize /= current_rxpbsize; +			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); +		} +	} + +	return 0; +} + +/** + *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers + *  @hw: pointer to hardware structure + *  @fdirctrl: value to write to flow director control register + **/ +static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ +	int i; + +	/* Prime the keys for hashing */ +	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); +	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); + +	/* +	 * Poll init-done after we write the register.  Estimated times: +	 *      10G: PBALLOC = 11b, timing is 60us +	 *       1G: PBALLOC = 11b, timing is 600us +	 *     100M: PBALLOC = 11b, timing is 6ms +	 * +	 *     Multiple these timings by 4 if under full Rx load +	 * +	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for +	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then +	 * this might not finish in our poll time, but we can live with that +	 * for now. +	 */ +	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); +	IXGBE_WRITE_FLUSH(hw); +	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { +		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & +		                   IXGBE_FDIRCTRL_INIT_DONE) +			break; +		usleep_range(1000, 2000); +	} + +	if (i >= IXGBE_FDIR_INIT_DONE_POLL) +		hw_dbg(hw, "Flow Director poll time exceeded!\n"); +} + +/** + *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + *  @hw: pointer to hardware structure + *  @fdirctrl: value to write to flow director control register, initially + *             contains just the value of the Rx packet buffer allocation + **/ +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ +	s32 err; + +	/* Before enabling Flow Director, verify the Rx Packet Buffer size */ +	err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl); +	if (err) +		return err; + +	/* +	 * Continue setup of fdirctrl register bits: +	 *  Move the flexible bytes to use the ethertype - shift 6 words +	 *  Set the maximum length per hash bucket to 0xA filters +	 *  Send interrupt when 64 filters are left +	 */ +	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | +		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | +		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + +	/* write hashes and fdirctrl register, poll for completion */ +	ixgbe_fdir_enable_82599(hw, fdirctrl); + +	return 0; +} + +/** + *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + *  @hw: pointer to hardware structure + *  @fdirctrl: value to write to flow director control register, initially + *             contains just the value of the Rx packet buffer allocation + **/ +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ +	s32 err; + +	/* Before enabling Flow Director, verify the Rx Packet Buffer size */ +	err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl); +	if (err) +		return err; + +	/* +	 * Continue setup of fdirctrl register bits: +	 *  Turn perfect match filtering on +	 *  Report hash in RSS field of Rx wb descriptor +	 *  Initialize the drop queue +	 *  Move the flexible bytes to use the ethertype - shift 6 words +	 *  Set the maximum length per hash bucket to 0xA filters +	 *  Send interrupt when 64 (0x4 * 16) filters are left +	 */ +	
fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | +		    IXGBE_FDIRCTRL_REPORT_STATUS | +		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | +		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | +		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | +		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + +	/* write hashes and fdirctrl register, poll for completion */ +	ixgbe_fdir_enable_82599(hw, fdirctrl); + +	return 0; +} + +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define IXGBE_ATR_COMMON_HASH_KEY \ +		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) +#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ +	u32 n = (_n); \ +	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ +		common_hash ^= lo_hash_dword >> n; \ +	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ +		bucket_hash ^= lo_hash_dword >> n; \ +	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ +		sig_hash ^= lo_hash_dword << (16 - n); \ +	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ +		common_hash ^= hi_hash_dword >> n; \ +	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ +		bucket_hash ^= hi_hash_dword >> n; \ +	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ +		sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0); + +/** + *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash + *  @stream: input bitstream to compute the hash on + * + *  This function is almost identical to the function above but contains + *  several optomizations such as unwinding all of the loops, letting the + *  compiler work out all of the conditional ifs since the keys are static + *  defines, and computing two keys at once since the hashed dword stream + *  will be the same for both keys. 
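+ *  The bucket hash ends up in the low word of the return value and the
+ *  signature hash in the high word, matching the FDIRHASH layout.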
+ **/ +static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, +					    union ixgbe_atr_hash_dword common) +{ +	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; +	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + +	/* record the flow_vm_vlan bits as they are a key part to the hash */ +	flow_vm_vlan = ntohl(input.dword); + +	/* generate common hash dword */ +	hi_hash_dword = ntohl(common.dword); + +	/* low dword is word swapped version of common */ +	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + +	/* apply flow ID/VM pool/VLAN ID bits to hash words */ +	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + +	/* Process bits 0 and 16 */ +	IXGBE_COMPUTE_SIG_HASH_ITERATION(0); + +	/* +	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to +	 * delay this because bit 0 of the stream should not be processed +	 * so we do not add the vlan until after bit 0 was processed +	 */ +	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + +	/* Process remaining 30 bit of the key */ +	IXGBE_COMPUTE_SIG_HASH_ITERATION(1); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(2); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(3); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(4); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(5); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(6); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(7); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(8); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(9); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(10); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(11); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(12); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(13); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(14); +	IXGBE_COMPUTE_SIG_HASH_ITERATION(15); + +	/* combine common_hash result with signature and bucket hashes */ +	bucket_hash ^= common_hash; +	bucket_hash &= IXGBE_ATR_HASH_MASK; + +	sig_hash ^= common_hash << 16; +	sig_hash &= IXGBE_ATR_HASH_MASK << 16; + +	/* return completed signature hash */ +	return sig_hash ^ bucket_hash; +} + +/** + *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter + *  @hw: pointer to hardware structure + *  @input: unique input dword + *  @common: compressed common input dword + *  @queue: queue index to direct traffic to + **/ +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, +                                          union ixgbe_atr_hash_dword input, +                                          union ixgbe_atr_hash_dword common, +                                          u8 queue) +{ +	u64  fdirhashcmd; +	u32  fdircmd; + +	/* +	 * Get the flow_type in order to program FDIRCMD properly +	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 +	 */ +	switch (input.formatted.flow_type) { +	case IXGBE_ATR_FLOW_TYPE_TCPV4: +	case IXGBE_ATR_FLOW_TYPE_UDPV4: +	case IXGBE_ATR_FLOW_TYPE_SCTPV4: +	case IXGBE_ATR_FLOW_TYPE_TCPV6: +	case IXGBE_ATR_FLOW_TYPE_UDPV6: +	case IXGBE_ATR_FLOW_TYPE_SCTPV6: +		break; +	default: +		hw_dbg(hw, " Error on flow type input\n"); +		return IXGBE_ERR_CONFIG; +	} + +	/* configure FDIRCMD register */ +	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | +	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; +	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; +	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + +	/* +	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits +	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH. 
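+	 * A single 64-bit write programs the hash and the command together,
+	 * so FDIRCMD cannot be written ahead of its matching hash.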
+	 */ +	fdirhashcmd = (u64)fdircmd << 32; +	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); +	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + +	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + +	return 0; +} + +#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ +	u32 n = (_n); \ +	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ +		bucket_hash ^= lo_hash_dword >> n; \ +	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ +		bucket_hash ^= hi_hash_dword >> n; \ +} while (0); + +/** + *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash + *  @atr_input: input bitstream to compute the hash on + *  @input_mask: mask for the input bitstream + * + *  This function serves two main purposes.  First it applys the input_mask + *  to the atr_input resulting in a cleaned up atr_input data stream. + *  Secondly it computes the hash and stores it in the bkt_hash field at + *  the end of the input byte stream.  This way it will be available for + *  future use without needing to recompute the hash. + **/ +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, +					  union ixgbe_atr_input *input_mask) +{ + +	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; +	u32 bucket_hash = 0; + +	/* Apply masks to input data */ +	input->dword_stream[0]  &= input_mask->dword_stream[0]; +	input->dword_stream[1]  &= input_mask->dword_stream[1]; +	input->dword_stream[2]  &= input_mask->dword_stream[2]; +	input->dword_stream[3]  &= input_mask->dword_stream[3]; +	input->dword_stream[4]  &= input_mask->dword_stream[4]; +	input->dword_stream[5]  &= input_mask->dword_stream[5]; +	input->dword_stream[6]  &= input_mask->dword_stream[6]; +	input->dword_stream[7]  &= input_mask->dword_stream[7]; +	input->dword_stream[8]  &= input_mask->dword_stream[8]; +	input->dword_stream[9]  &= input_mask->dword_stream[9]; +	input->dword_stream[10] &= input_mask->dword_stream[10]; + +	/* record the flow_vm_vlan bits as they are a key part to the hash */ +	flow_vm_vlan = ntohl(input->dword_stream[0]); + +	/* generate common hash dword */ +	hi_hash_dword = ntohl(input->dword_stream[1] ^ +				    input->dword_stream[2] ^ +				    input->dword_stream[3] ^ +				    input->dword_stream[4] ^ +				    input->dword_stream[5] ^ +				    input->dword_stream[6] ^ +				    input->dword_stream[7] ^ +				    input->dword_stream[8] ^ +				    input->dword_stream[9] ^ +				    input->dword_stream[10]); + +	/* low dword is word swapped version of common */ +	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + +	/* apply flow ID/VM pool/VLAN ID bits to hash words */ +	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + +	/* Process bits 0 and 16 */ +	IXGBE_COMPUTE_BKT_HASH_ITERATION(0); + +	/* +	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to +	 * delay this because bit 0 of the stream should not be processed +	 * so we do not add the vlan until after bit 0 was processed +	 */ +	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + +	/* Process remaining 30 bit of the key */ +	IXGBE_COMPUTE_BKT_HASH_ITERATION(1); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(2); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(3); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(4); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(5); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(6); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(7); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(8); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(9); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(10); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(11); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(12); +	
IXGBE_COMPUTE_BKT_HASH_ITERATION(13); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(14); +	IXGBE_COMPUTE_BKT_HASH_ITERATION(15); + +	/* +	 * Limit hash to 13 bits since max bucket count is 8K. +	 * Store result at the end of the input stream. +	 */ +	input->formatted.bkt_hash = bucket_hash & 0x1FFF; +} + +/** + *  ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks + *  @input_mask: mask to be bit swapped + * + *  The source and destination port masks for flow director are bit swapped + *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to + *  generate a correctly swapped value we need to bit swap the mask and that + *  is what is accomplished by this function. + **/ +static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) +{ +	u32 mask = ntohs(input_mask->formatted.dst_port); +	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; +	mask |= ntohs(input_mask->formatted.src_port); +	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); +	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); +	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); +	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian.  As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. + */ +#define IXGBE_STORE_AS_BE32(_value) \ +	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ +	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + +#define IXGBE_WRITE_REG_BE32(a, reg, value) \ +	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) + +#define IXGBE_STORE_AS_BE16(_value) \ +	ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, +				    union ixgbe_atr_input *input_mask) +{ +	/* mask IPv6 since it is currently not supported */ +	u32 fdirm = IXGBE_FDIRM_DIPv6; +	u32 fdirtcpm; + +	/* +	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr +	 * are zero, then assume a full mask for that field.  Also assume that +	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type +	 * cannot be masked out in this implementation. +	 * +	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this +	 * point in time. 
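+	 * A bit set in FDIRM causes the corresponding field to be ignored
+	 * when filters are matched.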
+	 */ + +	/* verify bucket hash is cleared on hash generation */ +	if (input_mask->formatted.bkt_hash) +		hw_dbg(hw, " bucket hash should always be 0 in mask\n"); + +	/* Program FDIRM and verify partial masks */ +	switch (input_mask->formatted.vm_pool & 0x7F) { +	case 0x0: +		fdirm |= IXGBE_FDIRM_POOL; +	case 0x7F: +		break; +	default: +		hw_dbg(hw, " Error on vm pool mask\n"); +		return IXGBE_ERR_CONFIG; +	} + +	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { +	case 0x0: +		fdirm |= IXGBE_FDIRM_L4P; +		if (input_mask->formatted.dst_port || +		    input_mask->formatted.src_port) { +			hw_dbg(hw, " Error on src/dst port mask\n"); +			return IXGBE_ERR_CONFIG; +		} +	case IXGBE_ATR_L4TYPE_MASK: +		break; +	default: +		hw_dbg(hw, " Error on flow type mask\n"); +		return IXGBE_ERR_CONFIG; +	} + +	switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { +	case 0x0000: +		/* mask VLAN ID, fall through to mask VLAN priority */ +		fdirm |= IXGBE_FDIRM_VLANID; +	case 0x0FFF: +		/* mask VLAN priority */ +		fdirm |= IXGBE_FDIRM_VLANP; +		break; +	case 0xE000: +		/* mask VLAN ID only, fall through */ +		fdirm |= IXGBE_FDIRM_VLANID; +	case 0xEFFF: +		/* no VLAN fields masked */ +		break; +	default: +		hw_dbg(hw, " Error on VLAN mask\n"); +		return IXGBE_ERR_CONFIG; +	} + +	switch (input_mask->formatted.flex_bytes & 0xFFFF) { +	case 0x0000: +		/* Mask Flex Bytes, fall through */ +		fdirm |= IXGBE_FDIRM_FLEX; +	case 0xFFFF: +		break; +	default: +		hw_dbg(hw, " Error on flexible byte mask\n"); +		return IXGBE_ERR_CONFIG; +	} + +	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */ +	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + +	/* store the TCP/UDP port masks, bit reversed from port layout */ +	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); + +	/* write both the same so that UDP and TCP use the same mask */ +	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); +	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + +	/* store source and destination IP masks (big-enian) */ +	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, +			     ~input_mask->formatted.src_ip[0]); +	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, +			     ~input_mask->formatted.dst_ip[0]); + +	return 0; +} + +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, +					  union ixgbe_atr_input *input, +					  u16 soft_id, u8 queue) +{ +	u32 fdirport, fdirvlan, fdirhash, fdircmd; + +	/* currently IPv6 is not supported, must be programmed with 0 */ +	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), +			     input->formatted.src_ip[0]); +	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), +			     input->formatted.src_ip[1]); +	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), +			     input->formatted.src_ip[2]); + +	/* record the source address (big-endian) */ +	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); + +	/* record the first 32 bits of the destination address (big-endian) */ +	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); + +	/* record source and destination port (little-endian)*/ +	fdirport = ntohs(input->formatted.dst_port); +	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; +	fdirport |= ntohs(input->formatted.src_port); +	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + +	/* record vlan (little-endian) and flex_bytes(big-endian) */ +	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); +	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; +	fdirvlan |= ntohs(input->formatted.vlan_id); +	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + +	/* configure FDIRHASH register */ +	fdirhash = 
input->formatted.bkt_hash; +	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; +	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + +	/* +	 * flush all previous writes to make certain registers are +	 * programmed prior to issuing the command +	 */ +	IXGBE_WRITE_FLUSH(hw); + +	/* configure FDIRCMD register */ +	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | +		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; +	if (queue == IXGBE_FDIR_DROP_QUEUE) +		fdircmd |= IXGBE_FDIRCMD_DROP; +	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; +	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; +	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; + +	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + +	return 0; +} + +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, +					  union ixgbe_atr_input *input, +					  u16 soft_id) +{ +	u32 fdirhash; +	u32 fdircmd = 0; +	u32 retry_count; +	s32 err = 0; + +	/* configure FDIRHASH register */ +	fdirhash = input->formatted.bkt_hash; +	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; +	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + +	/* flush hash to HW */ +	IXGBE_WRITE_FLUSH(hw); + +	/* Query if filter is present */ +	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + +	for (retry_count = 10; retry_count; retry_count--) { +		/* allow 10us for query to process */ +		udelay(10); +		/* verify query completed successfully */ +		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); +		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) +			break; +	} + +	if (!retry_count) +		err = IXGBE_ERR_FDIR_REINIT_FAILED; + +	/* if filter exists in hardware then remove it */ +	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { +		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); +		IXGBE_WRITE_FLUSH(hw); +		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, +				IXGBE_FDIRCMD_CMD_REMOVE_FLOW); +	} + +	return err; +} + +/** + *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register + *  @hw: pointer to hardware structure + *  @reg: analog register to read + *  @val: read value + * + *  Performs read operation to Omer analog register specified. + **/ +static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ +	u32  core_ctl; + +	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | +	                (reg << 8)); +	IXGBE_WRITE_FLUSH(hw); +	udelay(10); +	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); +	*val = (u8)core_ctl; + +	return 0; +} + +/** + *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register + *  @hw: pointer to hardware structure + *  @reg: atlas register to write + *  @val: value to write + * + *  Performs write operation to Omer analog register specified. + **/ +static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) +{ +	u32  core_ctl; + +	core_ctl = (reg << 8) | val; +	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); +	IXGBE_WRITE_FLUSH(hw); +	udelay(10); + +	return 0; +} + +/** + *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx + *  @hw: pointer to hardware structure + * + *  Starts the hardware using the generic start_hw function + *  and the generation start_hw function. + *  Then performs revision-specific operations, if any. 
+ **/ +static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) +{ +	s32 ret_val = 0; + +	ret_val = ixgbe_start_hw_generic(hw); +	if (ret_val != 0) +		goto out; + +	ret_val = ixgbe_start_hw_gen2(hw); +	if (ret_val != 0) +		goto out; + +	/* We need to run link autotry after the driver loads */ +	hw->mac.autotry_restart = true; +	hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE; + +	if (ret_val == 0) +		ret_val = ixgbe_verify_fw_version_82599(hw); +out: +	return ret_val; +} + +/** + *  ixgbe_identify_phy_82599 - Get physical layer module + *  @hw: pointer to hardware structure + * + *  Determines the physical layer module found on the current adapter. + *  If PHY already detected, maintains current PHY type in hw struct, + *  otherwise executes the PHY detection routine. + **/ +static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +{ +	s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + +	/* Detect PHY if not unknown - returns success if already detected. */ +	status = ixgbe_identify_phy_generic(hw); +	if (status != 0) { +		/* 82599 10GBASE-T requires an external PHY */ +		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) +			goto out; +		else +			status = ixgbe_identify_sfp_module_generic(hw); +	} + +	/* Set PHY type none if no PHY detected */ +	if (hw->phy.type == ixgbe_phy_unknown) { +		hw->phy.type = ixgbe_phy_none; +		status = 0; +	} + +	/* Return error if SFP module has been detected but is not supported */ +	if (hw->phy.type == ixgbe_phy_sfp_unsupported) +		status = IXGBE_ERR_SFP_NOT_SUPPORTED; + +out: +	return status; +} + +/** + *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type + *  @hw: pointer to hardware structure + * + *  Determines physical layer capabilities of the current configuration. + **/ +static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) +{ +	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; +	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); +	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; +	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; +	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; +	u16 ext_ability = 0; +	u8 comp_codes_10g = 0; +	u8 comp_codes_1g = 0; + +	hw->phy.ops.identify(hw); + +	switch (hw->phy.type) { +	case ixgbe_phy_tn: +	case ixgbe_phy_aq: +	case ixgbe_phy_cu_unknown: +		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, +							 &ext_ability); +		if (ext_ability & MDIO_PMA_EXTABLE_10GBT) +			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; +		if (ext_ability & MDIO_PMA_EXTABLE_1000BT) +			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; +		if (ext_ability & MDIO_PMA_EXTABLE_100BTX) +			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; +		goto out; +	default: +		break; +	} + +	switch (autoc & IXGBE_AUTOC_LMS_MASK) { +	case IXGBE_AUTOC_LMS_1G_AN: +	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: +		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { +			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | +			    IXGBE_PHYSICAL_LAYER_1000BASE_BX; +			goto out; +		} else +			/* SFI mode so read SFP module */ +			goto sfp_check; +		break; +	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: +		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; +		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; +		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; +		goto out; +		break; +	
case IXGBE_AUTOC_LMS_10G_SERIAL: +		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; +			goto out; +		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) +			goto sfp_check; +		break; +	case IXGBE_AUTOC_LMS_KX4_KX_KR: +	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: +		if (autoc & IXGBE_AUTOC_KX_SUPP) +			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; +		if (autoc & IXGBE_AUTOC_KX4_SUPP) +			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; +		if (autoc & IXGBE_AUTOC_KR_SUPP) +			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; +		goto out; +		break; +	default: +		goto out; +		break; +	} + +sfp_check: +	/* SFP check must be done last since DA modules are sometimes used to +	 * test KR mode -  we need to id KR mode correctly before SFP module. +	 * Call identify_sfp because the pluggable module may have changed */ +	hw->phy.ops.identify_sfp(hw); +	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) +		goto out; + +	switch (hw->phy.type) { +	case ixgbe_phy_sfp_passive_tyco: +	case ixgbe_phy_sfp_passive_unknown: +		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; +		break; +	case ixgbe_phy_sfp_ftl_active: +	case ixgbe_phy_sfp_active_unknown: +		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; +		break; +	case ixgbe_phy_sfp_avago: +	case ixgbe_phy_sfp_ftl: +	case ixgbe_phy_sfp_intel: +	case ixgbe_phy_sfp_unknown: +		hw->phy.ops.read_i2c_eeprom(hw, +		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); +		hw->phy.ops.read_i2c_eeprom(hw, +		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); +		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; +		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; +		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) +			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; +		break; +	default: +		break; +	} + +out: +	return physical_layer; +} + +/** + *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 + *  @hw: pointer to hardware structure + *  @regval: register value to write to RXCTRL + * + *  Enables the Rx DMA unit for 82599 + **/ +static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +{ +#define IXGBE_MAX_SECRX_POLL 30 +	int i; +	int secrxreg; + +	/* +	 * Workaround for 82599 silicon errata when enabling the Rx datapath. +	 * If traffic is incoming before we enable the Rx unit, it could hang +	 * the Rx DMA unit.  Therefore, make sure the security engine is +	 * completely disabled prior to enabling the Rx unit. +	 */ +	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); +	secrxreg |= IXGBE_SECRXCTRL_RX_DIS; +	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); +	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { +		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); +		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) +			break; +		else +			/* Use interrupt-safe sleep just in case */ +			udelay(10); +	} + +	/* For informational purposes only */ +	if (i >= IXGBE_MAX_SECRX_POLL) +		hw_dbg(hw, "Rx unit being enabled before security " +		       "path fully disabled.  
Continuing with init.\n");
+
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
+ *  @hw: pointer to hardware structure
+ *
+ *  Verifies that the installed firmware version is 0.6 or higher
+ *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ *  if the FW version is not supported.
+ **/
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_EEPROM_VERSION;
+	u16 fw_offset, fw_ptp_cfg_offset;
+	u16 fw_version = 0;
+
+	/* firmware check is only necessary for SFI devices */
+	if (hw->phy.media_type != ixgbe_media_type_fiber) {
+		status = 0;
+		goto fw_version_out;
+	}
+
+	/* get the offset to the Firmware Module block */
+	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+		goto fw_version_out;
+
+	/* get the offset to the Pass Through Patch Configuration block */
+	hw->eeprom.ops.read(hw, (fw_offset +
+	                         IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+	                         &fw_ptp_cfg_offset);
+
+	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+		goto fw_version_out;
+
+	/* get the firmware version */
+	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+	                         IXGBE_FW_PATCH_VERSION_4),
+	                         &fw_version);
+
+	if (fw_version > 0x5)
+		status = 0;
+
+fw_version_out:
+	return status;
+}
+
+/**
+ *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns true if the LESM FW module is present and enabled. Otherwise
+ *  returns false. Smart Speed must be disabled if LESM FW module is enabled.
+ **/ +static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +{ +	bool lesm_enabled = false; +	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; +	s32 status; + +	/* get the offset to the Firmware Module block */ +	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + +	if ((status != 0) || +	    (fw_offset == 0) || (fw_offset == 0xFFFF)) +		goto out; + +	/* get the offset to the LESM Parameters block */ +	status = hw->eeprom.ops.read(hw, (fw_offset + +				     IXGBE_FW_LESM_PARAMETERS_PTR), +				     &fw_lesm_param_offset); + +	if ((status != 0) || +	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) +		goto out; + +	/* get the lesm state word */ +	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + +				     IXGBE_FW_LESM_STATE_1), +				     &fw_lesm_state); + +	if ((status == 0) && +	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) +		lesm_enabled = true; + +out: +	return lesm_enabled; +} + +/** + *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using + *  fastest available method + * + *  @hw: pointer to hardware structure + *  @offset: offset of  word in EEPROM to read + *  @words: number of words + *  @data: word(s) read from the EEPROM + * + *  Retrieves 16 bit word(s) read from EEPROM + **/ +static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, +					  u16 words, u16 *data) +{ +	struct ixgbe_eeprom_info *eeprom = &hw->eeprom; +	s32 ret_val = IXGBE_ERR_CONFIG; + +	/* +	 * If EEPROM is detected and can be addressed using 14 bits, +	 * use EERD otherwise use bit bang +	 */ +	if ((eeprom->type == ixgbe_eeprom_spi) && +	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) +		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, +							 data); +	else +		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, +								    words, +								    data); + +	return ret_val; +} + +/** + *  ixgbe_read_eeprom_82599 - Read EEPROM word using + *  fastest available method + * + *  @hw: pointer to hardware structure + *  @offset: offset of  word in the EEPROM to read + *  @data: word read from the EEPROM + * + *  Reads a 16 bit word from the EEPROM + **/ +static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, +				   u16 offset, u16 *data) +{ +	struct ixgbe_eeprom_info *eeprom = &hw->eeprom; +	s32 ret_val = IXGBE_ERR_CONFIG; + +	/* +	 * If EEPROM is detected and can be addressed using 14 bits, +	 * use EERD otherwise use bit bang +	 */ +	if ((eeprom->type == ixgbe_eeprom_spi) && +	    (offset <= IXGBE_EERD_MAX_ADDR)) +		ret_val = ixgbe_read_eerd_generic(hw, offset, data); +	else +		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); + +	return ret_val; +} + +static struct ixgbe_mac_operations mac_ops_82599 = { +	.init_hw                = &ixgbe_init_hw_generic, +	.reset_hw               = &ixgbe_reset_hw_82599, +	.start_hw               = &ixgbe_start_hw_82599, +	.clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic, +	.get_media_type         = &ixgbe_get_media_type_82599, +	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599, +	.enable_rx_dma          = &ixgbe_enable_rx_dma_82599, +	.get_mac_addr           = &ixgbe_get_mac_addr_generic, +	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic, +	.get_device_caps        = &ixgbe_get_device_caps_generic, +	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic, +	.stop_adapter           = &ixgbe_stop_adapter_generic, +	.get_bus_info           = &ixgbe_get_bus_info_generic, +	.set_lan_id             = 
&ixgbe_set_lan_id_multi_port_pcie, +	.read_analog_reg8       = &ixgbe_read_analog_reg8_82599, +	.write_analog_reg8      = &ixgbe_write_analog_reg8_82599, +	.setup_link             = &ixgbe_setup_mac_link_82599, +	.set_rxpba		= &ixgbe_set_rxpba_generic, +	.check_link             = &ixgbe_check_mac_link_generic, +	.get_link_capabilities  = &ixgbe_get_link_capabilities_82599, +	.led_on                 = &ixgbe_led_on_generic, +	.led_off                = &ixgbe_led_off_generic, +	.blink_led_start        = &ixgbe_blink_led_start_generic, +	.blink_led_stop         = &ixgbe_blink_led_stop_generic, +	.set_rar                = &ixgbe_set_rar_generic, +	.clear_rar              = &ixgbe_clear_rar_generic, +	.set_vmdq               = &ixgbe_set_vmdq_generic, +	.clear_vmdq             = &ixgbe_clear_vmdq_generic, +	.init_rx_addrs          = &ixgbe_init_rx_addrs_generic, +	.update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic, +	.enable_mc              = &ixgbe_enable_mc_generic, +	.disable_mc             = &ixgbe_disable_mc_generic, +	.clear_vfta             = &ixgbe_clear_vfta_generic, +	.set_vfta               = &ixgbe_set_vfta_generic, +	.fc_enable              = &ixgbe_fc_enable_generic, +	.set_fw_drv_ver         = &ixgbe_set_fw_drv_ver_generic, +	.init_uta_tables        = &ixgbe_init_uta_tables_generic, +	.setup_sfp              = &ixgbe_setup_sfp_modules_82599, +	.set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing, +	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, +	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync, +	.release_swfw_sync      = &ixgbe_release_swfw_sync, + +}; + +static struct ixgbe_eeprom_operations eeprom_ops_82599 = { +	.init_params		= &ixgbe_init_eeprom_params_generic, +	.read			= &ixgbe_read_eeprom_82599, +	.read_buffer		= &ixgbe_read_eeprom_buffer_82599, +	.write			= &ixgbe_write_eeprom_generic, +	.write_buffer		= &ixgbe_write_eeprom_buffer_bit_bang_generic, +	.calc_checksum		= &ixgbe_calc_eeprom_checksum_generic, +	.validate_checksum	= &ixgbe_validate_eeprom_checksum_generic, +	.update_checksum	= &ixgbe_update_eeprom_checksum_generic, +}; + +static struct ixgbe_phy_operations phy_ops_82599 = { +	.identify		= &ixgbe_identify_phy_82599, +	.identify_sfp		= &ixgbe_identify_sfp_module_generic, +	.init			= &ixgbe_init_phy_ops_82599, +	.reset			= &ixgbe_reset_phy_generic, +	.read_reg		= &ixgbe_read_phy_reg_generic, +	.write_reg		= &ixgbe_write_phy_reg_generic, +	.setup_link		= &ixgbe_setup_phy_link_generic, +	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic, +	.read_i2c_byte		= &ixgbe_read_i2c_byte_generic, +	.write_i2c_byte		= &ixgbe_write_i2c_byte_generic, +	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_generic, +	.write_i2c_eeprom	= &ixgbe_write_i2c_eeprom_generic, +	.check_overtemp		= &ixgbe_tn_check_overtemp, +}; + +struct ixgbe_info ixgbe_82599_info = { +	.mac                    = ixgbe_mac_82599EB, +	.get_invariants         = &ixgbe_get_invariants_82599, +	.mac_ops                = &mac_ops_82599, +	.eeprom_ops             = &eeprom_ops_82599, +	.phy_ops                = &phy_ops_82599, +	.mbx_ops                = &mbx_ops_generic, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c new file mode 100644 index 00000000000..fc1375f26fe --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -0,0 +1,3510 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel 
Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/netdevice.h> + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, +                                        u16 count); +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_release_eeprom(struct ixgbe_hw *hw); + +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw); +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw); +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw); +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, +			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); +static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); +static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +					     u16 words, u16 *data); +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +					     u16 words, u16 *data); +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, +						 u16 offset); + +/** + *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx + *  @hw: pointer to hardware structure + * + *  Starts the hardware by filling the bus info structure and media type, clears + *  all on chip counters, initializes receive address registers, multicast + *  table, VLAN filter table, calls routine to set up link and flow control + *  settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) +{ +	u32 ctrl_ext; + +	/* Set the media type */ +	hw->phy.media_type = hw->mac.ops.get_media_type(hw); + +	/* Identify the PHY */ +	hw->phy.ops.identify(hw); + +	/* Clear the VLAN filter table */ +	hw->mac.ops.clear_vfta(hw); + +	/* Clear statistics registers */ +	
hw->mac.ops.clear_hw_cntrs(hw); + +	/* Set No Snoop Disable */ +	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); +	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; +	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); +	IXGBE_WRITE_FLUSH(hw); + +	/* Setup flow control */ +	ixgbe_setup_fc(hw, 0); + +	/* Clear adapter stopped flag */ +	hw->adapter_stopped = false; + +	return 0; +} + +/** + *  ixgbe_start_hw_gen2 - Init sequence for common device family + *  @hw: pointer to hw structure + * + * Performs the init sequence common to the second generation + * of 10 GbE devices. + * Devices in the second generation: + *     82599 + *     X540 + **/ +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) +{ +	u32 i; +	u32 regval; + +	/* Clear the rate limiters */ +	for (i = 0; i < hw->mac.max_tx_queues; i++) { +		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); +		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); +	} +	IXGBE_WRITE_FLUSH(hw); + +	/* Disable relaxed ordering */ +	for (i = 0; i < hw->mac.max_tx_queues; i++) { +		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); +		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; +		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); +	} + +	for (i = 0; i < hw->mac.max_rx_queues; i++) { +		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | +					IXGBE_DCA_RXCTRL_DESC_HSRO_EN); +		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); +	} + +	return 0; +} + +/** + *  ixgbe_init_hw_generic - Generic hardware initialization + *  @hw: pointer to hardware structure + * + *  Initialize the hardware by resetting the hardware, filling the bus info + *  structure and media type, clears all on chip counters, initializes receive + *  address registers, multicast table, VLAN filter table, calls routine to set + *  up link and flow control settings, and leaves transmit and receive units + *  disabled and uninitialized + **/ +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) +{ +	s32 status; + +	/* Reset the hardware */ +	status = hw->mac.ops.reset_hw(hw); + +	if (status == 0) { +		/* Start the HW */ +		status = hw->mac.ops.start_hw(hw); +	} + +	return status; +} + +/** + *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters + *  @hw: pointer to hardware structure + * + *  Clears all hardware statistics counters by reading them from the hardware + *  Statistics counters are clear on read. 
+ **/ +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) +{ +	u16 i = 0; + +	IXGBE_READ_REG(hw, IXGBE_CRCERRS); +	IXGBE_READ_REG(hw, IXGBE_ILLERRC); +	IXGBE_READ_REG(hw, IXGBE_ERRBC); +	IXGBE_READ_REG(hw, IXGBE_MSPDC); +	for (i = 0; i < 8; i++) +		IXGBE_READ_REG(hw, IXGBE_MPC(i)); + +	IXGBE_READ_REG(hw, IXGBE_MLFC); +	IXGBE_READ_REG(hw, IXGBE_MRFC); +	IXGBE_READ_REG(hw, IXGBE_RLEC); +	IXGBE_READ_REG(hw, IXGBE_LXONTXC); +	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); +	if (hw->mac.type >= ixgbe_mac_82599EB) { +		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); +		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); +	} else { +		IXGBE_READ_REG(hw, IXGBE_LXONRXC); +		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); +	} + +	for (i = 0; i < 8; i++) { +		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); +		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); +		if (hw->mac.type >= ixgbe_mac_82599EB) { +			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); +			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); +		} else { +			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); +			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); +		} +	} +	if (hw->mac.type >= ixgbe_mac_82599EB) +		for (i = 0; i < 8; i++) +			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); +	IXGBE_READ_REG(hw, IXGBE_PRC64); +	IXGBE_READ_REG(hw, IXGBE_PRC127); +	IXGBE_READ_REG(hw, IXGBE_PRC255); +	IXGBE_READ_REG(hw, IXGBE_PRC511); +	IXGBE_READ_REG(hw, IXGBE_PRC1023); +	IXGBE_READ_REG(hw, IXGBE_PRC1522); +	IXGBE_READ_REG(hw, IXGBE_GPRC); +	IXGBE_READ_REG(hw, IXGBE_BPRC); +	IXGBE_READ_REG(hw, IXGBE_MPRC); +	IXGBE_READ_REG(hw, IXGBE_GPTC); +	IXGBE_READ_REG(hw, IXGBE_GORCL); +	IXGBE_READ_REG(hw, IXGBE_GORCH); +	IXGBE_READ_REG(hw, IXGBE_GOTCL); +	IXGBE_READ_REG(hw, IXGBE_GOTCH); +	for (i = 0; i < 8; i++) +		IXGBE_READ_REG(hw, IXGBE_RNBC(i)); +	IXGBE_READ_REG(hw, IXGBE_RUC); +	IXGBE_READ_REG(hw, IXGBE_RFC); +	IXGBE_READ_REG(hw, IXGBE_ROC); +	IXGBE_READ_REG(hw, IXGBE_RJC); +	IXGBE_READ_REG(hw, IXGBE_MNGPRC); +	IXGBE_READ_REG(hw, IXGBE_MNGPDC); +	IXGBE_READ_REG(hw, IXGBE_MNGPTC); +	IXGBE_READ_REG(hw, IXGBE_TORL); +	IXGBE_READ_REG(hw, IXGBE_TORH); +	IXGBE_READ_REG(hw, IXGBE_TPR); +	IXGBE_READ_REG(hw, IXGBE_TPT); +	IXGBE_READ_REG(hw, IXGBE_PTC64); +	IXGBE_READ_REG(hw, IXGBE_PTC127); +	IXGBE_READ_REG(hw, IXGBE_PTC255); +	IXGBE_READ_REG(hw, IXGBE_PTC511); +	IXGBE_READ_REG(hw, IXGBE_PTC1023); +	IXGBE_READ_REG(hw, IXGBE_PTC1522); +	IXGBE_READ_REG(hw, IXGBE_MPTC); +	IXGBE_READ_REG(hw, IXGBE_BPTC); +	for (i = 0; i < 16; i++) { +		IXGBE_READ_REG(hw, IXGBE_QPRC(i)); +		IXGBE_READ_REG(hw, IXGBE_QPTC(i)); +		if (hw->mac.type >= ixgbe_mac_82599EB) { +			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); +			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); +			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); +			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); +			IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); +		} else { +			IXGBE_READ_REG(hw, IXGBE_QBRC(i)); +			IXGBE_READ_REG(hw, IXGBE_QBTC(i)); +		} +	} + +	if (hw->mac.type == ixgbe_mac_X540) { +		if (hw->phy.id == 0) +			hw->phy.ops.identify(hw); +		hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i); +		hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i); +		hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i); +		hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i); +	} + +	return 0; +} + +/** + *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM + *  @hw: pointer to hardware structure + *  @pba_num: stores the part number string from the EEPROM + *  @pba_num_size: part number string buffer length + * + *  Reads the part number string from the EEPROM. 
+ **/ +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, +                                  u32 pba_num_size) +{ +	s32 ret_val; +	u16 data; +	u16 pba_ptr; +	u16 offset; +	u16 length; + +	if (pba_num == NULL) { +		hw_dbg(hw, "PBA string buffer was null\n"); +		return IXGBE_ERR_INVALID_ARGUMENT; +	} + +	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); +	if (ret_val) { +		hw_dbg(hw, "NVM Read Error\n"); +		return ret_val; +	} + +	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); +	if (ret_val) { +		hw_dbg(hw, "NVM Read Error\n"); +		return ret_val; +	} + +	/* +	 * if data is not ptr guard the PBA must be in legacy format which +	 * means pba_ptr is actually our second data word for the PBA number +	 * and we can decode it into an ascii string +	 */ +	if (data != IXGBE_PBANUM_PTR_GUARD) { +		hw_dbg(hw, "NVM PBA number is not stored as string\n"); + +		/* we will need 11 characters to store the PBA */ +		if (pba_num_size < 11) { +			hw_dbg(hw, "PBA string buffer too small\n"); +			return IXGBE_ERR_NO_SPACE; +		} + +		/* extract hex string from data and pba_ptr */ +		pba_num[0] = (data >> 12) & 0xF; +		pba_num[1] = (data >> 8) & 0xF; +		pba_num[2] = (data >> 4) & 0xF; +		pba_num[3] = data & 0xF; +		pba_num[4] = (pba_ptr >> 12) & 0xF; +		pba_num[5] = (pba_ptr >> 8) & 0xF; +		pba_num[6] = '-'; +		pba_num[7] = 0; +		pba_num[8] = (pba_ptr >> 4) & 0xF; +		pba_num[9] = pba_ptr & 0xF; + +		/* put a null character on the end of our string */ +		pba_num[10] = '\0'; + +		/* switch all the data but the '-' to hex char */ +		for (offset = 0; offset < 10; offset++) { +			if (pba_num[offset] < 0xA) +				pba_num[offset] += '0'; +			else if (pba_num[offset] < 0x10) +				pba_num[offset] += 'A' - 0xA; +		} + +		return 0; +	} + +	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); +	if (ret_val) { +		hw_dbg(hw, "NVM Read Error\n"); +		return ret_val; +	} + +	if (length == 0xFFFF || length == 0) { +		hw_dbg(hw, "NVM PBA number section invalid length\n"); +		return IXGBE_ERR_PBA_SECTION; +	} + +	/* check if pba_num buffer is big enough */ +	if (pba_num_size  < (((u32)length * 2) - 1)) { +		hw_dbg(hw, "PBA string buffer too small\n"); +		return IXGBE_ERR_NO_SPACE; +	} + +	/* trim pba length from start of string */ +	pba_ptr++; +	length--; + +	for (offset = 0; offset < length; offset++) { +		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); +		if (ret_val) { +			hw_dbg(hw, "NVM Read Error\n"); +			return ret_val; +		} +		pba_num[offset * 2] = (u8)(data >> 8); +		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); +	} +	pba_num[offset * 2] = '\0'; + +	return 0; +} + +/** + *  ixgbe_get_mac_addr_generic - Generic get MAC address + *  @hw: pointer to hardware structure + *  @mac_addr: Adapter MAC address + * + *  Reads the adapter's MAC address from first Receive Address Register (RAR0) + *  A reset of the adapter must be performed prior to calling this function + *  in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) +{ +	u32 rar_high; +	u32 rar_low; +	u16 i; + +	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); +	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + +	for (i = 0; i < 4; i++) +		mac_addr[i] = (u8)(rar_low >> (i*8)); + +	for (i = 0; i < 2; i++) +		mac_addr[i+4] = (u8)(rar_high >> (i*8)); + +	return 0; +} + +/** + *  ixgbe_get_bus_info_generic - Generic set PCI bus info + *  @hw: pointer to hardware structure + * + *  Sets the PCI bus info (speed, width, type) within the 
ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) +{ +	struct ixgbe_adapter *adapter = hw->back; +	struct ixgbe_mac_info *mac = &hw->mac; +	u16 link_status; + +	hw->bus.type = ixgbe_bus_type_pci_express; + +	/* Get the negotiated link width and speed from PCI config space */ +	pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS, +	                     &link_status); + +	switch (link_status & IXGBE_PCI_LINK_WIDTH) { +	case IXGBE_PCI_LINK_WIDTH_1: +		hw->bus.width = ixgbe_bus_width_pcie_x1; +		break; +	case IXGBE_PCI_LINK_WIDTH_2: +		hw->bus.width = ixgbe_bus_width_pcie_x2; +		break; +	case IXGBE_PCI_LINK_WIDTH_4: +		hw->bus.width = ixgbe_bus_width_pcie_x4; +		break; +	case IXGBE_PCI_LINK_WIDTH_8: +		hw->bus.width = ixgbe_bus_width_pcie_x8; +		break; +	default: +		hw->bus.width = ixgbe_bus_width_unknown; +		break; +	} + +	switch (link_status & IXGBE_PCI_LINK_SPEED) { +	case IXGBE_PCI_LINK_SPEED_2500: +		hw->bus.speed = ixgbe_bus_speed_2500; +		break; +	case IXGBE_PCI_LINK_SPEED_5000: +		hw->bus.speed = ixgbe_bus_speed_5000; +		break; +	default: +		hw->bus.speed = ixgbe_bus_speed_unknown; +		break; +	} + +	mac->ops.set_lan_id(hw); + +	return 0; +} + +/** + *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + *  @hw: pointer to the HW structure + * + *  Determines the LAN function id by reading memory-mapped registers + *  and swaps the port value if requested. + **/ +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) +{ +	struct ixgbe_bus_info *bus = &hw->bus; +	u32 reg; + +	reg = IXGBE_READ_REG(hw, IXGBE_STATUS); +	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; +	bus->lan_id = bus->func; + +	/* check for a port swap */ +	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); +	if (reg & IXGBE_FACTPS_LFS) +		bus->func ^= 0x1; +} + +/** + *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units + *  @hw: pointer to hardware structure + * + *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + *  disables transmit and receive units. The adapter_stopped flag is used by + *  the shared code and drivers to determine if the adapter is in a stopped + *  state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) +{ +	u32 number_of_queues; +	u32 reg_val; +	u16 i; + +	/* +	 * Set the adapter_stopped flag so other driver functions stop touching +	 * the hardware +	 */ +	hw->adapter_stopped = true; + +	/* Disable the receive unit */ +	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +	reg_val &= ~(IXGBE_RXCTRL_RXEN); +	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); +	IXGBE_WRITE_FLUSH(hw); +	usleep_range(2000, 4000); + +	/* Clear interrupt mask to stop from interrupts being generated */ +	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + +	/* Clear any pending interrupts */ +	IXGBE_READ_REG(hw, IXGBE_EICR); + +	/* Disable the transmit unit.  Each queue must be disabled. */ +	number_of_queues = hw->mac.max_tx_queues; +	for (i = 0; i < number_of_queues; i++) { +		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); +		if (reg_val & IXGBE_TXDCTL_ENABLE) { +			reg_val &= ~IXGBE_TXDCTL_ENABLE; +			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val); +		} +	} + +	/* +	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master +	 * access and verify no pending requests +	 */ +	ixgbe_disable_pcie_master(hw); + +	return 0; +} + +/** + *  ixgbe_led_on_generic - Turns on the software controllable LEDs. 
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn on
+ **/
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	/* To turn on the LED, set mode to ON. */
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn off
+ **/
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	/* To turn off the LED, set mode to OFF. */
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	u32 eec;
+	u16 eeprom_size;
+
+	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		eeprom->type = ixgbe_eeprom_none;
+		/* Set default semaphore delay to 10ms which is a well
+		 * tested value */
+		eeprom->semaphore_delay = 10;
+		/* Clear EEPROM page size, it will be initialized as needed */
+		eeprom->word_page_size = 0;
+
+		/*
+		 * Check for EEPROM present first.
+		 * If not present leave as none
+		 */
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		if (eec & IXGBE_EEC_PRES) {
+			eeprom->type = ixgbe_eeprom_spi;
+
+			/*
+			 * SPI EEPROM is assumed here.  This code would need to
+			 * change if a future EEPROM is not SPI.
+			 */
+			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+					    IXGBE_EEC_SIZE_SHIFT);
+			eeprom->word_size = 1 << (eeprom_size +
+						  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		}
+
+		if (eec & IXGBE_EEC_ADDR_SIZE)
+			eeprom->address_bits = 16;
+		else
+			eeprom->address_bits = 8;
+		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
+			  "%d\n", eeprom->type, eeprom->word_size,
+			  eeprom->address_bits);
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to write
+ *  @words: number of words
+ *  @data: 16 bit word(s) to write to EEPROM
+ *
+ *  Writes 16 bit word(s) to EEPROM through bit-bang method
+ **/
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+					       u16 words, u16 *data)
+{
+	s32 status = 0;
+	u16 i, count;
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (words == 0) {
+		status = IXGBE_ERR_INVALID_ARGUMENT;
+		goto out;
+	}
+
+	if (offset + words > hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	/*
+	 * The EEPROM page size cannot be queried from the chip. We do lazy
+	 * initialization. It is worth doing when we write a large buffer.
+	 */
+	if ((hw->eeprom.word_page_size == 0) &&
+	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+		ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+	/*
+	 * We cannot hold synchronization semaphores for too long
+	 * to avoid other entity starvation.
However it is more efficient +	 * to read in bursts than synchronizing access for each word. +	 */ +	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { +		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? +			 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); +		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, +							    count, &data[i]); + +		if (status != 0) +			break; +	} + +out: +	return status; +} + +/** + *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM + *  @hw: pointer to hardware structure + *  @offset: offset within the EEPROM to be written to + *  @words: number of word(s) + *  @data: 16 bit word(s) to be written to the EEPROM + * + *  If ixgbe_eeprom_update_checksum is not called after this function, the + *  EEPROM will most likely contain an invalid checksum. + **/ +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +					      u16 words, u16 *data) +{ +	s32 status; +	u16 word; +	u16 page_size; +	u16 i; +	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + +	/* Prepare the EEPROM for writing  */ +	status = ixgbe_acquire_eeprom(hw); + +	if (status == 0) { +		if (ixgbe_ready_eeprom(hw) != 0) { +			ixgbe_release_eeprom(hw); +			status = IXGBE_ERR_EEPROM; +		} +	} + +	if (status == 0) { +		for (i = 0; i < words; i++) { +			ixgbe_standby_eeprom(hw); + +			/*  Send the WRITE ENABLE command (8 bit opcode )  */ +			ixgbe_shift_out_eeprom_bits(hw, +						  IXGBE_EEPROM_WREN_OPCODE_SPI, +						  IXGBE_EEPROM_OPCODE_BITS); + +			ixgbe_standby_eeprom(hw); + +			/* +			 * Some SPI eeproms use the 8th address bit embedded +			 * in the opcode +			 */ +			if ((hw->eeprom.address_bits == 8) && +			    ((offset + i) >= 128)) +				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + +			/* Send the Write command (8-bit opcode + addr) */ +			ixgbe_shift_out_eeprom_bits(hw, write_opcode, +						    IXGBE_EEPROM_OPCODE_BITS); +			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), +						    hw->eeprom.address_bits); + +			page_size = hw->eeprom.word_page_size; + +			/* Send the data in burst via SPI*/ +			do { +				word = data[i]; +				word = (word >> 8) | (word << 8); +				ixgbe_shift_out_eeprom_bits(hw, word, 16); + +				if (page_size == 0) +					break; + +				/* do not wrap around page */ +				if (((offset + i) & (page_size - 1)) == +				    (page_size - 1)) +					break; +			} while (++i < words); + +			ixgbe_standby_eeprom(hw); +			usleep_range(10000, 20000); +		} +		/* Done with writing - release the EEPROM */ +		ixgbe_release_eeprom(hw); +	} + +	return status; +} + +/** + *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM + *  @hw: pointer to hardware structure + *  @offset: offset within the EEPROM to be written to + *  @data: 16 bit word to be written to the EEPROM + * + *  If ixgbe_eeprom_update_checksum is not called after this function, the + *  EEPROM will most likely contain an invalid checksum. 
+ **/ +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ +	s32 status; + +	hw->eeprom.ops.init_params(hw); + +	if (offset >= hw->eeprom.word_size) { +		status = IXGBE_ERR_EEPROM; +		goto out; +	} + +	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); + +out: +	return status; +} + +/** + *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang + *  @hw: pointer to hardware structure + *  @offset: offset within the EEPROM to be read + *  @words: number of word(s) + *  @data: read 16 bit words(s) from EEPROM + * + *  Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +					      u16 words, u16 *data) +{ +	s32 status = 0; +	u16 i, count; + +	hw->eeprom.ops.init_params(hw); + +	if (words == 0) { +		status = IXGBE_ERR_INVALID_ARGUMENT; +		goto out; +	} + +	if (offset + words > hw->eeprom.word_size) { +		status = IXGBE_ERR_EEPROM; +		goto out; +	} + +	/* +	 * We cannot hold synchronization semaphores for too long +	 * to avoid other entity starvation. However it is more efficient +	 * to read in bursts than synchronizing access for each word. +	 */ +	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { +		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? +			 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + +		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, +							   count, &data[i]); + +		if (status != 0) +			break; +	} + +out: +	return status; +} + +/** + *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang + *  @hw: pointer to hardware structure + *  @offset: offset within the EEPROM to be read + *  @words: number of word(s) + *  @data: read 16 bit word(s) from EEPROM + * + *  Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +					     u16 words, u16 *data) +{ +	s32 status; +	u16 word_in; +	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; +	u16 i; + +	/* Prepare the EEPROM for reading  */ +	status = ixgbe_acquire_eeprom(hw); + +	if (status == 0) { +		if (ixgbe_ready_eeprom(hw) != 0) { +			ixgbe_release_eeprom(hw); +			status = IXGBE_ERR_EEPROM; +		} +	} + +	if (status == 0) { +		for (i = 0; i < words; i++) { +			ixgbe_standby_eeprom(hw); +			/* +			 * Some SPI eeproms use the 8th address bit embedded +			 * in the opcode +			 */ +			if ((hw->eeprom.address_bits == 8) && +			    ((offset + i) >= 128)) +				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + +			/* Send the READ command (opcode + addr) */ +			ixgbe_shift_out_eeprom_bits(hw, read_opcode, +						    IXGBE_EEPROM_OPCODE_BITS); +			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), +						    hw->eeprom.address_bits); + +			/* Read the data. 
*/ +			word_in = ixgbe_shift_in_eeprom_bits(hw, 16); +			data[i] = (word_in >> 8) | (word_in << 8); +		} + +		/* End this read operation */ +		ixgbe_release_eeprom(hw); +	} + +	return status; +} + +/** + *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang + *  @hw: pointer to hardware structure + *  @offset: offset within the EEPROM to be read + *  @data: read 16 bit value from EEPROM + * + *  Reads 16 bit value from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +				       u16 *data) +{ +	s32 status; + +	hw->eeprom.ops.init_params(hw); + +	if (offset >= hw->eeprom.word_size) { +		status = IXGBE_ERR_EEPROM; +		goto out; +	} + +	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + +out: +	return status; +} + +/** + *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD + *  @hw: pointer to hardware structure + *  @offset: offset of word in the EEPROM to read + *  @words: number of word(s) + *  @data: 16 bit word(s) from the EEPROM + * + *  Reads a 16 bit word(s) from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, +				   u16 words, u16 *data) +{ +	u32 eerd; +	s32 status = 0; +	u32 i; + +	hw->eeprom.ops.init_params(hw); + +	if (words == 0) { +		status = IXGBE_ERR_INVALID_ARGUMENT; +		goto out; +	} + +	if (offset >= hw->eeprom.word_size) { +		status = IXGBE_ERR_EEPROM; +		goto out; +	} + +	for (i = 0; i < words; i++) { +		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) + +		       IXGBE_EEPROM_RW_REG_START; + +		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); +		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); + +		if (status == 0) { +			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> +				   IXGBE_EEPROM_RW_REG_DATA); +		} else { +			hw_dbg(hw, "Eeprom read timed out\n"); +			goto out; +		} +	} +out: +	return status; +} + +/** + *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size + *  @hw: pointer to hardware structure + *  @offset: offset within the EEPROM to be used as a scratch pad + * + *  Discover EEPROM page size by writing marching data at given offset. + *  This function is called only when we are writing a new large buffer + *  at given offset so the data would be overwritten anyway. + **/ +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, +						 u16 offset) +{ +	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; +	s32 status = 0; +	u16 i; + +	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) +		data[i] = i; + +	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; +	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, +					     IXGBE_EEPROM_PAGE_SIZE_MAX, data); +	hw->eeprom.word_page_size = 0; +	if (status != 0) +		goto out; + +	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); +	if (status != 0) +		goto out; + +	/* +	 * When writing in burst more than the actual page size +	 * EEPROM address wraps around current page. +	 */ +	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; + +	hw_dbg(hw, "Detected EEPROM page size = %d words.", +	       hw->eeprom.word_page_size); +out: +	return status; +} + +/** + *  ixgbe_read_eerd_generic - Read EEPROM word using EERD + *  @hw: pointer to hardware structure + *  @offset: offset of  word in the EEPROM to read + *  @data: word read from the EEPROM + * + *  Reads a 16 bit word from the EEPROM using the EERD register. 
+ **/ +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ +	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); +} + +/** + *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR + *  @hw: pointer to hardware structure + *  @offset: offset of  word in the EEPROM to write + *  @words: number of words + *  @data: word(s) write to the EEPROM + * + *  Write a 16 bit word(s) to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, +				    u16 words, u16 *data) +{ +	u32 eewr; +	s32 status = 0; +	u16 i; + +	hw->eeprom.ops.init_params(hw); + +	if (words == 0) { +		status = IXGBE_ERR_INVALID_ARGUMENT; +		goto out; +	} + +	if (offset >= hw->eeprom.word_size) { +		status = IXGBE_ERR_EEPROM; +		goto out; +	} + +	for (i = 0; i < words; i++) { +		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | +		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) | +		       IXGBE_EEPROM_RW_REG_START; + +		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); +		if (status != 0) { +			hw_dbg(hw, "Eeprom write EEWR timed out\n"); +			goto out; +		} + +		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); + +		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); +		if (status != 0) { +			hw_dbg(hw, "Eeprom write EEWR timed out\n"); +			goto out; +		} +	} + +out: +	return status; +} + +/** + *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR + *  @hw: pointer to hardware structure + *  @offset: offset of  word in the EEPROM to write + *  @data: word write to the EEPROM + * + *  Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ +	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); +} + +/** + *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status + *  @hw: pointer to hardware structure + *  @ee_reg: EEPROM flag for polling + * + *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the + *  read or write is done respectively. + **/ +static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +{ +	u32 i; +	u32 reg; +	s32 status = IXGBE_ERR_EEPROM; + +	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { +		if (ee_reg == IXGBE_NVM_POLL_READ) +			reg = IXGBE_READ_REG(hw, IXGBE_EERD); +		else +			reg = IXGBE_READ_REG(hw, IXGBE_EEWR); + +		if (reg & IXGBE_EEPROM_RW_REG_DONE) { +			status = 0; +			break; +		} +		udelay(5); +	} +	return status; +} + +/** + *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang + *  @hw: pointer to hardware structure + * + *  Prepares EEPROM for access using bit-bang method. This function should + *  be called before issuing a command to the EEPROM. 
+ **/ +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +{ +	s32 status = 0; +	u32 eec; +	u32 i; + +	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) +		status = IXGBE_ERR_SWFW_SYNC; + +	if (status == 0) { +		eec = IXGBE_READ_REG(hw, IXGBE_EEC); + +		/* Request EEPROM Access */ +		eec |= IXGBE_EEC_REQ; +		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + +		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { +			eec = IXGBE_READ_REG(hw, IXGBE_EEC); +			if (eec & IXGBE_EEC_GNT) +				break; +			udelay(5); +		} + +		/* Release if grant not acquired */ +		if (!(eec & IXGBE_EEC_GNT)) { +			eec &= ~IXGBE_EEC_REQ; +			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); +			hw_dbg(hw, "Could not acquire EEPROM grant\n"); + +			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +			status = IXGBE_ERR_EEPROM; +		} + +		/* Setup EEPROM for Read/Write */ +		if (status == 0) { +			/* Clear CS and SK */ +			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); +			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); +			IXGBE_WRITE_FLUSH(hw); +			udelay(1); +		} +	} +	return status; +} + +/** + *  ixgbe_get_eeprom_semaphore - Get hardware semaphore + *  @hw: pointer to hardware structure + * + *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +{ +	s32 status = IXGBE_ERR_EEPROM; +	u32 timeout = 2000; +	u32 i; +	u32 swsm; + +	/* Get SMBI software semaphore between device drivers first */ +	for (i = 0; i < timeout; i++) { +		/* +		 * If the SMBI bit is 0 when we read it, then the bit will be +		 * set and we have the semaphore +		 */ +		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); +		if (!(swsm & IXGBE_SWSM_SMBI)) { +			status = 0; +			break; +		} +		udelay(50); +	} + +	if (i == timeout) { +		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore " +		       "not granted.\n"); +		/* +		 * this release is particularly important because our attempts +		 * above to get the semaphore may have succeeded, and if there +		 * was a timeout, we should unconditionally clear the semaphore +		 * bits to free the driver to make progress +		 */ +		ixgbe_release_eeprom_semaphore(hw); + +		udelay(50); +		/* +		 * one last try +		 * If the SMBI bit is 0 when we read it, then the bit will be +		 * set and we have the semaphore +		 */ +		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); +		if (!(swsm & IXGBE_SWSM_SMBI)) +			status = 0; +	} + +	/* Now get the semaphore between SW/FW through the SWESMBI bit */ +	if (status == 0) { +		for (i = 0; i < timeout; i++) { +			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + +			/* Set the SW EEPROM semaphore bit to request access */ +			swsm |= IXGBE_SWSM_SWESMBI; +			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + +			/* +			 * If we set the bit successfully then we got the +			 * semaphore. +			 */ +			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); +			if (swsm & IXGBE_SWSM_SWESMBI) +				break; + +			udelay(50); +		} + +		/* +		 * Release semaphores and return error if SW EEPROM semaphore +		 * was not granted because we don't have access to the EEPROM +		 */ +		if (i >= timeout) { +			hw_dbg(hw, "SWESMBI Software EEPROM semaphore " +			       "not granted.\n"); +			ixgbe_release_eeprom_semaphore(hw); +			status = IXGBE_ERR_EEPROM; +		} +	} else { +		hw_dbg(hw, "Software semaphore SMBI between device drivers " +		       "not granted.\n"); +	} + +	return status; +} + +/** + *  ixgbe_release_eeprom_semaphore - Release hardware semaphore + *  @hw: pointer to hardware structure + * + *  This function clears hardware semaphore bits. 
+ **/ +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +{ +	u32 swsm; + +	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + +	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ +	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); +	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); +	IXGBE_WRITE_FLUSH(hw); +} + +/** + *  ixgbe_ready_eeprom - Polls for EEPROM ready + *  @hw: pointer to hardware structure + **/ +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +{ +	s32 status = 0; +	u16 i; +	u8 spi_stat_reg; + +	/* +	 * Read "Status Register" repeatedly until the LSB is cleared.  The +	 * EEPROM will signal that the command has been completed by clearing +	 * bit 0 of the internal status register.  If it's not cleared within +	 * 5 milliseconds, then error out. +	 */ +	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { +		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, +		                            IXGBE_EEPROM_OPCODE_BITS); +		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); +		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) +			break; + +		udelay(5); +		ixgbe_standby_eeprom(hw); +	} + +	/* +	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V +	 * devices (and only 0-5mSec on 5V devices) +	 */ +	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { +		hw_dbg(hw, "SPI EEPROM Status error\n"); +		status = IXGBE_ERR_EEPROM; +	} + +	return status; +} + +/** + *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state + *  @hw: pointer to hardware structure + **/ +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) +{ +	u32 eec; + +	eec = IXGBE_READ_REG(hw, IXGBE_EEC); + +	/* Toggle CS to flush commands */ +	eec |= IXGBE_EEC_CS; +	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); +	IXGBE_WRITE_FLUSH(hw); +	udelay(1); +	eec &= ~IXGBE_EEC_CS; +	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); +	IXGBE_WRITE_FLUSH(hw); +	udelay(1); +} + +/** + *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. + *  @hw: pointer to hardware structure + *  @data: data to send to the EEPROM + *  @count: number of bits to shift out + **/ +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, +                                        u16 count) +{ +	u32 eec; +	u32 mask; +	u32 i; + +	eec = IXGBE_READ_REG(hw, IXGBE_EEC); + +	/* +	 * Mask is used to shift "count" bits of "data" out to the EEPROM +	 * one bit at a time.  Determine the starting bit based on count +	 */ +	mask = 0x01 << (count - 1); + +	for (i = 0; i < count; i++) { +		/* +		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a +		 * "1", and then raising and then lowering the clock (the SK +		 * bit controls the clock input to the EEPROM).  A "0" is +		 * shifted out to the EEPROM by setting "DI" to "0" and then +		 * raising and then lowering the clock. +		 */ +		if (data & mask) +			eec |= IXGBE_EEC_DI; +		else +			eec &= ~IXGBE_EEC_DI; + +		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); +		IXGBE_WRITE_FLUSH(hw); + +		udelay(1); + +		ixgbe_raise_eeprom_clk(hw, &eec); +		ixgbe_lower_eeprom_clk(hw, &eec); + +		/* +		 * Shift mask to signify next bit of data to shift in to the +		 * EEPROM +		 */ +		mask = mask >> 1; +	} + +	/* We leave the "DI" bit set to "0" when we leave this routine. 
*/ +	eec &= ~IXGBE_EEC_DI; +	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); +	IXGBE_WRITE_FLUSH(hw); +} + +/** + *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM + *  @hw: pointer to hardware structure + **/ +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) +{ +	u32 eec; +	u32 i; +	u16 data = 0; + +	/* +	 * In order to read a register from the EEPROM, we need to shift +	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising +	 * the clock input to the EEPROM (setting the SK bit), and then reading +	 * the value of the "DO" bit.  During this "shifting in" process the +	 * "DI" bit should always be clear. +	 */ +	eec = IXGBE_READ_REG(hw, IXGBE_EEC); + +	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); + +	for (i = 0; i < count; i++) { +		data = data << 1; +		ixgbe_raise_eeprom_clk(hw, &eec); + +		eec = IXGBE_READ_REG(hw, IXGBE_EEC); + +		eec &= ~(IXGBE_EEC_DI); +		if (eec & IXGBE_EEC_DO) +			data |= 1; + +		ixgbe_lower_eeprom_clk(hw, &eec); +	} + +	return data; +} + +/** + *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. + *  @hw: pointer to hardware structure + *  @eec: EEC register's current value + **/ +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ +	/* +	 * Raise the clock input to the EEPROM +	 * (setting the SK bit), then delay +	 */ +	*eec = *eec | IXGBE_EEC_SK; +	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); +	IXGBE_WRITE_FLUSH(hw); +	udelay(1); +} + +/** + *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. + *  @hw: pointer to hardware structure + *  @eecd: EECD's current value + **/ +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ +	/* +	 * Lower the clock input to the EEPROM (clearing the SK bit), then +	 * delay +	 */ +	*eec = *eec & ~IXGBE_EEC_SK; +	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); +	IXGBE_WRITE_FLUSH(hw); +	udelay(1); +} + +/** + *  ixgbe_release_eeprom - Release EEPROM, release semaphores + *  @hw: pointer to hardware structure + **/ +static void ixgbe_release_eeprom(struct ixgbe_hw *hw) +{ +	u32 eec; + +	eec = IXGBE_READ_REG(hw, IXGBE_EEC); + +	eec |= IXGBE_EEC_CS;  /* Pull CS high */ +	eec &= ~IXGBE_EEC_SK; /* Lower SCK */ + +	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); +	IXGBE_WRITE_FLUSH(hw); + +	udelay(1); + +	/* Stop requesting EEPROM access */ +	eec &= ~IXGBE_EEC_REQ; +	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + +	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + +	/* +	 * Delay before attempt to obtain semaphore again to allow FW +	 * access. 
semaphore_delay is in ms we need us for usleep_range +	 */ +	usleep_range(hw->eeprom.semaphore_delay * 1000, +		     hw->eeprom.semaphore_delay * 2000); +} + +/** + *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum + *  @hw: pointer to hardware structure + **/ +u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ +	u16 i; +	u16 j; +	u16 checksum = 0; +	u16 length = 0; +	u16 pointer = 0; +	u16 word = 0; + +	/* Include 0x0-0x3F in the checksum */ +	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { +		if (hw->eeprom.ops.read(hw, i, &word) != 0) { +			hw_dbg(hw, "EEPROM read failed\n"); +			break; +		} +		checksum += word; +	} + +	/* Include all data from pointers except for the fw pointer */ +	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { +		hw->eeprom.ops.read(hw, i, &pointer); + +		/* Make sure the pointer seems valid */ +		if (pointer != 0xFFFF && pointer != 0) { +			hw->eeprom.ops.read(hw, pointer, &length); + +			if (length != 0xFFFF && length != 0) { +				for (j = pointer+1; j <= pointer+length; j++) { +					hw->eeprom.ops.read(hw, j, &word); +					checksum += word; +				} +			} +		} +	} + +	checksum = (u16)IXGBE_EEPROM_SUM - checksum; + +	return checksum; +} + +/** + *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum + *  @hw: pointer to hardware structure + *  @checksum_val: calculated checksum + * + *  Performs checksum calculation and validates the EEPROM checksum.  If the + *  caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, +                                           u16 *checksum_val) +{ +	s32 status; +	u16 checksum; +	u16 read_checksum = 0; + +	/* +	 * Read the first word from the EEPROM. If this times out or fails, do +	 * not continue or we could be in for a very long wait while every +	 * EEPROM read fails +	 */ +	status = hw->eeprom.ops.read(hw, 0, &checksum); + +	if (status == 0) { +		checksum = hw->eeprom.ops.calc_checksum(hw); + +		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + +		/* +		 * Verify read checksum from EEPROM is the same as +		 * calculated checksum +		 */ +		if (read_checksum != checksum) +			status = IXGBE_ERR_EEPROM_CHECKSUM; + +		/* If the user cares, return the calculated checksum */ +		if (checksum_val) +			*checksum_val = checksum; +	} else { +		hw_dbg(hw, "EEPROM read failed\n"); +	} + +	return status; +} + +/** + *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum + *  @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ +	s32 status; +	u16 checksum; + +	/* +	 * Read the first word from the EEPROM. If this times out or fails, do +	 * not continue or we could be in for a very long wait while every +	 * EEPROM read fails +	 */ +	status = hw->eeprom.ops.read(hw, 0, &checksum); + +	if (status == 0) { +		checksum = hw->eeprom.ops.calc_checksum(hw); +		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, +					      checksum); +	} else { +		hw_dbg(hw, "EEPROM read failed\n"); +	} + +	return status; +} + +/** + *  ixgbe_validate_mac_addr - Validate MAC address + *  @mac_addr: pointer to MAC address. 
+ * + *  Tests a MAC address to ensure it is a valid Individual Address + **/ +s32 ixgbe_validate_mac_addr(u8 *mac_addr) +{ +	s32 status = 0; + +	/* Make sure it is not a multicast address */ +	if (IXGBE_IS_MULTICAST(mac_addr)) +		status = IXGBE_ERR_INVALID_MAC_ADDR; +	/* Not a broadcast address */ +	else if (IXGBE_IS_BROADCAST(mac_addr)) +		status = IXGBE_ERR_INVALID_MAC_ADDR; +	/* Reject the zero address */ +	else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && +	         mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) +		status = IXGBE_ERR_INVALID_MAC_ADDR; + +	return status; +} + +/** + *  ixgbe_set_rar_generic - Set Rx address register + *  @hw: pointer to hardware structure + *  @index: Receive address register to write + *  @addr: Address to put into receive address register + *  @vmdq: VMDq "set" or "pool" index + *  @enable_addr: set flag that address is active + * + *  Puts an ethernet address into a receive address register. + **/ +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, +                          u32 enable_addr) +{ +	u32 rar_low, rar_high; +	u32 rar_entries = hw->mac.num_rar_entries; + +	/* Make sure we are using a valid rar index range */ +	if (index >= rar_entries) { +		hw_dbg(hw, "RAR index %d is out of range.\n", index); +		return IXGBE_ERR_INVALID_ARGUMENT; +	} + +	/* setup VMDq pool selection before this RAR gets enabled */ +	hw->mac.ops.set_vmdq(hw, index, vmdq); + +	/* +	 * HW expects these in little endian so we reverse the byte +	 * order from network order (big endian) to little endian +	 */ +	rar_low = ((u32)addr[0] | +		   ((u32)addr[1] << 8) | +		   ((u32)addr[2] << 16) | +		   ((u32)addr[3] << 24)); +	/* +	 * Some parts put the VMDq setting in the extra RAH bits, +	 * so save everything except the lower 16 bits that hold part +	 * of the address and the address valid bit. +	 */ +	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); +	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); +	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); + +	if (enable_addr != 0) +		rar_high |= IXGBE_RAH_AV; + +	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); +	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + +	return 0; +} + +/** + *  ixgbe_clear_rar_generic - Remove Rx address register + *  @hw: pointer to hardware structure + *  @index: Receive address register to write + * + *  Clears an ethernet address from a receive address register. + **/ +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) +{ +	u32 rar_high; +	u32 rar_entries = hw->mac.num_rar_entries; + +	/* Make sure we are using a valid rar index range */ +	if (index >= rar_entries) { +		hw_dbg(hw, "RAR index %d is out of range.\n", index); +		return IXGBE_ERR_INVALID_ARGUMENT; +	} + +	/* +	 * Some parts put the VMDq setting in the extra RAH bits, +	 * so save everything except the lower 16 bits that hold part +	 * of the address and the address valid bit. +	 */ +	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); +	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + +	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); +	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + +	/* clear VMDq pool/queue selection for this RAR */ +	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); + +	return 0; +} + +/** + *  ixgbe_init_rx_addrs_generic - Initializes receive address filters. + *  @hw: pointer to hardware structure + * + *  Places the MAC address in receive address register 0 and clears the rest + *  of the receive address registers. Clears the multicast table. 
Assumes
+ *  the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/*
+	 * If the current mac address is valid, assume it is a software override
+	 * to the permanent address.
+	 * Otherwise, use the permanent address from the eeprom.
+	 */
+	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+	    IXGBE_ERR_INVALID_MAC_ADDR) {
+		/* Get the MAC address from the RAR0 for later reference */
+		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
+	} else {
+		/* Setup the receive address. */
+		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
+		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
+
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+		/*  clear VMDq pool/queue selection for RAR 0 */
+		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+	}
+	hw->addr_ctrl.overflow_promisc = 0;
+
+	hw->addr_ctrl.rar_used_count = 1;
+
+	/* Zero out the other receive addresses. */
+	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+	for (i = 1; i < rar_entries; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+	}
+
+	/* Clear the MTA */
+	hw->addr_ctrl.mta_in_use = 0;
+	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+	hw_dbg(hw, " Clearing MTA\n");
+	for (i = 0; i < hw->mac.mcft_size; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+	if (hw->mac.ops.init_uta_tables)
+		hw->mac.ops.init_uta_tables(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts the 12 bits from a multicast address that determine which
+ *  bit-vector to set in the multicast table. The hardware uses 12 bits from
+ *  incoming rx multicast addresses to determine the bit-vector to check in
+ *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ *  by the MO field of the MCSTCTRL. The MO field is set during initialization
+ *  to mc_filter_type.
+ **/
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:   /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:   /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:   /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:   /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:  /* Invalid mc_filter_type */
+		hw_dbg(hw, "MC filter type param set incorrectly\n");
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+/**
+ *  ixgbe_set_mta - Set bit-vector in multicast table
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address to add
+ *
+ *  Sets the bit-vector in the multicast table.
+ **/
+static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector;
+	u32 vector_bit;
+	u32 vector_reg;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector = ixgbe_mta_vector(hw, mc_addr);
+	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
+
+	/*
+	 * The MTA is a register array of 128 32-bit registers. It is treated
+	 * like an array of 4096 bits.
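+	 * For instance, with mc_filter_type 0 a multicast address ending in
+	 * 0x12,0x34 hashes to vector 0x341 in ixgbe_mta_vector() above, which
+	 * corresponds to bit 1 of MTA register 26 (illustrative values only).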
We want to set bit +	 * BitArray[vector_value]. So we figure out what register the bit is +	 * in, read it, OR in the new bit, then write back the new value.  The +	 * register is determined by the upper 7 bits of the vector value and +	 * the bit within that register are determined by the lower 5 bits of +	 * the value. +	 */ +	vector_reg = (vector >> 5) & 0x7F; +	vector_bit = vector & 0x1F; +	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses + *  @hw: pointer to hardware structure + *  @netdev: pointer to net device structure + * + *  The given list replaces any existing list. Clears the MC addrs from receive + *  address registers and the multicast table. Uses unused receive address + *  registers for the first multicast addresses, and hashes the rest into the + *  multicast table. + **/ +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, +				      struct net_device *netdev) +{ +	struct netdev_hw_addr *ha; +	u32 i; + +	/* +	 * Set the new number of MC addresses that we are being requested to +	 * use. +	 */ +	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); +	hw->addr_ctrl.mta_in_use = 0; + +	/* Clear mta_shadow */ +	hw_dbg(hw, " Clearing MTA\n"); +	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + +	/* Update mta shadow */ +	netdev_for_each_mc_addr(ha, netdev) { +		hw_dbg(hw, " Adding the multicast addresses:\n"); +		ixgbe_set_mta(hw, ha->addr); +	} + +	/* Enable mta */ +	for (i = 0; i < hw->mac.mcft_size; i++) +		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, +				      hw->mac.mta_shadow[i]); + +	if (hw->addr_ctrl.mta_in_use > 0) +		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, +		                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + +	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); +	return 0; +} + +/** + *  ixgbe_enable_mc_generic - Enable multicast address in RAR + *  @hw: pointer to hardware structure + * + *  Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) +{ +	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + +	if (a->mta_in_use > 0) +		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | +		                hw->mac.mc_filter_type); + +	return 0; +} + +/** + *  ixgbe_disable_mc_generic - Disable multicast address in RAR + *  @hw: pointer to hardware structure + * + *  Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) +{ +	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + +	if (a->mta_in_use > 0) +		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + +	return 0; +} + +/** + *  ixgbe_fc_enable_generic - Enable flow control + *  @hw: pointer to hardware structure + *  @packetbuf_num: packet buffer number (0-7) + * + *  Enable flow control according to the current settings. 
+ **/ +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) +{ +	s32 ret_val = 0; +	u32 mflcn_reg, fccfg_reg; +	u32 reg; +	u32 rx_pba_size; +	u32 fcrtl, fcrth; + +#ifdef CONFIG_DCB +	if (hw->fc.requested_mode == ixgbe_fc_pfc) +		goto out; + +#endif /* CONFIG_DCB */ +	/* Negotiate the fc mode to use */ +	ret_val = ixgbe_fc_autoneg(hw); +	if (ret_val == IXGBE_ERR_FLOW_CONTROL) +		goto out; + +	/* Disable any previous flow control settings */ +	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); +	mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); + +	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); +	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); + +	/* +	 * The possible values of fc.current_mode are: +	 * 0: Flow control is completely disabled +	 * 1: Rx flow control is enabled (we can receive pause frames, +	 *    but not send pause frames). +	 * 2: Tx flow control is enabled (we can send pause frames but +	 *    we do not support receiving pause frames). +	 * 3: Both Rx and Tx flow control (symmetric) are enabled. +#ifdef CONFIG_DCB +	 * 4: Priority Flow Control is enabled. +#endif +	 * other: Invalid. +	 */ +	switch (hw->fc.current_mode) { +	case ixgbe_fc_none: +		/* +		 * Flow control is disabled by software override or autoneg. +		 * The code below will actually disable it in the HW. +		 */ +		break; +	case ixgbe_fc_rx_pause: +		/* +		 * Rx Flow control is enabled and Tx Flow control is +		 * disabled by software override. Since there really +		 * isn't a way to advertise that we are capable of RX +		 * Pause ONLY, we will advertise that we support both +		 * symmetric and asymmetric Rx PAUSE.  Later, we will +		 * disable the adapter's ability to send PAUSE frames. +		 */ +		mflcn_reg |= IXGBE_MFLCN_RFCE; +		break; +	case ixgbe_fc_tx_pause: +		/* +		 * Tx Flow control is enabled, and Rx Flow control is +		 * disabled by software override. +		 */ +		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; +		break; +	case ixgbe_fc_full: +		/* Flow control (both Rx and Tx) is enabled by SW override. */ +		mflcn_reg |= IXGBE_MFLCN_RFCE; +		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; +		break; +#ifdef CONFIG_DCB +	case ixgbe_fc_pfc: +		goto out; +		break; +#endif /* CONFIG_DCB */ +	default: +		hw_dbg(hw, "Flow control param set incorrectly\n"); +		ret_val = IXGBE_ERR_CONFIG; +		goto out; +		break; +	} + +	/* Set 802.3x based flow control settings. 
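+	 * As an illustration of the threshold math below: with a hypothetical
+	 * 512 KB Rx packet buffer and fc.high_water of 8 (KB), the XOFF
+	 * threshold written to FCRTH works out to (512 - 8) << 10 = 516096
+	 * bytes, before the enable bit is OR'd in.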
*/ +	mflcn_reg |= IXGBE_MFLCN_DPF; +	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); +	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + +	rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); +	rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + +	fcrth = (rx_pba_size - hw->fc.high_water) << 10; +	fcrtl = (rx_pba_size - hw->fc.low_water) << 10; + +	if (hw->fc.current_mode & ixgbe_fc_tx_pause) { +		fcrth |= IXGBE_FCRTH_FCEN; +		if (hw->fc.send_xon) +			fcrtl |= IXGBE_FCRTL_XONE; +	} + +	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); +	IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); + +	/* Configure pause time (2 TCs per register) */ +	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); +	if ((packetbuf_num & 1) == 0) +		reg = (reg & 0xFFFF0000) | hw->fc.pause_time; +	else +		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); +	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); + +	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + +out: +	return ret_val; +} + +/** + *  ixgbe_fc_autoneg - Configure flow control + *  @hw: pointer to hardware structure + * + *  Compares our advertised flow control capabilities to those advertised by + *  our link partner, and determines the proper flow control mode to use. + **/ +s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) +{ +	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; +	ixgbe_link_speed speed; +	bool link_up; + +	if (hw->fc.disable_fc_autoneg) +		goto out; + +	/* +	 * AN should have completed when the cable was plugged in. +	 * Look for reasons to bail out.  Bail out if: +	 * - FC autoneg is disabled, or if +	 * - link is not up. +	 * +	 * Since we're being called from an LSC, link is already known to be up. +	 * So use link_up_wait_to_complete=false. +	 */ +	hw->mac.ops.check_link(hw, &speed, &link_up, false); +	if (!link_up) { +		ret_val = IXGBE_ERR_FLOW_CONTROL; +		goto out; +	} + +	switch (hw->phy.media_type) { +	/* Autoneg flow control on fiber adapters */ +	case ixgbe_media_type_fiber: +		if (speed == IXGBE_LINK_SPEED_1GB_FULL) +			ret_val = ixgbe_fc_autoneg_fiber(hw); +		break; + +	/* Autoneg flow control on backplane adapters */ +	case ixgbe_media_type_backplane: +		ret_val = ixgbe_fc_autoneg_backplane(hw); +		break; + +	/* Autoneg flow control on copper adapters */ +	case ixgbe_media_type_copper: +		if (ixgbe_device_supports_autoneg_fc(hw) == 0) +			ret_val = ixgbe_fc_autoneg_copper(hw); +		break; + +	default: +		break; +	} + +out: +	if (ret_val == 0) { +		hw->fc.fc_was_autonegged = true; +	} else { +		hw->fc.fc_was_autonegged = false; +		hw->fc.current_mode = hw->fc.requested_mode; +	} +	return ret_val; +} + +/** + *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber + *  @hw: pointer to hardware structure + * + *  Enable flow control according on 1 gig fiber. 
+ **/ +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +{ +	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; +	s32 ret_val; + +	/* +	 * On multispeed fiber at 1g, bail out if +	 * - link is up but AN did not complete, or if +	 * - link is up and AN completed but timed out +	 */ + +	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); +	if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || +	    ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { +		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; +		goto out; +	} + +	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); +	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + +	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg, +			       pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, +			       IXGBE_PCS1GANA_ASM_PAUSE, +			       IXGBE_PCS1GANA_SYM_PAUSE, +			       IXGBE_PCS1GANA_ASM_PAUSE); + +out: +	return ret_val; +} + +/** + *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + *  @hw: pointer to hardware structure + * + *  Enable flow control according to IEEE clause 37. + **/ +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +{ +	u32 links2, anlp1_reg, autoc_reg, links; +	s32 ret_val; + +	/* +	 * On backplane, bail out if +	 * - backplane autoneg was not completed, or if +	 * - we are 82599 and link partner is not AN enabled +	 */ +	links = IXGBE_READ_REG(hw, IXGBE_LINKS); +	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { +		hw->fc.fc_was_autonegged = false; +		hw->fc.current_mode = hw->fc.requested_mode; +		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; +		goto out; +	} + +	if (hw->mac.type == ixgbe_mac_82599EB) { +		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); +		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { +			hw->fc.fc_was_autonegged = false; +			hw->fc.current_mode = hw->fc.requested_mode; +			ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; +			goto out; +		} +	} +	/* +	 * Read the 10g AN autoc and LP ability registers and resolve +	 * local flow control settings accordingly +	 */ +	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + +	ret_val = ixgbe_negotiate_fc(hw, autoc_reg, +		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, +		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); + +out: +	return ret_val; +} + +/** + *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + *  @hw: pointer to hardware structure + * + *  Enable flow control according to IEEE clause 37. 
+ **/ +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +{ +	u16 technology_ability_reg = 0; +	u16 lp_technology_ability_reg = 0; + +	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, +			     MDIO_MMD_AN, +			     &technology_ability_reg); +	hw->phy.ops.read_reg(hw, MDIO_AN_LPA, +			     MDIO_MMD_AN, +			     &lp_technology_ability_reg); + +	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, +				  (u32)lp_technology_ability_reg, +				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, +				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); +} + +/** + *  ixgbe_negotiate_fc - Negotiate flow control + *  @hw: pointer to hardware structure + *  @adv_reg: flow control advertised settings + *  @lp_reg: link partner's flow control settings + *  @adv_sym: symmetric pause bit in advertisement + *  @adv_asm: asymmetric pause bit in advertisement + *  @lp_sym: symmetric pause bit in link partner advertisement + *  @lp_asm: asymmetric pause bit in link partner advertisement + * + *  Find the intersection between advertised settings and link partner's + *  advertised settings + **/ +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, +			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ +	if ((!(adv_reg)) ||  (!(lp_reg))) +		return IXGBE_ERR_FC_NOT_NEGOTIATED; + +	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { +		/* +		 * Now we need to check if the user selected Rx ONLY +		 * of pause frames.  In this case, we had to advertise +		 * FULL flow control because we could not advertise RX +		 * ONLY. Hence, we must now check to see if we need to +		 * turn OFF the TRANSMISSION of PAUSE frames. +		 */ +		if (hw->fc.requested_mode == ixgbe_fc_full) { +			hw->fc.current_mode = ixgbe_fc_full; +			hw_dbg(hw, "Flow Control = FULL.\n"); +		} else { +			hw->fc.current_mode = ixgbe_fc_rx_pause; +			hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); +		} +	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && +		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) { +		hw->fc.current_mode = ixgbe_fc_tx_pause; +		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); +	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && +		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { +		hw->fc.current_mode = ixgbe_fc_rx_pause; +		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); +	} else { +		hw->fc.current_mode = ixgbe_fc_none; +		hw_dbg(hw, "Flow Control = NONE.\n"); +	} +	return 0; +} + +/** + *  ixgbe_setup_fc - Set up flow control + *  @hw: pointer to hardware structure + * + *  Called at init time to set up flow control. + **/ +static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) +{ +	s32 ret_val = 0; +	u32 reg = 0, reg_bp = 0; +	u16 reg_cu = 0; + +#ifdef CONFIG_DCB +	if (hw->fc.requested_mode == ixgbe_fc_pfc) { +		hw->fc.current_mode = hw->fc.requested_mode; +		goto out; +	} + +#endif /* CONFIG_DCB */ +	/* Validate the packetbuf configuration */ +	if (packetbuf_num < 0 || packetbuf_num > 7) { +		hw_dbg(hw, "Invalid packet buffer number [%d], expected range " +		       "is 0-7\n", packetbuf_num); +		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; +		goto out; +	} + +	/* +	 * Validate the water mark configuration.  Zero water marks are invalid +	 * because it causes the controller to just blast out fc packets. +	 */ +	if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { +		hw_dbg(hw, "Invalid water mark configuration\n"); +		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; +		goto out; +	} + +	/* +	 * Validate the requested mode.  
Strict IEEE mode does not allow
+	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
+	 */
+	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
+		       "IEEE mode\n");
+		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+		goto out;
+	}
+
+	/*
+	 * 10gig parts do not have a word in the EEPROM to determine the
+	 * default flow control setting, so we explicitly set it to full.
+	 */
+	if (hw->fc.requested_mode == ixgbe_fc_default)
+		hw->fc.requested_mode = ixgbe_fc_full;
+
+	/*
+	 * Set up the 1G and 10G flow control advertisement registers so the
+	 * HW will be able to do fc autoneg once the cable is plugged in.  If
+	 * we link at 10G, the 1G advertisement is harmless and vice versa.
+	 */
+
+	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber:
+	case ixgbe_media_type_backplane:
+		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+		break;
+
+	case ixgbe_media_type_copper:
+		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+					MDIO_MMD_AN, &reg_cu);
+		break;
+
+	default:
+		;
+	}
+
+	/*
+	 * The possible values of fc.requested_mode are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames,
+	 *    but not send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but
+	 *    we do not support receiving pause frames).
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
+	 * 4: Priority Flow Control is enabled.
+#endif
+	 * other: Invalid.
+	 */
+	switch (hw->fc.requested_mode) {
+	case ixgbe_fc_none:
+		/* Flow control completely disabled by software override. */
+		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+		if (hw->phy.media_type == ixgbe_media_type_backplane)
+			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+				    IXGBE_AUTOC_ASM_PAUSE);
+		else if (hw->phy.media_type == ixgbe_media_type_copper)
+			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+		if (hw->phy.media_type == ixgbe_media_type_backplane)
+			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+				   IXGBE_AUTOC_ASM_PAUSE);
+		else if (hw->phy.media_type == ixgbe_media_type_copper)
+			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
+		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+		if (hw->phy.media_type == ixgbe_media_type_backplane) {
+			reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
+			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
+		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
+			reg_cu |= (IXGBE_TAF_ASM_PAUSE);
+			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
+		}
+		break;
+	case ixgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override.
*/ +		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); +		if (hw->phy.media_type == ixgbe_media_type_backplane) +			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | +				   IXGBE_AUTOC_ASM_PAUSE); +		else if (hw->phy.media_type == ixgbe_media_type_copper) +			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); +		break; +#ifdef CONFIG_DCB +	case ixgbe_fc_pfc: +		goto out; +		break; +#endif /* CONFIG_DCB */ +	default: +		hw_dbg(hw, "Flow control param set incorrectly\n"); +		ret_val = IXGBE_ERR_CONFIG; +		goto out; +		break; +	} + +	if (hw->mac.type != ixgbe_mac_X540) { +		/* +		 * Enable auto-negotiation between the MAC & PHY; +		 * the MAC will advertise clause 37 flow control. +		 */ +		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); +		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + +		/* Disable AN timeout */ +		if (hw->fc.strict_ieee) +			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + +		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); +		hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); +	} + +	/* +	 * AUTOC restart handles negotiation of 1G and 10G on backplane +	 * and copper. There is no need to set the PCS1GCTL register. +	 * +	 */ +	if (hw->phy.media_type == ixgbe_media_type_backplane) { +		reg_bp |= IXGBE_AUTOC_AN_RESTART; +		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); +	} else if ((hw->phy.media_type == ixgbe_media_type_copper) && +		    (ixgbe_device_supports_autoneg_fc(hw) == 0)) { +		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, +				      MDIO_MMD_AN, reg_cu); +	} + +	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); +out: +	return ret_val; +} + +/** + *  ixgbe_disable_pcie_master - Disable PCI-express master access + *  @hw: pointer to hardware structure + * + *  Disables PCI-Express master access and verifies there are no pending + *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + *  bit hasn't caused the master requests to be disabled, else 0 + *  is returned signifying master requests disabled. + **/ +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +{ +	struct ixgbe_adapter *adapter = hw->back; +	u32 i; +	u32 reg_val; +	u32 number_of_queues; +	s32 status = 0; +	u16 dev_status = 0; + +	/* Just jump out if bus mastering is already disabled */ +	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) +		goto out; + +	/* Disable the receive unit by stopping each queue */ +	number_of_queues = hw->mac.max_rx_queues; +	for (i = 0; i < number_of_queues; i++) { +		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); +		if (reg_val & IXGBE_RXDCTL_ENABLE) { +			reg_val &= ~IXGBE_RXDCTL_ENABLE; +			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); +		} +	} + +	reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); +	reg_val |= IXGBE_CTRL_GIO_DIS; +	IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); + +	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { +		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) +			goto check_device_status; +		udelay(100); +	} + +	hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); +	status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + +	/* +	 * Before proceeding, make sure that the PCIe block does not have +	 * transactions pending. 
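+	 * The loop below polls the Transaction Pending flag in the PCIe
+	 * Device Status config word and gives up after
+	 * IXGBE_PCI_MASTER_DISABLE_TIMEOUT polls.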
+	 */ +check_device_status: +	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { +		pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS, +							 &dev_status); +		if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) +			break; +		udelay(100); +	} + +	if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT) +		hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); +	else +		goto out; + +	/* +	 * Two consecutive resets are required via CTRL.RST per datasheet +	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine +	 * of this need.  The first reset prevents new master requests from +	 * being issued by our device.  We then must wait 1usec for any +	 * remaining completions from the PCIe bus to trickle in, and then reset +	 * again to clear out any effects they may have had on our device. +	 */ +	 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + +out: +	return status; +} + + +/** + *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore + *  @hw: pointer to hardware structure + *  @mask: Mask to specify which semaphore to acquire + * + *  Acquires the SWFW semaphore through the GSSR register for the specified + *  function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ +	u32 gssr; +	u32 swmask = mask; +	u32 fwmask = mask << 5; +	s32 timeout = 200; + +	while (timeout) { +		/* +		 * SW EEPROM semaphore bit is used for access to all +		 * SW_FW_SYNC/GSSR bits (not just EEPROM) +		 */ +		if (ixgbe_get_eeprom_semaphore(hw)) +			return IXGBE_ERR_SWFW_SYNC; + +		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); +		if (!(gssr & (fwmask | swmask))) +			break; + +		/* +		 * Firmware currently using resource (fwmask) or other software +		 * thread currently using resource (swmask) +		 */ +		ixgbe_release_eeprom_semaphore(hw); +		usleep_range(5000, 10000); +		timeout--; +	} + +	if (!timeout) { +		hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n"); +		return IXGBE_ERR_SWFW_SYNC; +	} + +	gssr |= swmask; +	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + +	ixgbe_release_eeprom_semaphore(hw); +	return 0; +} + +/** + *  ixgbe_release_swfw_sync - Release SWFW semaphore + *  @hw: pointer to hardware structure + *  @mask: Mask to specify which semaphore to release + * + *  Releases the SWFW semaphore through the GSSR register for the specified + *  function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ +	u32 gssr; +	u32 swmask = mask; + +	ixgbe_get_eeprom_semaphore(hw); + +	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); +	gssr &= ~swmask; +	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + +	ixgbe_release_eeprom_semaphore(hw); +} + +/** + *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit + *  @hw: pointer to hardware structure + *  @regval: register value to write to RXCTRL + * + *  Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) +{ +	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + +	return 0; +} + +/** + *  ixgbe_blink_led_start_generic - Blink LED based on index. + *  @hw: pointer to hardware structure + *  @index: led number to blink + **/ +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +{ +	ixgbe_link_speed speed = 0; +	bool link_up = 0; +	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + +	/* +	 * Link must be up to auto-blink the LEDs; +	 * Force it if link is down. 
+	 */ +	hw->mac.ops.check_link(hw, &speed, &link_up, false); + +	if (!link_up) { +		autoc_reg |= IXGBE_AUTOC_AN_RESTART; +		autoc_reg |= IXGBE_AUTOC_FLU; +		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); +		IXGBE_WRITE_FLUSH(hw); +		usleep_range(10000, 20000); +	} + +	led_reg &= ~IXGBE_LED_MODE_MASK(index); +	led_reg |= IXGBE_LED_BLINK(index); +	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); +	IXGBE_WRITE_FLUSH(hw); + +	return 0; +} + +/** + *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index. + *  @hw: pointer to hardware structure + *  @index: led number to stop blinking + **/ +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +{ +	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + +	autoc_reg &= ~IXGBE_AUTOC_FLU; +	autoc_reg |= IXGBE_AUTOC_AN_RESTART; +	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + +	led_reg &= ~IXGBE_LED_MODE_MASK(index); +	led_reg &= ~IXGBE_LED_BLINK(index); +	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); +	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); +	IXGBE_WRITE_FLUSH(hw); + +	return 0; +} + +/** + *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + *  @hw: pointer to hardware structure + *  @san_mac_offset: SAN MAC address offset + * + *  This function will read the EEPROM location for the SAN MAC address + *  pointer, and returns the value at that location.  This is used in both + *  get and set mac_addr routines. + **/ +static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, +                                        u16 *san_mac_offset) +{ +	/* +	 * First read the EEPROM pointer to see if the MAC addresses are +	 * available. +	 */ +	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); + +	return 0; +} + +/** + *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM + *  @hw: pointer to hardware structure + *  @san_mac_addr: SAN MAC address + * + *  Reads the SAN MAC address from the EEPROM, if it's available.  This is + *  per-port, so set_lan_id() must be called before reading the addresses. + *  set_lan_id() is called by identify_sfp(), but this cannot be relied + *  upon for non-SFP connections, so we must call it here. + **/ +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ +	u16 san_mac_data, san_mac_offset; +	u8 i; + +	/* +	 * First read the EEPROM pointer to see if the MAC addresses are +	 * available.  If they're not, no point in calling set_lan_id() here. +	 */ +	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + +	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { +		/* +		 * No addresses available in this EEPROM.  It's not an +		 * error though, so just wipe the local address and return. +		 */ +		for (i = 0; i < 6; i++) +			san_mac_addr[i] = 0xFF; + +		goto san_mac_addr_out; +	} + +	/* make sure we know which port we need to program */ +	hw->mac.ops.set_lan_id(hw); +	/* apply the port offset to the address offset */ +	(hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : +	                 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); +	for (i = 0; i < 3; i++) { +		hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); +		san_mac_addr[i * 2] = (u8)(san_mac_data); +		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); +		san_mac_offset++; +	} + +san_mac_addr_out: +	return 0; +} + +/** + *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count + *  @hw: pointer to hardware structure + * + *  Read PCIe configuration space, and get the MSI-X vector count from + *  the capabilities table. + **/ +u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) +{ +	struct ixgbe_adapter *adapter = hw->back; +	u16 msix_count; +	pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS, +	                     &msix_count); +	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + +	/* MSI-X count is zero-based in HW, so increment to give proper value */ +	msix_count++; + +	return msix_count; +} + +/** + *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address + *  @hw: pointer to hardware struct + *  @rar: receive address register index to disassociate + *  @vmdq: VMDq pool index to remove from the rar + **/ +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ +	u32 mpsar_lo, mpsar_hi; +	u32 rar_entries = hw->mac.num_rar_entries; + +	/* Make sure we are using a valid rar index range */ +	if (rar >= rar_entries) { +		hw_dbg(hw, "RAR index %d is out of range.\n", rar); +		return IXGBE_ERR_INVALID_ARGUMENT; +	} + +	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); +	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + +	if (!mpsar_lo && !mpsar_hi) +		goto done; + +	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { +		if (mpsar_lo) { +			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); +			mpsar_lo = 0; +		} +		if (mpsar_hi) { +			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); +			mpsar_hi = 0; +		} +	} else if (vmdq < 32) { +		mpsar_lo &= ~(1 << vmdq); +		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); +	} else { +		mpsar_hi &= ~(1 << (vmdq - 32)); +		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); +	} + +	/* was that the last pool using this rar? 
*/ +	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) +		hw->mac.ops.clear_rar(hw, rar); +done: +	return 0; +} + +/** + *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address + *  @hw: pointer to hardware struct + *  @rar: receive address register index to associate with a VMDq index + *  @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ +	u32 mpsar; +	u32 rar_entries = hw->mac.num_rar_entries; + +	/* Make sure we are using a valid rar index range */ +	if (rar >= rar_entries) { +		hw_dbg(hw, "RAR index %d is out of range.\n", rar); +		return IXGBE_ERR_INVALID_ARGUMENT; +	} + +	if (vmdq < 32) { +		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); +		mpsar |= 1 << vmdq; +		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); +	} else { +		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); +		mpsar |= 1 << (vmdq - 32); +		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); +	} +	return 0; +} + +/** + *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array + *  @hw: pointer to hardware structure + **/ +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) +{ +	int i; + +	for (i = 0; i < 128; i++) +		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + +	return 0; +} + +/** + *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot + *  @hw: pointer to hardware structure + *  @vlan: VLAN id to write to VLAN filter + * + *  return the VLVF index where this VLAN id should be placed + * + **/ +static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) +{ +	u32 bits = 0; +	u32 first_empty_slot = 0; +	s32 regindex; + +	/* short cut the special case */ +	if (vlan == 0) +		return 0; + +	/* +	  * Search for the vlan id in the VLVF entries. Save off the first empty +	  * slot found along the way +	  */ +	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { +		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); +		if (!bits && !(first_empty_slot)) +			first_empty_slot = regindex; +		else if ((bits & 0x0FFF) == vlan) +			break; +	} + +	/* +	  * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan +	  * in the VLVF. Else use the first empty VLVF register for this +	  * vlan id. +	  */ +	if (regindex >= IXGBE_VLVF_ENTRIES) { +		if (first_empty_slot) +			regindex = first_empty_slot; +		else { +			hw_dbg(hw, "No space in VLVF.\n"); +			regindex = IXGBE_ERR_NO_SPACE; +		} +	} + +	return regindex; +} + +/** + *  ixgbe_set_vfta_generic - Set VLAN filter table + *  @hw: pointer to hardware structure + *  @vlan: VLAN id to write to VLAN filter + *  @vind: VMDq output index that maps queue to VLAN id in VFVFB + *  @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + *  Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, +                           bool vlan_on) +{ +	s32 regindex; +	u32 bitindex; +	u32 vfta; +	u32 bits; +	u32 vt; +	u32 targetbit; +	bool vfta_changed = false; + +	if (vlan > 4095) +		return IXGBE_ERR_PARAM; + +	/* +	 * this is a 2 part operation - first the VFTA, then the +	 * VLVF and VLVFB if VT Mode is set +	 * We don't write the VFTA until we know the VLVF part succeeded. 
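+	 * (For example, VLAN ID 100 = 0x064 maps to bit 4 of VFTA register 3
+	 * under the Part 1 math below.)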
+	 */ + +	/* Part 1 +	 * The VFTA is a bitstring made up of 128 32-bit registers +	 * that enable the particular VLAN id, much like the MTA: +	 *    bits[11-5]: which register +	 *    bits[4-0]:  which bit in the register +	 */ +	regindex = (vlan >> 5) & 0x7F; +	bitindex = vlan & 0x1F; +	targetbit = (1 << bitindex); +	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + +	if (vlan_on) { +		if (!(vfta & targetbit)) { +			vfta |= targetbit; +			vfta_changed = true; +		} +	} else { +		if ((vfta & targetbit)) { +			vfta &= ~targetbit; +			vfta_changed = true; +		} +	} + +	/* Part 2 +	 * If VT Mode is set +	 *   Either vlan_on +	 *     make sure the vlan is in VLVF +	 *     set the vind bit in the matching VLVFB +	 *   Or !vlan_on +	 *     clear the pool bit and possibly the vind +	 */ +	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); +	if (vt & IXGBE_VT_CTL_VT_ENABLE) { +		s32 vlvf_index; + +		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); +		if (vlvf_index < 0) +			return vlvf_index; + +		if (vlan_on) { +			/* set the pool bit */ +			if (vind < 32) { +				bits = IXGBE_READ_REG(hw, +						IXGBE_VLVFB(vlvf_index*2)); +				bits |= (1 << vind); +				IXGBE_WRITE_REG(hw, +						IXGBE_VLVFB(vlvf_index*2), +						bits); +			} else { +				bits = IXGBE_READ_REG(hw, +						IXGBE_VLVFB((vlvf_index*2)+1)); +				bits |= (1 << (vind-32)); +				IXGBE_WRITE_REG(hw, +						IXGBE_VLVFB((vlvf_index*2)+1), +						bits); +			} +		} else { +			/* clear the pool bit */ +			if (vind < 32) { +				bits = IXGBE_READ_REG(hw, +						IXGBE_VLVFB(vlvf_index*2)); +				bits &= ~(1 << vind); +				IXGBE_WRITE_REG(hw, +						IXGBE_VLVFB(vlvf_index*2), +						bits); +				bits |= IXGBE_READ_REG(hw, +						IXGBE_VLVFB((vlvf_index*2)+1)); +			} else { +				bits = IXGBE_READ_REG(hw, +						IXGBE_VLVFB((vlvf_index*2)+1)); +				bits &= ~(1 << (vind-32)); +				IXGBE_WRITE_REG(hw, +						IXGBE_VLVFB((vlvf_index*2)+1), +						bits); +				bits |= IXGBE_READ_REG(hw, +						IXGBE_VLVFB(vlvf_index*2)); +			} +		} + +		/* +		 * If there are still bits set in the VLVFB registers +		 * for the VLAN ID indicated we need to see if the +		 * caller is requesting that we clear the VFTA entry bit. +		 * If the caller has requested that we clear the VFTA +		 * entry bit but there are still pools/VFs using this VLAN +		 * ID entry then ignore the request.  We're not worried +		 * about the case where we're turning the VFTA VLAN ID +		 * entry bit on, only when requested to turn it off as +		 * there may be multiple pools and/or VFs using the +		 * VLAN ID entry.  In that case we cannot clear the +		 * VFTA bit until all pools/VFs using that VLAN ID have also +		 * been cleared.  This will be indicated by "bits" being +		 * zero. +		 */ +		if (bits) { +			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), +					(IXGBE_VLVF_VIEN | vlan)); +			if (!vlan_on) { +				/* someone wants to clear the vfta entry +				 * but some pools/VFs are still using it. +				 * Ignore it. 
*/
+				vfta_changed = false;
+			}
+		}
+		else
+			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+	}
+
+	if (vfta_changed)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_check_mac_link_generic - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true when link is up
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+				 bool *link_up, bool link_up_wait_to_complete)
+{
+	u32 links_reg, links_orig;
+	u32 i;
+
+	/* clear the old state */
+	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+	if (links_orig != links_reg) {
+		hw_dbg(hw, "LINKS changed from %08X to %08X\n",
+		       links_orig, links_reg);
+	}
+
+	if (link_up_wait_to_complete) {
+		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+			if (links_reg & IXGBE_LINKS_UP) {
+				*link_up = true;
+				break;
+			} else {
+				*link_up = false;
+			}
+			msleep(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+		}
+	} else {
+		if (links_reg & IXGBE_LINKS_UP)
+			*link_up = true;
+		else
+			*link_up = false;
+	}
+
+	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+	    IXGBE_LINKS_SPEED_10G_82599)
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+		 IXGBE_LINKS_SPEED_1G_82599)
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+		 IXGBE_LINKS_SPEED_100_82599)
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+	else
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+	/* if link is down, zero out the current_mode */
+	if (*link_up == false) {
+		hw->fc.current_mode = ixgbe_fc_none;
+		hw->fc.fc_was_autonegged = false;
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ *  the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function will read the EEPROM from the alternative SAN MAC address
+ *  block to check for support of the alternative WWNN/WWPN prefix.
+ **/ +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, +                                        u16 *wwpn_prefix) +{ +	u16 offset, caps; +	u16 alt_san_mac_blk_offset; + +	/* clear output first */ +	*wwnn_prefix = 0xFFFF; +	*wwpn_prefix = 0xFFFF; + +	/* check if alternative SAN MAC is supported */ +	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, +	                    &alt_san_mac_blk_offset); + +	if ((alt_san_mac_blk_offset == 0) || +	    (alt_san_mac_blk_offset == 0xFFFF)) +		goto wwn_prefix_out; + +	/* check capability in alternative san mac address block */ +	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; +	hw->eeprom.ops.read(hw, offset, &caps); +	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) +		goto wwn_prefix_out; + +	/* get the corresponding prefix for WWNN/WWPN */ +	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; +	hw->eeprom.ops.read(hw, offset, wwnn_prefix); + +	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; +	hw->eeprom.ops.read(hw, offset, wwpn_prefix); + +wwn_prefix_out: +	return 0; +} + +/** + *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow + *  control + *  @hw: pointer to hardware structure + * + *  There are several phys that do not support autoneg flow control. This + *  function check the device id to see if the associated phy supports + *  autoneg flow control. + **/ +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +{ + +	switch (hw->device_id) { +	case IXGBE_DEV_ID_X540T: +		return 0; +	case IXGBE_DEV_ID_82599_T3_LOM: +		return 0; +	default: +		return IXGBE_ERR_FC_NOT_SUPPORTED; +	} +} + +/** + *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + *  @hw: pointer to hardware structure + *  @enable: enable or disable switch for anti-spoofing + *  @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +{ +	int j; +	int pf_target_reg = pf >> 3; +	int pf_target_shift = pf % 8; +	u32 pfvfspoof = 0; + +	if (hw->mac.type == ixgbe_mac_82598EB) +		return; + +	if (enable) +		pfvfspoof = IXGBE_SPOOF_MACAS_MASK; + +	/* +	 * PFVFSPOOF register array is size 8 with 8 bits assigned to +	 * MAC anti-spoof enables in each register array element. +	 */ +	for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) +		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + +	/* If not enabling anti-spoofing then done */ +	if (!enable) +		return; + +	/* +	 * The PF should be allowed to spoof so that it can support +	 * emulation mode NICs.  
Reset the bit assigned to the PF +	 */ +	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg)); +	pfvfspoof ^= (1 << pf_target_shift); +	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof); +} + +/** + *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + *  @hw: pointer to hardware structure + *  @enable: enable or disable switch for VLAN anti-spoofing + *  @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ +	int vf_target_reg = vf >> 3; +	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; +	u32 pfvfspoof; + +	if (hw->mac.type == ixgbe_mac_82598EB) +		return; + +	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); +	if (enable) +		pfvfspoof |= (1 << vf_target_shift); +	else +		pfvfspoof &= ~(1 << vf_target_shift); +	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + *  ixgbe_get_device_caps_generic - Get additional device capabilities + *  @hw: pointer to hardware structure + *  @device_caps: the EEPROM word with the extra device capabilities + * + *  This function will read the EEPROM location for the device capabilities, + *  and return the word through device_caps. + **/ +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) +{ +	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); + +	return 0; +} + +/** + * ixgbe_set_rxpba_generic - Initialize RX packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, +			     int num_pb, +			     u32 headroom, +			     int strategy) +{ +	u32 pbsize = hw->mac.rx_pb_size; +	int i = 0; +	u32 rxpktsize, txpktsize, txpbthresh; + +	/* Reserve headroom */ +	pbsize -= headroom; + +	if (!num_pb) +		num_pb = 1; + +	/* Divide remaining packet buffer space amongst the number +	 * of packet buffers requested using supplied strategy. +	 */ +	switch (strategy) { +	case (PBA_STRATEGY_WEIGHTED): +		/* pba_80_48 strategy weight first half of packet buffer with +		 * 5/8 of the packet buffer space. +		 */ +		rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); +		pbsize -= rxpktsize * (num_pb / 2); +		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; +		for (; i < (num_pb / 2); i++) +			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); +		/* Fall through to configure remaining packet buffers */ +	case (PBA_STRATEGY_EQUAL): +		/* Divide the remaining Rx packet buffer evenly among the TCs */ +		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; +		for (; i < num_pb; i++) +			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); +		break; +	default: +		break; +	} + +	/* +	 * Setup Tx packet buffer and threshold equally for all TCs +	 * TXPBTHRESH register is set in K so divide by 1024 and subtract +	 * 10 since the largest packet we support is just over 9K. 
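+	 * As a rough example, assuming the usual 160 KB total Tx packet
+	 * buffer (IXGBE_TXPBSIZE_MAX) split across 8 packet buffers, each
+	 * TC ends up with 20 KB of buffer and a threshold of 20 - 10 = 10 KB.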
+	 */ +	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; +	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; +	for (i = 0; i < num_pb; i++) { +		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); +		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); +	} + +	/* Clear unused TCs, if any, to zero buffer size*/ +	for (; i < IXGBE_MAX_PB; i++) { +		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); +		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); +		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); +	} +} + +/** + *  ixgbe_calculate_checksum - Calculate checksum for buffer + *  @buffer: pointer to EEPROM + *  @length: size of EEPROM to calculate a checksum for + *  Calculates the checksum for some buffer on a specified length.  The + *  checksum calculated is returned. + **/ +static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) +{ +	u32 i; +	u8 sum = 0; + +	if (!buffer) +		return 0; + +	for (i = 0; i < length; i++) +		sum += buffer[i]; + +	return (u8) (0 - sum); +} + +/** + *  ixgbe_host_interface_command - Issue command to manageability block + *  @hw: pointer to the HW structure + *  @buffer: contains the command to write and where the return status will + *           be placed + *  @lenght: lenght of buffer, must be multiple of 4 bytes + * + *  Communicates with the manageability block.  On success return 0 + *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND. + **/ +static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer, +					u32 length) +{ +	u32 hicr, i; +	u32 hdr_size = sizeof(struct ixgbe_hic_hdr); +	u8 buf_len, dword_len; + +	s32 ret_val = 0; + +	if (length == 0 || length & 0x3 || +	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { +		hw_dbg(hw, "Buffer length failure.\n"); +		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; +		goto out; +	} + +	/* Check that the host interface is enabled. */ +	hicr = IXGBE_READ_REG(hw, IXGBE_HICR); +	if ((hicr & IXGBE_HICR_EN) == 0) { +		hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); +		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; +		goto out; +	} + +	/* Calculate length in DWORDs */ +	dword_len = length >> 2; + +	/* +	 * The device driver writes the relevant command block +	 * into the ram area. +	 */ +	for (i = 0; i < dword_len; i++) +		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, +				      i, *((u32 *)buffer + i)); + +	/* Setting this bit tells the ARC that a new command is pending. */ +	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); + +	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) { +		hicr = IXGBE_READ_REG(hw, IXGBE_HICR); +		if (!(hicr & IXGBE_HICR_C)) +			break; +		usleep_range(1000, 2000); +	} + +	/* Check command successful completion. 
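+	 * Firmware is expected to clear IXGBE_HICR_C once it has taken the
+	 * command, and IXGBE_HICR_SV indicates that a valid status word was
+	 * returned; a timeout or a clear SV bit is treated as a failure below.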
*/ +	if (i == IXGBE_HI_COMMAND_TIMEOUT || +	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { +		hw_dbg(hw, "Command has failed with no status valid.\n"); +		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; +		goto out; +	} + +	/* Calculate length in DWORDs */ +	dword_len = hdr_size >> 2; + +	/* first pull in the header so we know the buffer length */ +	for (i = 0; i < dword_len; i++) +		*((u32 *)buffer + i) = +			IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i); + +	/* If there is any thing in data position pull it in */ +	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; +	if (buf_len == 0) +		goto out; + +	if (length < (buf_len + hdr_size)) { +		hw_dbg(hw, "Buffer not large enough for reply message.\n"); +		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; +		goto out; +	} + +	/* Calculate length in DWORDs, add one for odd lengths */ +	dword_len = (buf_len + 1) >> 2; + +	/* Pull in the rest of the buffer (i is where we left off)*/ +	for (; i < buf_len; i++) +		*((u32 *)buffer + i) = +			IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i); + +out: +	return ret_val; +} + +/** + *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware + *  @hw: pointer to the HW structure + *  @maj: driver version major number + *  @min: driver version minor number + *  @build: driver version build number + *  @sub: driver version sub build number + * + *  Sends driver version number to firmware through the manageability + *  block.  On success return 0 + *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +				 u8 build, u8 sub) +{ +	struct ixgbe_hic_drv_info fw_cmd; +	int i; +	s32 ret_val = 0; + +	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) { +		ret_val = IXGBE_ERR_SWFW_SYNC; +		goto out; +	} + +	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; +	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; +	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; +	fw_cmd.port_num = (u8)hw->bus.func; +	fw_cmd.ver_maj = maj; +	fw_cmd.ver_min = min; +	fw_cmd.ver_build = build; +	fw_cmd.ver_sub = sub; +	fw_cmd.hdr.checksum = 0; +	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, +				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); +	fw_cmd.pad = 0; +	fw_cmd.pad2 = 0; + +	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { +		ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd, +						       sizeof(fw_cmd)); +		if (ret_val != 0) +			continue; + +		if (fw_cmd.hdr.cmd_or_resp.ret_status == +		    FW_CEM_RESP_STATUS_SUCCESS) +			ret_val = 0; +		else +			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + +		break; +	} + +	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); +out: +	return ret_val; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h new file mode 100644 index 00000000000..f24fd64a4c4 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -0,0 +1,145 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. 
+ +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_COMMON_H_ +#define _IXGBE_COMMON_H_ + +#include "ixgbe_type.h" +#include "ixgbe.h" + +u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, +                                  u32 pba_num_size); +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +					       u16 words, u16 *data); +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, +				   u16 words, u16 *data); +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, +				    u16 words, u16 *data); +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +                                       u16 *data); +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +					      u16 words, u16 *data); +u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, +                                           u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); + +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, +                          u32 enable_addr); +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, +				      struct net_device *netdev); +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num); +s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); + +s32 ixgbe_validate_mac_addr(u8 *mac_addr); +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); +void 
ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, +                           u32 vind, bool vlan_on); +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, +                                 ixgbe_link_speed *speed, +                                 bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, +                                 u16 *wwpn_prefix); +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +				 u8 build, u8 ver); + +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, +			     u32 headroom, int strategy); + +#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) + +#ifndef writeq +#define writeq(val, addr) writel((u32) (val), addr); \ +    writel((u32) (val >> 32), (addr + 4)); +#endif + +#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) + +#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\ +    writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) + +#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\ +    readl((a)->hw_addr + (reg) + ((offset) << 2))) + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) + +#define hw_dbg(hw, format, arg...) \ +	netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg) +#define e_dev_info(format, arg...) \ +	dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ +	dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ +	dev_err(&adapter->pdev->dev, format, ## arg) +#define e_dev_notice(format, arg...) \ +	dev_notice(&adapter->pdev->dev, format, ## arg) +#define e_info(msglvl, format, arg...) \ +	netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ +	netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ +	netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ +	netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) +#endif /* IXGBE_COMMON */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c new file mode 100644 index 00000000000..9d88c31487b --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -0,0 +1,320 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. 
+ +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  Linux NICS <linux.nics@intel.com> +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_ieee_credits - This calculates the ieee traffic class + * credits from the configured bandwidth percentages. Credits + * are the smallest unit programmable into the underlying + * hardware. The IEEE 802.1Qaz specification do not use bandwidth + * groups so this is much simplified from the CEE case. + */ +s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame) +{ +	int min_percent = 100; +	int min_credit, multiplier; +	int i; + +	min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / +			DCB_CREDIT_QUANTUM; + +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		if (bw[i] < min_percent && bw[i]) +			min_percent = bw[i]; +	} + +	multiplier = (min_credit / min_percent) + 1; + +	/* Find out the hw credits for each TC */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); + +		if (val < min_credit) +			val = min_credit; +		refill[i] = val; + +		max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; +	} +	return 0; +} + +/** + * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits + * @ixgbe_dcb_config: Struct containing DCB settings. + * @direction: Configuring either Tx or Rx. + * + * This function calculates the credits allocated to each traffic class. + * It should be called only after the rules are checked by + * ixgbe_dcb_check_config(). 
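+ * Credits are expressed in DCB_CREDIT_QUANTUM (64 byte) units, and a TC's
+ * max credit is never allowed to fall below the quanta needed for half of
+ * the maximum frame: with a 9216 byte jumbo frame, for example, that floor
+ * works out to ((9216 / 2) + 64 - 1) / 64 = 72 quanta.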
+ */ +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, +				   struct ixgbe_dcb_config *dcb_config, +				   int max_frame, u8 direction) +{ +	struct tc_bw_alloc *p; +	int min_credit; +	int min_multiplier; +	int min_percent = 100; +	s32 ret_val = 0; +	/* Initialization values default for Tx settings */ +	u32 credit_refill       = 0; +	u32 credit_max          = 0; +	u16 link_percentage     = 0; +	u8  bw_percent          = 0; +	u8  i; + +	if (dcb_config == NULL) { +		ret_val = DCB_ERR_CONFIG; +		goto out; +	} + +	min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / +			DCB_CREDIT_QUANTUM; + +	/* Find smallest link percentage */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		p = &dcb_config->tc_config[i].path[direction]; +		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; +		link_percentage = p->bwg_percent; + +		link_percentage = (link_percentage * bw_percent) / 100; + +		if (link_percentage && link_percentage < min_percent) +			min_percent = link_percentage; +	} + +	/* +	 * The ratio between traffic classes will control the bandwidth +	 * percentages seen on the wire. To calculate this ratio we use +	 * a multiplier. It is required that the refill credits must be +	 * larger than the max frame size so here we find the smallest +	 * multiplier that will allow all bandwidth percentages to be +	 * greater than the max frame size. +	 */ +	min_multiplier = (min_credit / min_percent) + 1; + +	/* Find out the link percentage for each TC first */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		p = &dcb_config->tc_config[i].path[direction]; +		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + +		link_percentage = p->bwg_percent; +		/* Must be careful of integer division for very small nums */ +		link_percentage = (link_percentage * bw_percent) / 100; +		if (p->bwg_percent > 0 && link_percentage == 0) +			link_percentage = 1; + +		/* Save link_percentage for reference */ +		p->link_percent = (u8)link_percentage; + +		/* Calculate credit refill ratio using multiplier */ +		credit_refill = min(link_percentage * min_multiplier, +				    MAX_CREDIT_REFILL); +		p->data_credits_refill = (u16)credit_refill; + +		/* Calculate maximum credit for the TC */ +		credit_max = (link_percentage * MAX_CREDIT) / 100; + +		/* +		 * Adjustment based on rule checking, if the percentage +		 * of a TC is too small, the maximum credit may not be +		 * enough to send out a jumbo frame in data plane arbitration. +		 */ +		if (credit_max && (credit_max < min_credit)) +			credit_max = min_credit; + +		if (direction == DCB_TX_CONFIG) { +			/* +			 * Adjustment based on rule checking, if the +			 * percentage of a TC is too small, the maximum +			 * credit may not be enough to send out a TSO +			 * packet in descriptor plane arbitration. 
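+			 * MINIMUM_CREDIT_FOR_TSO works out to
+			 * 32768 / 64 + 1 = 513 quanta, enough to cover the
+			 * largest 32 KB TSO buffer, so the 82598 floor below
+			 * can always schedule a full TSO.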
+			 */ +			if ((hw->mac.type == ixgbe_mac_82598EB) && +			    credit_max && +			    (credit_max < MINIMUM_CREDIT_FOR_TSO)) +				credit_max = MINIMUM_CREDIT_FOR_TSO; + +			dcb_config->tc_config[i].desc_credits_max = +				(u16)credit_max; +		} + +		p->data_credits_max = (u16)credit_max; +	} + +out: +	return ret_val; +} + +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) +{ +	int i; + +	*pfc_en = 0; +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) +		*pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i; +} + +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, +			     u16 *refill) +{ +	struct tc_bw_alloc *p; +	int i; + +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		p = &cfg->tc_config[i].path[direction]; +		refill[i] = p->data_credits_refill; +	} +} + +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) +{ +	int i; + +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) +		max[i] = cfg->tc_config[i].desc_credits_max; +} + +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, +			    u8 *bwgid) +{ +	struct tc_bw_alloc *p; +	int i; + +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		p = &cfg->tc_config[i].path[direction]; +		bwgid[i] = p->bwg_id; +	} +} + +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, +			    u8 *ptype) +{ +	struct tc_bw_alloc *p; +	int i; + +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		p = &cfg->tc_config[i].path[direction]; +		ptype[i] = p->prio_type; +	} +} + +/** + * ixgbe_dcb_hw_config - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. + */ +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, +                        struct ixgbe_dcb_config *dcb_config) +{ +	s32 ret = 0; +	u8 pfc_en; +	u8 ptype[MAX_TRAFFIC_CLASS]; +	u8 bwgid[MAX_TRAFFIC_CLASS]; +	u16 refill[MAX_TRAFFIC_CLASS]; +	u16 max[MAX_TRAFFIC_CLASS]; +	/* CEE does not define a priority to tc mapping so map 1:1 */ +	u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; + +	/* Unpack CEE standard containers */ +	ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); +	ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); +	ixgbe_dcb_unpack_max(dcb_config, max); +	ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); +	ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, +						bwgid, ptype); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, +						bwgid, ptype, prio_tc); +		break; +	default: +		break; +	} +	return ret; +} + +/* Helper routines to abstract HW specifics from DCB netlink ops */ +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en) +{ +	int ret = -EINVAL; + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en); +		break; +	default: +		break; +	} +	return ret; +} + +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, +			    u16 *refill, u16 *max, u8 *bwg_id, +			    u8 *prio_type, u8 *prio_tc) +{ +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, +							prio_type); +		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, +							     bwg_id, prio_type); +		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, +							   
  bwg_id, prio_type); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, +						  bwg_id, prio_type, prio_tc); +		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, +						       bwg_id, prio_type); +		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, +						       prio_type, prio_tc); +		break; +	default: +		break; +	} +	return 0; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h new file mode 100644 index 00000000000..e85826ae032 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -0,0 +1,167 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  Linux NICS <linux.nics@intel.com> +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_CONFIG_H_ +#define _DCB_CONFIG_H_ + +#include "ixgbe_type.h" + +/* DCB data structures */ + +#define IXGBE_MAX_PACKET_BUFFERS 8 +#define MAX_USER_PRIORITY        8 +#define MAX_TRAFFIC_CLASS        8 +#define MAX_BW_GROUP             8 +#define BW_PERCENT               100 + +#define DCB_TX_CONFIG            0 +#define DCB_RX_CONFIG            1 + +/* DCB error Codes */ +#define DCB_SUCCESS              0 +#define DCB_ERR_CONFIG           -1 +#define DCB_ERR_PARAM            -2 + +/* Transmit and receive Errors */ +/* Error in bandwidth group allocation */ +#define DCB_ERR_BW_GROUP        -3 +/* Error in traffic class bandwidth allocation */ +#define DCB_ERR_TC_BW           -4 +/* Traffic class has both link strict and group strict enabled */ +#define DCB_ERR_LS_GS           -5 +/* Link strict traffic class has non zero bandwidth */ +#define DCB_ERR_LS_BW_NONZERO   -6 +/* Link strict bandwidth group has non zero bandwidth */ +#define DCB_ERR_LS_BWG_NONZERO  -7 +/*  Traffic class has zero bandwidth */ +#define DCB_ERR_TC_BW_ZERO      -8 + +#define DCB_NOT_IMPLEMENTED      0x7FFFFFFF + +struct dcb_pfc_tc_debug { +	u8  tc; +	u8  pause_status; +	u64 pause_quanta; +}; + +enum strict_prio_type { +	prio_none = 0, +	prio_group, +	prio_link +}; + +/* DCB capability definitions */ +#define IXGBE_DCB_PG_SUPPORT        0x00000001 +#define IXGBE_DCB_PFC_SUPPORT       0x00000002 +#define IXGBE_DCB_BCN_SUPPORT       0x00000004 +#define IXGBE_DCB_UP2TC_SUPPORT     0x00000008 +#define IXGBE_DCB_GSP_SUPPORT       0x00000010 + +#define IXGBE_DCB_8_TC_SUPPORT      0x80 + +struct dcb_support { +	/* DCB capabilities */ +	u32 capabilities; + +	
/* Each bit represents a number of TCs configurable in the hw. +	 * If 8 traffic classes can be configured, the value is 0x80. +	 */ +	u8  traffic_classes; +	u8  pfc_traffic_classes; +}; + +/* Traffic class bandwidth allocation per direction */ +struct tc_bw_alloc { +	u8 bwg_id;		  /* Bandwidth Group (BWG) ID */ +	u8 bwg_percent;		  /* % of BWG's bandwidth */ +	u8 link_percent;	  /* % of link bandwidth */ +	u8 up_to_tc_bitmap;	  /* User Priority to Traffic Class mapping */ +	u16 data_credits_refill;  /* Credit refill amount in 64B granularity */ +	u16 data_credits_max;	  /* Max credits for a configured packet buffer +				   * in 64B granularity.*/ +	enum strict_prio_type prio_type; /* Link or Group Strict Priority */ +}; + +enum dcb_pfc_type { +	pfc_disabled = 0, +	pfc_enabled_full, +	pfc_enabled_tx, +	pfc_enabled_rx +}; + +/* Traffic class configuration */ +struct tc_configuration { +	struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ +	enum dcb_pfc_type  dcb_pfc; /* Class based flow control setting */ + +	u16 desc_credits_max; /* For Tx Descriptor arbitration */ +	u8 tc; /* Traffic class (TC) */ +}; + +struct dcb_num_tcs { +	u8 pg_tcs; +	u8 pfc_tcs; +}; + +struct ixgbe_dcb_config { +	struct dcb_support support; +	struct dcb_num_tcs num_tcs; +	struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; +	u8     bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ +	bool   pfc_mode_enable; + +	u32  dcb_cfg_version; /* Not used...OS-specific? */ +	u32  link_speed; /* For bandwidth allocation validation purpose */ +}; + +/* DCB driver APIs */ +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en); +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *); +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); + +/* DCB credits calculation */ +s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame); +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, +				   struct ixgbe_dcb_config *, int, u8); + +/* DCB hw initialization */ +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, +			    u8 *bwg_id, u8 *prio_type, u8 *tc_prio); +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en); +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); + +/* DCB definitions for credit calculation */ +#define DCB_CREDIT_QUANTUM	64   /* DCB Quantum */ +#define MAX_CREDIT_REFILL       511  /* 0x1FF * 64B = 32704B */ +#define DCB_MAX_TSO_SIZE        (32*1024) /* MAX TSO packet size supported in DCB mode */ +#define MINIMUM_CREDIT_FOR_TSO  (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ +#define MAX_CREDIT              4095 /* Maximum credit supported: 256KB * 1204 / 64B */ + +#endif /* _DCB_CONFIG_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c new file mode 100644 index 00000000000..2288c3cac01 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -0,0 +1,297 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. 
+ +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  Linux NICS <linux.nics@intel.com> +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" + +/** + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, +					u16 *refill, +					u16 *max, +					u8 *prio_type) +{ +	u32    reg           = 0; +	u32    credit_refill = 0; +	u32    credit_max    = 0; +	u8     i             = 0; + +	reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; +	IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); + +	reg = IXGBE_READ_REG(hw, IXGBE_RMCS); +	/* Enable Arbiter */ +	reg &= ~IXGBE_RMCS_ARBDIS; +	/* Enable Receive Recycle within the BWG */ +	reg |= IXGBE_RMCS_RRM; +	/* Enable Deficit Fixed Priority arbitration*/ +	reg |= IXGBE_RMCS_DFP; + +	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + +	/* Configure traffic class credits and priority */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		credit_refill = refill[i]; +		credit_max    = max[i]; + +		reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); + +		if (prio_type[i] == prio_link) +			reg |= IXGBE_RT2CR_LSP; + +		IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); +	} + +	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); +	reg |= IXGBE_RDRXCTL_RDMTS_1_2; +	reg |= IXGBE_RDRXCTL_MPBEN; +	reg |= IXGBE_RDRXCTL_MCEN; +	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); + +	reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +	/* Make sure there is enough descriptors before arbitration */ +	reg &= ~IXGBE_RXCTRL_DMBYPS; +	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); + +	return 0; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
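+ * Each TDTQ2TCCR register packs the refill credits into its low bits, the
+ * bandwidth group index at IXGBE_TDTQ2TCCR_BWG_SHIFT (bit 9) and the max
+ * credit limit at IXGBE_TDTQ2TCCR_MCL_SHIFT (bit 12), with the GSP/LSP
+ * bits selecting group or link strict priority.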
+ */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, +						u16 *refill, +						u16 *max, +						u8 *bwg_id, +						u8 *prio_type) +{ +	u32    reg, max_credits; +	u8     i; + +	reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); + +	/* Enable arbiter */ +	reg &= ~IXGBE_DPMCS_ARBDIS; +	/* Enable DFP and Recycle mode */ +	reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM); +	reg |= IXGBE_DPMCS_TSOEF; +	/* Configure Max TSO packet size 34KB including payload and headers */ +	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); + +	IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); + +	/* Configure traffic class credits and priority */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		max_credits = max[i]; +		reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; +		reg |= refill[i]; +		reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; + +		if (prio_type[i] == prio_group) +			reg |= IXGBE_TDTQ2TCCR_GSP; + +		if (prio_type[i] == prio_link) +			reg |= IXGBE_TDTQ2TCCR_LSP; + +		IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); +	} + +	return 0; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, +						u16 *refill, +						u16 *max, +						u8 *bwg_id, +						u8 *prio_type) +{ +	u32 reg; +	u8 i; + +	reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); +	/* Enable Data Plane Arbiter */ +	reg &= ~IXGBE_PDPMCS_ARBDIS; +	/* Enable DFP and Transmit Recycle Mode */ +	reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); + +	IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); + +	/* Configure traffic class credits and priority */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		reg = refill[i]; +		reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; +		reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; + +		if (prio_type[i] == prio_group) +			reg |= IXGBE_TDPT2TCCR_GSP; + +		if (prio_type[i] == prio_link) +			reg |= IXGBE_TDPT2TCCR_LSP; + +		IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); +	} + +	/* Enable Tx packet buffer division */ +	reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); +	reg |= IXGBE_DTXCTL_ENDBUBD; +	IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); + +	return 0; +} + +/** + * ixgbe_dcb_config_pfc_82598 - Config priority flow control + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Priority Flow Control for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) +{ +	u32 reg, rx_pba_size; +	u8  i; + +	if (pfc_en) { +		/* Enable Transmit Priority Flow Control */ +		reg = IXGBE_READ_REG(hw, IXGBE_RMCS); +		reg &= ~IXGBE_RMCS_TFCE_802_3X; +		/* correct the reporting of our flow control status */ +		reg |= IXGBE_RMCS_TFCE_PRIORITY; +		IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + +		/* Enable Receive Priority Flow Control */ +		reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); +		reg &= ~IXGBE_FCTRL_RFCE; +		reg |= IXGBE_FCTRL_RPFCE; +		IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + +		/* Configure pause time */ +		for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) +			IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); + +		/* Configure flow control refresh threshold value */ +		IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); +	} + +	/* +	 * Configure flow control thresholds and enable priority flow control +	 * for each traffic class. 
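+	 * The XON (FCRTL) and XOFF (FCRTH) thresholds below are derived from
+	 * each TC's Rx packet buffer size minus the configured low/high water
+	 * marks, shifted left by 10 to turn the KB figure back into bytes.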
+	 */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		int enabled = pfc_en & (1 << i); +		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); +		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; +		reg = (rx_pba_size - hw->fc.low_water) << 10; + +		if (enabled == pfc_enabled_tx || +		    enabled == pfc_enabled_full) +			reg |= IXGBE_FCRTL_XONE; + +		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); + +		reg = (rx_pba_size - hw->fc.high_water) << 10; +		if (enabled == pfc_enabled_tx || +		    enabled == pfc_enabled_full) +			reg |= IXGBE_FCRTH_FCEN; + +		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); +	} + +	return 0; +} + +/** + * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +{ +	u32 reg = 0; +	u8  i   = 0; +	u8  j   = 0; + +	/* Receive Queues stats setting -  8 queues per statistics reg */ +	for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { +		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); +		reg |= ((0x1010101) * j); +		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); +		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); +		reg |= ((0x1010101) * j); +		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); +	} +	/* Transmit Queues stats setting -  4 queues per statistics reg */ +	for (i = 0; i < 8; i++) { +		reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); +		reg |= ((0x1010101) * i); +		IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); +	} + +	return 0; +} + +/** + * ixgbe_dcb_hw_config_82598 - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. + */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +			      u16 *max, u8 *bwg_id, u8 *prio_type) +{ +	ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); +	ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, +					       bwg_id, prio_type); +	ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, +					       bwg_id, prio_type); +	ixgbe_dcb_config_pfc_82598(hw, pfc_en); +	ixgbe_dcb_config_tc_stats_82598(hw); + +	return 0; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h new file mode 100644 index 00000000000..2f318935561 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -0,0 +1,97 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". 
+ +  Contact Information: +  Linux NICS <linux.nics@intel.com> +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_82598_CONFIG_H_ +#define _DCB_82598_CONFIG_H_ + +/* DCB register definitions */ + +#define IXGBE_DPMCS_MTSOS_SHIFT 16 +#define IXGBE_DPMCS_TDPAC       0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */ +#define IXGBE_DPMCS_TRM         0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_DPMCS_ARBDIS      0x00000040 /* DCB arbiter disable */ +#define IXGBE_DPMCS_TSOEF       0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ + +#define IXGBE_RUPPBMR_MQA       0x80000000 /* Enable UP to queue mapping */ + +#define IXGBE_RT2CR_MCL_SHIFT   12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RT2CR_LSP         0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN     0x00000010 /* DMA config for multiple packet buffers enable */ +#define IXGBE_RDRXCTL_MCEN      0x00000040 /* DMA config for multiple cores (RSS) enable */ + +#define IXGBE_TDTQ2TCCR_MCL_SHIFT   12 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT   9 +#define IXGBE_TDTQ2TCCR_GSP     0x40000000 +#define IXGBE_TDTQ2TCCR_LSP     0x80000000 + +#define IXGBE_TDPT2TCCR_MCL_SHIFT   12 +#define IXGBE_TDPT2TCCR_BWG_SHIFT   9 +#define IXGBE_TDPT2TCCR_GSP     0x40000000 +#define IXGBE_TDPT2TCCR_LSP     0x80000000 + +#define IXGBE_PDPMCS_TPPAC      0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */ +#define IXGBE_PDPMCS_ARBDIS     0x00000040 /* Arbiter disable */ +#define IXGBE_PDPMCS_TRM        0x00000100 /* Transmit Recycle Mode enable */ + +#define IXGBE_DTXCTL_ENDBUBD    0x00000004 /* Enable DBU buffer division */ + +#define IXGBE_TXPBSIZE_40KB     0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB     0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB     0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB     0x00014000 /* 80KB Packet Buffer */ + +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 + +/* DCB hardware-specific driver APIs */ + +/* DCB PFC functions */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); + +/* DCB hw initialization */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, +					u16 *refill, +					u16 *max, +					u8 *prio_type); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, +						u16 *refill, +						u16 *max, +						u8 *bwg_id, +						u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, +						u16 *refill, +						u16 *max, +						u8 *bwg_id, +						u8 *prio_type); + +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +			      u16 *max, u8 *bwg_id, u8 *prio_type); + +#endif /* _DCB_82598_CONFIG_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c new file mode 100644 index 00000000000..ade98200288 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -0,0 +1,346 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. 
+ +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Rx Packet Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, +				      u16 *refill, +				      u16 *max, +				      u8 *bwg_id, +				      u8 *prio_type, +				      u8 *prio_tc) +{ +	u32    reg           = 0; +	u32    credit_refill = 0; +	u32    credit_max    = 0; +	u8     i             = 0; + +	/* +	 * Disable the arbiter before changing parameters +	 * (always enable recycle mode; WSP) +	 */ +	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; +	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + +	/* Map all traffic classes to their UP, 1 to 1 */ +	reg = 0; +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) +		reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); +	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + +	/* Configure traffic class credits and priority */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		credit_refill = refill[i]; +		credit_max    = max[i]; +		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); + +		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; + +		if (prio_type[i] == prio_link) +			reg |= IXGBE_RTRPT4C_LSP; + +		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); +	} + +	/* +	 * Configure Rx packet plane (recycle mode; WSP) and +	 * enable arbiter +	 */ +	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; +	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + +	return 0; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
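+ * The per-queue credit registers (RTTDT1C, addressed indirectly through
+ * RTTDQSEL for each of the 128 Tx queues) are zeroed first, so arbitration
+ * is governed only by the per-TC values programmed below.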
+ */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, +					   u16 *refill, +					   u16 *max, +					   u8 *bwg_id, +					   u8 *prio_type) +{ +	u32    reg, max_credits; +	u8     i; + +	/* Clear the per-Tx queue credits; we use per-TC instead */ +	for (i = 0; i < 128; i++) { +		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); +		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); +	} + +	/* Configure traffic class credits and priority */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		max_credits = max[i]; +		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; +		reg |= refill[i]; +		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; + +		if (prio_type[i] == prio_group) +			reg |= IXGBE_RTTDT2C_GSP; + +		if (prio_type[i] == prio_link) +			reg |= IXGBE_RTTDT2C_LSP; + +		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); +	} + +	/* +	 * Configure Tx descriptor plane (recycle mode; WSP) and +	 * enable arbiter +	 */ +	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; +	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + +	return 0; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Tx Packet Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, +					   u16 *refill, +					   u16 *max, +					   u8 *bwg_id, +					   u8 *prio_type, +					   u8 *prio_tc) +{ +	u32 reg; +	u8 i; + +	/* +	 * Disable the arbiter before changing parameters +	 * (always enable recycle mode; SP; arb delay) +	 */ +	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | +	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | +	      IXGBE_RTTPCS_ARBDIS; +	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + +	/* Map all traffic classes to their UP, 1 to 1 */ +	reg = 0; +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) +		reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); +	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); + +	/* Configure traffic class credits and priority */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		reg = refill[i]; +		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; +		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; + +		if (prio_type[i] == prio_group) +			reg |= IXGBE_RTTPT2C_GSP; + +		if (prio_type[i] == prio_link) +			reg |= IXGBE_RTTPT2C_LSP; + +		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); +	} + +	/* +	 * Configure Tx packet plane (recycle mode; SP; arb delay) and +	 * enable arbiter +	 */ +	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | +	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); +	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + +	return 0; +} + +/** + * ixgbe_dcb_config_pfc_82599 - Configure priority flow control + * @hw: pointer to hardware structure + * @pfc_en: enabled pfc bitmask + * + * Configure Priority Flow Control (PFC) for each traffic class. 
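+ * Bit n of pfc_en enables PFC for traffic class n; when no bits are set,
+ * the function falls back to the regular flow control path
+ * (hw->mac.ops.fc_enable()) for every TC instead.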
+ */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en) +{ +	u32 i, reg, rx_pba_size; + +	/* Configure PFC Tx thresholds per TC */ +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		int enabled = pfc_en & (1 << i); +		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); +		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + +		reg = (rx_pba_size - hw->fc.low_water) << 10; + +		if (enabled) +			reg |= IXGBE_FCRTL_XONE; +		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); + +		reg = (rx_pba_size - hw->fc.high_water) << 10; +		if (enabled) +			reg |= IXGBE_FCRTH_FCEN; +		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); +	} + +	if (pfc_en) { +		/* Configure pause time (2 TCs per register) */ +		reg = hw->fc.pause_time | (hw->fc.pause_time << 16); +		for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) +			IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + +		/* Configure flow control refresh threshold value */ +		IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + +		reg = IXGBE_FCCFG_TFCE_PRIORITY; +		IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); +		/* +		 * Enable Receive PFC +		 * 82599 will always honor XOFF frames we receive when +		 * we are in PFC mode however X540 only honors enabled +		 * traffic classes. +		 */ +		reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); +		reg &= ~IXGBE_MFLCN_RFCE; +		reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; + +		if (hw->mac.type == ixgbe_mac_X540) +			reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; + +		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + +	} else { +		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) +			hw->mac.ops.fc_enable(hw, i); +	} + +	return 0; +} + +/** + * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) +{ +	u32 reg = 0; +	u8  i   = 0; + +	/* +	 * Receive Queues stats setting +	 * 32 RQSMR registers, each configuring 4 queues. +	 * Set all 16 queues of each TC to the same stat +	 * with TC 'n' going to stat 'n'. +	 */ +	for (i = 0; i < 32; i++) { +		reg = 0x01010101 * (i / 4); +		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); +	} +	/* +	 * Transmit Queues stats setting +	 * 32 TQSM registers, each controlling 4 queues. +	 * Set all queues of each TC to the same stat +	 * with TC 'n' going to stat 'n'. +	 * Tx queues are allocated non-uniformly to TCs: +	 * 32, 32, 16, 16, 8, 8, 8, 8. +	 */ +	for (i = 0; i < 32; i++) { +		if (i < 8) +			reg = 0x00000000; +		else if (i < 16) +			reg = 0x01010101; +		else if (i < 20) +			reg = 0x02020202; +		else if (i < 24) +			reg = 0x03030303; +		else if (i < 26) +			reg = 0x04040404; +		else if (i < 28) +			reg = 0x05050505; +		else if (i < 30) +			reg = 0x06060606; +		else +			reg = 0x07070707; +		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); +	} + +	return 0; +} + +/** + * ixgbe_dcb_hw_config_82599 - Configure and enable DCB + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * @pfc_en: enabled pfc bitmask + * + * Configure dcb settings and enable dcb mode. 
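+ * The helpers are applied in a fixed order: Rx packet arbiter, Tx
+ * descriptor arbiter, Tx data arbiter, PFC thresholds, then the per-TC
+ * queue statistics mapping.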
+ */ +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +			      u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) +{ +	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, +					  prio_type, prio_tc); +	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, +					       bwg_id, prio_type); +	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, +					       bwg_id, prio_type, prio_tc); +	ixgbe_dcb_config_pfc_82599(hw, pfc_en); +	ixgbe_dcb_config_tc_stats_82599(hw); + +	return 0; +} + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h new file mode 100644 index 00000000000..08d1749862a --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -0,0 +1,123 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_82599_CONFIG_H_ +#define _DCB_82599_CONFIG_H_ + +/* DCB register definitions */ +#define IXGBE_RTTDCS_TDPAC      0x00000001 /* 0 Round Robin, +                                            * 1 WSP - Weighted Strict Priority +                                            */ +#define IXGBE_RTTDCS_VMPAC      0x00000002 /* 0 Round Robin, +                                            * 1 WRR - Weighted Round Robin +                                            */ +#define IXGBE_RTTDCS_TDRM       0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_RTTDCS_ARBDIS     0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTDCS_BDPM       0x00400000 /* Bypass Data Pipe - must clear! */ +#define IXGBE_RTTDCS_BPBFSM     0x00800000 /* Bypass PB Free Space - must +                                             * clear! 
+                                             */ +#define IXGBE_RTTDCS_SPEED_CHG  0x80000000 /* Link speed change */ + +/* Receive UP2TC mapping */ +#define IXGBE_RTRUP2TC_UP_SHIFT 3 +/* Transmit UP2TC mapping */ +#define IXGBE_RTTUP2TC_UP_SHIFT 3 + +#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RTRPT4C_BWG_SHIFT 9  /* Offset to BWG index */ +#define IXGBE_RTRPT4C_GSP       0x40000000 /* GSP enable bit */ +#define IXGBE_RTRPT4C_LSP       0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN     0x00000010 /* DMA config for multiple packet +                                            * buffers enable +                                            */ +#define IXGBE_RDRXCTL_MCEN      0x00000040 /* DMA config for multiple cores +                                            * (RSS) enable +                                            */ + +/* RTRPCS Bit Masks */ +#define IXGBE_RTRPCS_RRM        0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RTRPCS_RAC        0x00000004 +#define IXGBE_RTRPCS_ARBDIS     0x00000040 /* Arbitration disable bit */ + +/* RTTDT2C Bit Masks */ +#define IXGBE_RTTDT2C_MCL_SHIFT 12 +#define IXGBE_RTTDT2C_BWG_SHIFT 9 +#define IXGBE_RTTDT2C_GSP       0x40000000 +#define IXGBE_RTTDT2C_LSP       0x80000000 + +#define IXGBE_RTTPT2C_MCL_SHIFT 12 +#define IXGBE_RTTPT2C_BWG_SHIFT 9 +#define IXGBE_RTTPT2C_GSP       0x40000000 +#define IXGBE_RTTPT2C_LSP       0x80000000 + +/* RTTPCS Bit Masks */ +#define IXGBE_RTTPCS_TPPAC      0x00000020 /* 0 Round Robin, +                                            * 1 SP - Strict Priority +                                            */ +#define IXGBE_RTTPCS_ARBDIS     0x00000040 /* Arbiter disable */ +#define IXGBE_RTTPCS_TPRM       0x00000100 /* Transmit Recycle Mode enable */ +#define IXGBE_RTTPCS_ARBD_SHIFT 22 +#define IXGBE_RTTPCS_ARBD_DCB   0x4        /* Arbitration delay in DCB mode */ + +/* SECTXMINIFG DCB */ +#define IXGBE_SECTX_DCB		0x00001F00 /* DCB TX Buffer IFG */ + + +/* DCB hardware-specific driver APIs */ + +/* DCB PFC functions */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en); + +/* DCB hw initialization */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, +					u16 *refill, +					u16 *max, +					u8 *bwg_id, +					u8 *prio_type, +					u8 *prio_tc); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, +						u16 *refill, +						u16 *max, +						u8 *bwg_id, +						u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, +						u16 *refill, +						u16 *max, +						u8 *bwg_id, +						u8 *prio_type, +						u8 *prio_tc); + +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +			      u16 *max, u8 *bwg_id, u8 *prio_type, +			      u8 *prio_tc); + +#endif /* _DCB_82599_CONFIG_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c new file mode 100644 index 00000000000..0ace6ce1d0b --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -0,0 +1,816 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. 
+ +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  Linux NICS <linux.nics@intel.com> +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include <linux/dcbnl.h> +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" + +/* Callbacks for DCB netlink in the kernel */ +#define BIT_DCB_MODE	0x01 +#define BIT_PFC		0x02 +#define BIT_PG_RX	0x04 +#define BIT_PG_TX	0x08 +#define BIT_APP_UPCHG	0x10 +#define BIT_LINKSPEED   0x80 + +/* Responses for the DCB_C_SET_ALL command */ +#define DCB_HW_CHG_RST  0  /* DCB configuration changed with reset */ +#define DCB_NO_HW_CHG   1  /* DCB configuration did not change */ +#define DCB_HW_CHG      2  /* DCB configuration changed, no reset */ + +int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, +                       struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) +{ +	struct tc_configuration *src_tc_cfg = NULL; +	struct tc_configuration *dst_tc_cfg = NULL; +	int i; + +	if (!src_dcb_cfg || !dst_dcb_cfg) +		return -EINVAL; + +	for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { +		src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; +		dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; + +		dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = +				src_tc_cfg->path[DCB_TX_CONFIG].prio_type; + +		dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = +				src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; + +		dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = +				src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; + +		dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = +				src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; + +		dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = +				src_tc_cfg->path[DCB_RX_CONFIG].prio_type; + +		dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = +				src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; + +		dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = +				src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; + +		dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = +				src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; +	} + +	for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { +		dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG] +			[i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage +				[DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; +		dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG] +			[i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage +				[DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; +	} + +	for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { +		dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc = +			src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; +	} + +	dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable; + +	return 0; +} + +static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); +} + +static u8 
ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) +{ +	u8 err = 0; +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	/* verify there is something to do, if not then exit */ +	if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) +		return err; + +	if (state > 0) { +		/* Turn on DCB */ +		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { +			e_err(drv, "Enable failed, needs MSI-X\n"); +			err = 1; +			goto out; +		} + +		adapter->flags |= IXGBE_FLAG_DCB_ENABLED; + +		switch (adapter->hw.mac.type) { +		case ixgbe_mac_82598EB: +			adapter->last_lfc_mode = adapter->hw.fc.current_mode; +			adapter->hw.fc.requested_mode = ixgbe_fc_none; +			break; +		case ixgbe_mac_82599EB: +		case ixgbe_mac_X540: +			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; +			break; +		default: +			break; +		} + +		ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS); +	} else { +		/* Turn off DCB */ +		adapter->hw.fc.requested_mode = adapter->last_lfc_mode; +		adapter->temp_dcb_cfg.pfc_mode_enable = false; +		adapter->dcb_cfg.pfc_mode_enable = false; +		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; +		switch (adapter->hw.mac.type) { +		case ixgbe_mac_82599EB: +		case ixgbe_mac_X540: +			if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) +				adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; +			break; +		default: +			break; +		} +		ixgbe_setup_tc(netdev, 0); +	} + +out: +	return err; +} + +static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, +					 u8 *perm_addr) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	int i, j; + +	memset(perm_addr, 0xff, MAX_ADDR_LEN); + +	for (i = 0; i < netdev->addr_len; i++) +		perm_addr[i] = adapter->hw.mac.perm_addr[i]; + +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		for (j = 0; j < netdev->addr_len; j++, i++) +			perm_addr[i] = adapter->hw.mac.san_addr[j]; +		break; +	default: +		break; +	} +} + +static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, +                                         u8 prio, u8 bwg_id, u8 bw_pct, +                                         u8 up_map) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	if (prio != DCB_ATTR_VALUE_UNDEFINED) +		adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; +	if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) +		adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; +	if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) +		adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = +			bw_pct; +	if (up_map != DCB_ATTR_VALUE_UNDEFINED) +		adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = +			up_map; + +	if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type != +	     adapter->dcb_cfg.tc_config[tc].path[0].prio_type) || +	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id != +	     adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) || +	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != +	     adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || +	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != +	     adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) +		adapter->dcb_set_bitmap |= BIT_PG_TX; +} + +static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, +                                          u8 bw_pct) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; + +	if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != +	    adapter->dcb_cfg.bw_percentage[0][bwg_id]) +		
adapter->dcb_set_bitmap |= BIT_PG_TX; +} + +static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, +                                         u8 prio, u8 bwg_id, u8 bw_pct, +                                         u8 up_map) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	if (prio != DCB_ATTR_VALUE_UNDEFINED) +		adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; +	if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) +		adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; +	if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) +		adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = +			bw_pct; +	if (up_map != DCB_ATTR_VALUE_UNDEFINED) +		adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = +			up_map; + +	if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type != +	     adapter->dcb_cfg.tc_config[tc].path[1].prio_type) || +	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id != +	     adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) || +	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != +	     adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || +	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != +	     adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) +		adapter->dcb_set_bitmap |= BIT_PG_RX; +} + +static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, +                                          u8 bw_pct) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; + +	if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != +	    adapter->dcb_cfg.bw_percentage[1][bwg_id]) +		adapter->dcb_set_bitmap |= BIT_PG_RX; +} + +static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, +                                         u8 *prio, u8 *bwg_id, u8 *bw_pct, +                                         u8 *up_map) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	*prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; +	*bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; +	*bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; +	*up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; +} + +static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, +                                          u8 *bw_pct) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	*bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; +} + +static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, +                                         u8 *prio, u8 *bwg_id, u8 *bw_pct, +                                         u8 *up_map) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	*prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; +	*bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; +	*bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; +	*up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; +} + +static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, +                                          u8 *bw_pct) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	*bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; +} + +static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, +                                    u8 setting) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; +	if 
(adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != +	    adapter->dcb_cfg.tc_config[priority].dcb_pfc) { +		adapter->dcb_set_bitmap |= BIT_PFC; +		adapter->temp_dcb_cfg.pfc_mode_enable = true; +	} +} + +static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, +                                    u8 *setting) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; +} + +static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	int ret; +#ifdef IXGBE_FCOE +	struct dcb_app app = { +			      .selector = DCB_APP_IDTYPE_ETHTYPE, +			      .protocol = ETH_P_FCOE, +			     }; +	u8 up = dcb_getapp(netdev, &app); +#endif + +	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, +				 MAX_TRAFFIC_CLASS); +	if (ret) +		return DCB_NO_HW_CHG; + +#ifdef IXGBE_FCOE +	if (up && (up != (1 << adapter->fcoe.up))) +		adapter->dcb_set_bitmap |= BIT_APP_UPCHG; + +	/* +	 * Only take down the adapter if an app change occurred. FCoE +	 * may shuffle tx rings in this case and this can not be done +	 * without a reset currently. +	 */ +	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { +		while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) +			usleep_range(1000, 2000); + +		adapter->fcoe.up = ffs(up) - 1; + +		if (netif_running(netdev)) +			netdev->netdev_ops->ndo_stop(netdev); +		ixgbe_clear_interrupt_scheme(adapter); +	} +#endif + +	if (adapter->dcb_cfg.pfc_mode_enable) { +		switch (adapter->hw.mac.type) { +		case ixgbe_mac_82599EB: +		case ixgbe_mac_X540: +			if (adapter->hw.fc.current_mode != ixgbe_fc_pfc) +				adapter->last_lfc_mode = +				                  adapter->hw.fc.current_mode; +			break; +		default: +			break; +		} +		adapter->hw.fc.requested_mode = ixgbe_fc_pfc; +	} else { +		switch (adapter->hw.mac.type) { +		case ixgbe_mac_82598EB: +			adapter->hw.fc.requested_mode = ixgbe_fc_none; +			break; +		case ixgbe_mac_82599EB: +		case ixgbe_mac_X540: +			adapter->hw.fc.requested_mode = adapter->last_lfc_mode; +			break; +		default: +			break; +		} +	} + +#ifdef IXGBE_FCOE +	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { +		ixgbe_init_interrupt_scheme(adapter); +		if (netif_running(netdev)) +			netdev->netdev_ops->ndo_open(netdev); +		ret = DCB_HW_CHG_RST; +	} +#endif + +	if (adapter->dcb_set_bitmap & BIT_PFC) { +		u8 pfc_en; +		ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en); +		ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en); +		ret = DCB_HW_CHG; +	} + +	if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { +		u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; +		u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; +		/* Priority to TC mapping in CEE case default to 1:1 */ +		u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; +		int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + +#ifdef CONFIG_FCOE +		if (adapter->netdev->features & NETIF_F_FCOE_MTU) +			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + +		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, +					       max_frame, DCB_TX_CONFIG); +		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, +					       max_frame, DCB_RX_CONFIG); + +		ixgbe_dcb_unpack_refill(&adapter->dcb_cfg, +					DCB_TX_CONFIG, refill); +		ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max); +		ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg, +				       DCB_TX_CONFIG, bwg_id); +		ixgbe_dcb_unpack_prio(&adapter->dcb_cfg, +				      DCB_TX_CONFIG, prio_type); 
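+		/* refill, max, bwg_id and prio_type now hold the per-TC CEE parameters computed above; program the hardware ETS arbiters with them */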
+ +		ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, +					bwg_id, prio_type, prio_tc); +	} + +	if (adapter->dcb_cfg.pfc_mode_enable) +		adapter->hw.fc.current_mode = ixgbe_fc_pfc; + +	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) +		clear_bit(__IXGBE_RESETTING, &adapter->state); +	adapter->dcb_set_bitmap = 0x00; +	return ret; +} + +static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	switch (capid) { +	case DCB_CAP_ATTR_PG: +		*cap = true; +		break; +	case DCB_CAP_ATTR_PFC: +		*cap = true; +		break; +	case DCB_CAP_ATTR_UP2TC: +		*cap = false; +		break; +	case DCB_CAP_ATTR_PG_TCS: +		*cap = 0x80; +		break; +	case DCB_CAP_ATTR_PFC_TCS: +		*cap = 0x80; +		break; +	case DCB_CAP_ATTR_GSP: +		*cap = true; +		break; +	case DCB_CAP_ATTR_BCN: +		*cap = false; +		break; +	case DCB_CAP_ATTR_DCBX: +		*cap = adapter->dcbx_cap; +		break; +	default: +		*cap = false; +		break; +	} + +	return 0; +} + +static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	u8 rval = 0; + +	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { +		switch (tcid) { +		case DCB_NUMTCS_ATTR_PG: +			*num = MAX_TRAFFIC_CLASS; +			break; +		case DCB_NUMTCS_ATTR_PFC: +			*num = MAX_TRAFFIC_CLASS; +			break; +		default: +			rval = -EINVAL; +			break; +		} +	} else { +		rval = -EINVAL; +	} + +	return rval; +} + +static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +{ +	return -EINVAL; +} + +static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	return adapter->dcb_cfg.pfc_mode_enable; +} + +static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	adapter->temp_dcb_cfg.pfc_mode_enable = state; +	if (adapter->temp_dcb_cfg.pfc_mode_enable != +		adapter->dcb_cfg.pfc_mode_enable) +		adapter->dcb_set_bitmap |= BIT_PFC; +} + +/** + * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority + * @netdev : the corresponding netdev + * @idtype : identifies the id as ether type or TCP/UDP port number + * @id: id is either ether type or TCP/UDP port number + * + * Returns : on success, returns a non-zero 802.1p user priority bitmap + * otherwise returns 0 as the invalid user priority bitmap to indicate an + * error. 
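+ * The lookup is only performed when the CEE DCBX version is active.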
+ */ +static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct dcb_app app = { +				.selector = idtype, +				.protocol = id, +			     }; + +	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) +		return 0; + +	return dcb_getapp(netdev, &app); +} + +static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, +				   struct ieee_ets *ets) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; + +	/* No IEEE PFC settings available */ +	if (!my_ets) +		return -EINVAL; + +	ets->ets_cap = MAX_TRAFFIC_CLASS; +	ets->cbs = my_ets->cbs; +	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); +	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); +	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); +	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); +	return 0; +} + +static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, +				   struct ieee_ets *ets) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; +	__u8 prio_type[IEEE_8021QAZ_MAX_TCS]; +	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; +	int i, err; +	__u64 *p = (__u64 *) ets->prio_tc; +	/* naively give each TC a bwg to map onto CEE hardware */ +	__u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + +	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) +		return -EINVAL; + +	if (!adapter->ixgbe_ieee_ets) { +		adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), +						  GFP_KERNEL); +		if (!adapter->ixgbe_ieee_ets) +			return -ENOMEM; +	} + +	memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + +	/* Map TSA onto CEE prio type */ +	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { +		switch (ets->tc_tsa[i]) { +		case IEEE_8021QAZ_TSA_STRICT: +			prio_type[i] = 2; +			break; +		case IEEE_8021QAZ_TSA_ETS: +			prio_type[i] = 0; +			break; +		default: +			/* Hardware only supports priority strict or +			 * ETS transmission selection algorithms if +			 * we receive some other value from dcbnl +			 * throw an error +			 */ +			return -EINVAL; +		} +	} + +	if (*p) +		ixgbe_dcbnl_set_state(dev, 1); +	else +		ixgbe_dcbnl_set_state(dev, 0); + +	ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); +	err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, +				      bwg_id, prio_type, ets->prio_tc); +	return err; +} + +static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, +				   struct ieee_pfc *pfc) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; +	int i; + +	/* No IEEE PFC settings available */ +	if (!my_pfc) +		return -EINVAL; + +	pfc->pfc_cap = MAX_TRAFFIC_CLASS; +	pfc->pfc_en = my_pfc->pfc_en; +	pfc->mbc = my_pfc->mbc; +	pfc->delay = my_pfc->delay; + +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +		pfc->requests[i] = adapter->stats.pxoffrxc[i]; +		pfc->indications[i] = adapter->stats.pxofftxc[i]; +	} + +	return 0; +} + +static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, +				   struct ieee_pfc *pfc) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	int err; + +	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) +		return -EINVAL; + +	if (!adapter->ixgbe_ieee_pfc) { +		adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), +						  GFP_KERNEL); +		if (!adapter->ixgbe_ieee_pfc) +			return -ENOMEM; +	} + +	memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); +	err = 
ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en); +	return err; +} + +#ifdef IXGBE_FCOE +static void ixgbe_dcbnl_devreset(struct net_device *dev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); + +	if (netif_running(dev)) +		dev->netdev_ops->ndo_stop(dev); + +	ixgbe_clear_interrupt_scheme(adapter); +	ixgbe_init_interrupt_scheme(adapter); + +	if (netif_running(dev)) +		dev->netdev_ops->ndo_open(dev); +} +#endif + +static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, +				   struct dcb_app *app) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	int err = -EINVAL; + +	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) +		return err; + +	err = dcb_ieee_setapp(dev, app); + +#ifdef IXGBE_FCOE +	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && +	    app->protocol == ETH_P_FCOE) { +		u8 app_mask = dcb_ieee_getapp_mask(dev, app); + +		if (app_mask & (1 << adapter->fcoe.up)) +			return err; + +		adapter->fcoe.up = app->priority; +		ixgbe_dcbnl_devreset(dev); +	} +#endif +	return 0; +} + +static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, +				   struct dcb_app *app) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	int err; + +	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) +		return -EINVAL; + +	err = dcb_ieee_delapp(dev, app); + +#ifdef IXGBE_FCOE +	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && +	    app->protocol == ETH_P_FCOE) { +		u8 app_mask = dcb_ieee_getapp_mask(dev, app); + +		if (app_mask & (1 << adapter->fcoe.up)) +			return err; + +		adapter->fcoe.up = app_mask ? +				   ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; +		ixgbe_dcbnl_devreset(dev); +	} +#endif +	return err; +} + +static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	return adapter->dcbx_cap; +} + +static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	struct ieee_ets ets = {0}; +	struct ieee_pfc pfc = {0}; + +	/* no support for LLD_MANAGED modes or CEE+IEEE */ +	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || +	    ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || +	    !(mode & DCB_CAP_DCBX_HOST)) +		return 1; + +	if (mode == adapter->dcbx_cap) +		return 0; + +	adapter->dcbx_cap = mode; + +	/* ETS and PFC defaults */ +	ets.ets_cap = 8; +	pfc.pfc_cap = 8; + +	if (mode & DCB_CAP_DCBX_VER_IEEE) { +		ixgbe_dcbnl_ieee_setets(dev, &ets); +		ixgbe_dcbnl_ieee_setpfc(dev, &pfc); +	} else if (mode & DCB_CAP_DCBX_VER_CEE) { +		adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX); +		ixgbe_dcbnl_set_all(dev); +	} else { +		/* Drop into single TC mode strict priority as this +		 * indicates CEE and IEEE versions are disabled +		 */ +		ixgbe_dcbnl_ieee_setets(dev, &ets); +		ixgbe_dcbnl_ieee_setpfc(dev, &pfc); +		ixgbe_dcbnl_set_state(dev, 0); +	} + +	return 0; +} + +const struct dcbnl_rtnl_ops dcbnl_ops = { +	.ieee_getets	= ixgbe_dcbnl_ieee_getets, +	.ieee_setets	= ixgbe_dcbnl_ieee_setets, +	.ieee_getpfc	= ixgbe_dcbnl_ieee_getpfc, +	.ieee_setpfc	= ixgbe_dcbnl_ieee_setpfc, +	.ieee_setapp	= ixgbe_dcbnl_ieee_setapp, +	.ieee_delapp	= ixgbe_dcbnl_ieee_delapp, +	.getstate	= ixgbe_dcbnl_get_state, +	.setstate	= ixgbe_dcbnl_set_state, +	.getpermhwaddr	= ixgbe_dcbnl_get_perm_hw_addr, +	.setpgtccfgtx	= ixgbe_dcbnl_set_pg_tc_cfg_tx, +	.setpgbwgcfgtx	= ixgbe_dcbnl_set_pg_bwg_cfg_tx, +	.setpgtccfgrx	= ixgbe_dcbnl_set_pg_tc_cfg_rx, +	.setpgbwgcfgrx	= ixgbe_dcbnl_set_pg_bwg_cfg_rx, +	.getpgtccfgtx	= ixgbe_dcbnl_get_pg_tc_cfg_tx, +	.getpgbwgcfgtx	= 
ixgbe_dcbnl_get_pg_bwg_cfg_tx, +	.getpgtccfgrx	= ixgbe_dcbnl_get_pg_tc_cfg_rx, +	.getpgbwgcfgrx	= ixgbe_dcbnl_get_pg_bwg_cfg_rx, +	.setpfccfg	= ixgbe_dcbnl_set_pfc_cfg, +	.getpfccfg	= ixgbe_dcbnl_get_pfc_cfg, +	.setall		= ixgbe_dcbnl_set_all, +	.getcap		= ixgbe_dcbnl_getcap, +	.getnumtcs	= ixgbe_dcbnl_getnumtcs, +	.setnumtcs	= ixgbe_dcbnl_setnumtcs, +	.getpfcstate	= ixgbe_dcbnl_getpfcstate, +	.setpfcstate	= ixgbe_dcbnl_setpfcstate, +	.getapp		= ixgbe_dcbnl_getapp, +	.getdcbx	= ixgbe_dcbnl_getdcbx, +	.setdcbx	= ixgbe_dcbnl_setdcbx, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c new file mode 100644 index 00000000000..82d4244c6e1 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -0,0 +1,2592 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for ixgbe */ + +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/vmalloc.h> +#include <linux/uaccess.h> + +#include "ixgbe.h" + + +#define IXGBE_ALL_RAR_ENTRIES 16 + +enum {NETDEV_STATS, IXGBE_STATS}; + +struct ixgbe_stats { +	char stat_string[ETH_GSTRING_LEN]; +	int type; +	int sizeof_stat; +	int stat_offset; +}; + +#define IXGBE_STAT(m)		IXGBE_STATS, \ +				sizeof(((struct ixgbe_adapter *)0)->m), \ +				offsetof(struct ixgbe_adapter, m) +#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \ +				sizeof(((struct rtnl_link_stats64 *)0)->m), \ +				offsetof(struct rtnl_link_stats64, m) + +static struct ixgbe_stats ixgbe_gstrings_stats[] = { +	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, +	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, +	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, +	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, +	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, +	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, +	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, +	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, +	{"lsc_int", IXGBE_STAT(lsc_int)}, +	{"tx_busy", IXGBE_STAT(tx_busy)}, +	{"non_eop_descs", IXGBE_STAT(non_eop_descs)}, +	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, +	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, +	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, +	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, +	{"multicast", IXGBE_NETDEV_STAT(multicast)}, +	{"broadcast", IXGBE_STAT(stats.bprc)}, +	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, +	{"collisions", IXGBE_NETDEV_STAT(collisions)}, +	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, +	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, +	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, +	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, +	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, +	{"fdir_match", IXGBE_STAT(stats.fdirmatch)}, +	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, +	{"fdir_overflow", IXGBE_STAT(fdir_overflow)}, +	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, +	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, +	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, +	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, +	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, +	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, +	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, +	{"tx_restart_queue", IXGBE_STAT(restart_queue)}, +	{"rx_long_length_errors", IXGBE_STAT(stats.roc)}, +	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, +	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, +	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, +	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, +	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, +	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, +	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, +	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, +	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, +	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, +	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, +	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, +	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, +#ifdef IXGBE_FCOE +	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, +	
{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, +	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, +	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, +	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, +	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, +#endif /* IXGBE_FCOE */ +}; + +#define IXGBE_QUEUE_STATS_LEN \ +	((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \ +	((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \ +	(sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) +#define IXGBE_PB_STATS_LEN ( \ +                 (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \ +                 IXGBE_FLAG_DCB_ENABLED) ? \ +                 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ +                  sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ +                  sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ +                  sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ +                  / sizeof(u64) : 0) +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ +                         IXGBE_PB_STATS_LEN + \ +                         IXGBE_QUEUE_STATS_LEN) + +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { +	"Register test  (offline)", "Eeprom test    (offline)", +	"Interrupt test (offline)", "Loopback test  (offline)", +	"Link test   (on/offline)" +}; +#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN + +static int ixgbe_get_settings(struct net_device *netdev, +                              struct ethtool_cmd *ecmd) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	u32 link_speed = 0; +	bool link_up; + +	ecmd->supported = SUPPORTED_10000baseT_Full; +	ecmd->autoneg = AUTONEG_ENABLE; +	ecmd->transceiver = XCVR_EXTERNAL; +	if ((hw->phy.media_type == ixgbe_media_type_copper) || +	    (hw->phy.multispeed_fiber)) { +		ecmd->supported |= (SUPPORTED_1000baseT_Full | +		                    SUPPORTED_Autoneg); + +		switch (hw->mac.type) { +		case ixgbe_mac_X540: +			ecmd->supported |= SUPPORTED_100baseT_Full; +			break; +		default: +			break; +		} + +		ecmd->advertising = ADVERTISED_Autoneg; +		if (hw->phy.autoneg_advertised) { +			if (hw->phy.autoneg_advertised & +			    IXGBE_LINK_SPEED_100_FULL) +				ecmd->advertising |= ADVERTISED_100baseT_Full; +			if (hw->phy.autoneg_advertised & +			    IXGBE_LINK_SPEED_10GB_FULL) +				ecmd->advertising |= ADVERTISED_10000baseT_Full; +			if (hw->phy.autoneg_advertised & +			    IXGBE_LINK_SPEED_1GB_FULL) +				ecmd->advertising |= ADVERTISED_1000baseT_Full; +		} else { +			/* +			 * Default advertised modes in case +			 * phy.autoneg_advertised isn't set. 
+			 */ +			ecmd->advertising |= (ADVERTISED_10000baseT_Full | +					      ADVERTISED_1000baseT_Full); +			if (hw->mac.type == ixgbe_mac_X540) +				ecmd->advertising |= ADVERTISED_100baseT_Full; +		} + +		if (hw->phy.media_type == ixgbe_media_type_copper) { +			ecmd->supported |= SUPPORTED_TP; +			ecmd->advertising |= ADVERTISED_TP; +			ecmd->port = PORT_TP; +		} else { +			ecmd->supported |= SUPPORTED_FIBRE; +			ecmd->advertising |= ADVERTISED_FIBRE; +			ecmd->port = PORT_FIBRE; +		} +	} else if (hw->phy.media_type == ixgbe_media_type_backplane) { +		/* Set as FIBRE until SERDES defined in kernel */ +		if (hw->device_id == IXGBE_DEV_ID_82598_BX) { +			ecmd->supported = (SUPPORTED_1000baseT_Full | +					   SUPPORTED_FIBRE); +			ecmd->advertising = (ADVERTISED_1000baseT_Full | +					     ADVERTISED_FIBRE); +			ecmd->port = PORT_FIBRE; +			ecmd->autoneg = AUTONEG_DISABLE; +		} else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || +			   (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { +			ecmd->supported |= (SUPPORTED_1000baseT_Full | +					    SUPPORTED_Autoneg | +					    SUPPORTED_FIBRE); +			ecmd->advertising = (ADVERTISED_10000baseT_Full | +					     ADVERTISED_1000baseT_Full | +					     ADVERTISED_Autoneg | +					     ADVERTISED_FIBRE); +			ecmd->port = PORT_FIBRE; +		} else { +			ecmd->supported |= (SUPPORTED_1000baseT_Full | +					    SUPPORTED_FIBRE); +			ecmd->advertising = (ADVERTISED_10000baseT_Full | +					     ADVERTISED_1000baseT_Full | +					     ADVERTISED_FIBRE); +			ecmd->port = PORT_FIBRE; +		} +	} else { +		ecmd->supported |= SUPPORTED_FIBRE; +		ecmd->advertising = (ADVERTISED_10000baseT_Full | +		                     ADVERTISED_FIBRE); +		ecmd->port = PORT_FIBRE; +		ecmd->autoneg = AUTONEG_DISABLE; +	} + +	/* Get PHY type */ +	switch (adapter->hw.phy.type) { +	case ixgbe_phy_tn: +	case ixgbe_phy_aq: +	case ixgbe_phy_cu_unknown: +		/* Copper 10G-BASET */ +		ecmd->port = PORT_TP; +		break; +	case ixgbe_phy_qt: +		ecmd->port = PORT_FIBRE; +		break; +	case ixgbe_phy_nl: +	case ixgbe_phy_sfp_passive_tyco: +	case ixgbe_phy_sfp_passive_unknown: +	case ixgbe_phy_sfp_ftl: +	case ixgbe_phy_sfp_avago: +	case ixgbe_phy_sfp_intel: +	case ixgbe_phy_sfp_unknown: +		switch (adapter->hw.phy.sfp_type) { +		/* SFP+ devices, further checking needed */ +		case ixgbe_sfp_type_da_cu: +		case ixgbe_sfp_type_da_cu_core0: +		case ixgbe_sfp_type_da_cu_core1: +			ecmd->port = PORT_DA; +			break; +		case ixgbe_sfp_type_sr: +		case ixgbe_sfp_type_lr: +		case ixgbe_sfp_type_srlr_core0: +		case ixgbe_sfp_type_srlr_core1: +			ecmd->port = PORT_FIBRE; +			break; +		case ixgbe_sfp_type_not_present: +			ecmd->port = PORT_NONE; +			break; +		case ixgbe_sfp_type_1g_cu_core0: +		case ixgbe_sfp_type_1g_cu_core1: +			ecmd->port = PORT_TP; +			ecmd->supported = SUPPORTED_TP; +			ecmd->advertising = (ADVERTISED_1000baseT_Full | +			                     ADVERTISED_TP); +			break; +		case ixgbe_sfp_type_unknown: +		default: +			ecmd->port = PORT_OTHER; +			break; +		} +		break; +	case ixgbe_phy_xaui: +		ecmd->port = PORT_NONE; +		break; +	case ixgbe_phy_unknown: +	case ixgbe_phy_generic: +	case ixgbe_phy_sfp_unsupported: +	default: +		ecmd->port = PORT_OTHER; +		break; +	} + +	hw->mac.ops.check_link(hw, &link_speed, &link_up, false); +	if (link_up) { +		switch (link_speed) { +		case IXGBE_LINK_SPEED_10GB_FULL: +			ethtool_cmd_speed_set(ecmd, SPEED_10000); +			break; +		case IXGBE_LINK_SPEED_1GB_FULL: +			ethtool_cmd_speed_set(ecmd, SPEED_1000); +			break; +		case IXGBE_LINK_SPEED_100_FULL: +			
ethtool_cmd_speed_set(ecmd, SPEED_100); +			break; +		default: +			break; +		} +		ecmd->duplex = DUPLEX_FULL; +	} else { +		ethtool_cmd_speed_set(ecmd, -1); +		ecmd->duplex = -1; +	} + +	return 0; +} + +static int ixgbe_set_settings(struct net_device *netdev, +                              struct ethtool_cmd *ecmd) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	u32 advertised, old; +	s32 err = 0; + +	if ((hw->phy.media_type == ixgbe_media_type_copper) || +	    (hw->phy.multispeed_fiber)) { +		/* 10000/copper and 1000/copper must autoneg +		 * this function does not support any duplex forcing, but can +		 * limit the advertising of the adapter to only 10000 or 1000 */ +		if (ecmd->autoneg == AUTONEG_DISABLE) +			return -EINVAL; + +		old = hw->phy.autoneg_advertised; +		advertised = 0; +		if (ecmd->advertising & ADVERTISED_10000baseT_Full) +			advertised |= IXGBE_LINK_SPEED_10GB_FULL; + +		if (ecmd->advertising & ADVERTISED_1000baseT_Full) +			advertised |= IXGBE_LINK_SPEED_1GB_FULL; + +		if (ecmd->advertising & ADVERTISED_100baseT_Full) +			advertised |= IXGBE_LINK_SPEED_100_FULL; + +		if (old == advertised) +			return err; +		/* this sets the link speed and restarts auto-neg */ +		hw->mac.autotry_restart = true; +		err = hw->mac.ops.setup_link(hw, advertised, true, true); +		if (err) { +			e_info(probe, "setup link failed with code %d\n", err); +			hw->mac.ops.setup_link(hw, old, true, true); +		} +	} else { +		/* in this case we currently only support 10Gb/FULL */ +		u32 speed = ethtool_cmd_speed(ecmd); +		if ((ecmd->autoneg == AUTONEG_ENABLE) || +		    (ecmd->advertising != ADVERTISED_10000baseT_Full) || +		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) +			return -EINVAL; +	} + +	return err; +} + +static void ixgbe_get_pauseparam(struct net_device *netdev, +                                 struct ethtool_pauseparam *pause) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; + +	/* +	 * Flow Control Autoneg isn't on if +	 *  - we didn't ask for it OR +	 *  - it failed, we know this by tx & rx being off +	 */ +	if (hw->fc.disable_fc_autoneg || +	    (hw->fc.current_mode == ixgbe_fc_none)) +		pause->autoneg = 0; +	else +		pause->autoneg = 1; + +	if (hw->fc.current_mode == ixgbe_fc_rx_pause) { +		pause->rx_pause = 1; +	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { +		pause->tx_pause = 1; +	} else if (hw->fc.current_mode == ixgbe_fc_full) { +		pause->rx_pause = 1; +		pause->tx_pause = 1; +#ifdef CONFIG_DCB +	} else if (hw->fc.current_mode == ixgbe_fc_pfc) { +		pause->rx_pause = 0; +		pause->tx_pause = 0; +#endif +	} +} + +static int ixgbe_set_pauseparam(struct net_device *netdev, +                                struct ethtool_pauseparam *pause) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	struct ixgbe_fc_info fc; + +#ifdef CONFIG_DCB +	if (adapter->dcb_cfg.pfc_mode_enable || +		((hw->mac.type == ixgbe_mac_82598EB) && +		(adapter->flags & IXGBE_FLAG_DCB_ENABLED))) +		return -EINVAL; + +#endif +	fc = hw->fc; + +	if (pause->autoneg != AUTONEG_ENABLE) +		fc.disable_fc_autoneg = true; +	else +		fc.disable_fc_autoneg = false; + +	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) +		fc.requested_mode = ixgbe_fc_full; +	else if (pause->rx_pause && !pause->tx_pause) +		fc.requested_mode = ixgbe_fc_rx_pause; +	else if (!pause->rx_pause && pause->tx_pause) +		fc.requested_mode = ixgbe_fc_tx_pause; +	else if (!pause->rx_pause && 
!pause->tx_pause) +		fc.requested_mode = ixgbe_fc_none; +	else +		return -EINVAL; + +#ifdef CONFIG_DCB +	adapter->last_lfc_mode = fc.requested_mode; +#endif + +	/* if the thing changed then we'll update and use new autoneg */ +	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { +		hw->fc = fc; +		if (netif_running(netdev)) +			ixgbe_reinit_locked(adapter); +		else +			ixgbe_reset(adapter); +	} + +	return 0; +} + +static u32 ixgbe_get_msglevel(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	return adapter->msg_enable; +} + +static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	adapter->msg_enable = data; +} + +static int ixgbe_get_regs_len(struct net_device *netdev) +{ +#define IXGBE_REGS_LEN  1128 +	return IXGBE_REGS_LEN * sizeof(u32); +} + +#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ + +static void ixgbe_get_regs(struct net_device *netdev, +                           struct ethtool_regs *regs, void *p) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	u32 *regs_buff = p; +	u8 i; + +	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); + +	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; + +	/* General Registers */ +	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); +	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); +	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); +	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); +	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); +	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); +	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); + +	/* NVM Register */ +	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); +	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); +	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); +	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); +	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); +	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); +	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); +	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); +	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); +	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); + +	/* Interrupt */ +	/* don't read EICR because it can clear interrupt causes, instead +	 * read EICS which is a shadow but doesn't clear EICR */ +	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); +	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); +	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); +	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); +	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); +	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); +	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); +	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); +	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); +	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); +	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); +	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); + +	/* Flow Control */ +	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); +	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); +	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); +	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); +	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); +	for (i = 0; i < 8; i++) { +		switch (hw->mac.type) { +		case ixgbe_mac_82598EB: +			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); +			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); +			break; +		case ixgbe_mac_82599EB: 
+			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); +			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); +			break; +		default: +			break; +		} +	} +	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); +	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); + +	/* Receive DMA */ +	for (i = 0; i < 64; i++) +		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); +	for (i = 0; i < 64; i++) +		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); +	for (i = 0; i < 64; i++) +		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); +	for (i = 0; i < 64; i++) +		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); +	for (i = 0; i < 64; i++) +		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); +	for (i = 0; i < 64; i++) +		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); +	for (i = 0; i < 16; i++) +		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); +	for (i = 0; i < 16; i++) +		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); +	for (i = 0; i < 8; i++) +		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); +	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); + +	/* Receive */ +	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); +	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); +	for (i = 0; i < 16; i++) +		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); +	for (i = 0; i < 16; i++) +		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); +	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); +	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); +	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); +	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); +	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); +	for (i = 0; i < 8; i++) +		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); +	for (i = 0; i < 8; i++) +		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); +	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); + +	/* Transmit */ +	for (i = 0; i < 32; i++) +		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); +	for (i = 0; i < 32; i++) +		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); +	for (i = 0; i < 32; i++) +		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); +	for (i = 0; i < 32; i++) +		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); +	for (i = 0; i < 32; i++) +		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); +	for (i = 0; i < 32; i++) +		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); +	for (i = 0; i < 32; i++) +		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); +	for (i = 0; i < 32; i++) +		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); +	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); +	for (i = 0; i < 16; i++) +		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); +	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); +	for (i = 0; i < 8; i++) +		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); +	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); + +	/* Wake Up */ +	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); +	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); +	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); +	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); +	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); +	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); +	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); +	regs_buff[827] = IXGBE_READ_REG(hw, 
IXGBE_WUPM); +	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); + +	/* DCB */ +	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); +	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); +	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); +	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); +	for (i = 0; i < 8; i++) +		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); +	for (i = 0; i < 8; i++) +		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); +	for (i = 0; i < 8; i++) +		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); +	for (i = 0; i < 8; i++) +		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); +	for (i = 0; i < 8; i++) +		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); +	for (i = 0; i < 8; i++) +		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); + +	/* Statistics */ +	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); +	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); +	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); +	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); +	for (i = 0; i < 8; i++) +		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); +	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); +	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); +	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); +	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); +	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); +	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); +	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); +	for (i = 0; i < 8; i++) +		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); +	for (i = 0; i < 8; i++) +		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); +	for (i = 0; i < 8; i++) +		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); +	for (i = 0; i < 8; i++) +		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); +	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); +	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); +	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); +	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); +	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); +	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); +	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); +	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); +	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); +	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); +	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); +	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); +	for (i = 0; i < 8; i++) +		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); +	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); +	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); +	regs_buff[956] = IXGBE_GET_STAT(adapter, roc); +	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); +	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); +	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); +	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); +	regs_buff[961] = IXGBE_GET_STAT(adapter, tor); +	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); +	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); +	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); +	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); +	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); +	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); +	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); +	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); +	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); +	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); +	regs_buff[973] = IXGBE_GET_STAT(adapter, xec); +	for (i = 0; i < 16; i++) +		regs_buff[974 + 
i] = IXGBE_GET_STAT(adapter, qprc[i]); +	for (i = 0; i < 16; i++) +		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); +	for (i = 0; i < 16; i++) +		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); +	for (i = 0; i < 16; i++) +		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); + +	/* MAC */ +	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); +	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); +	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); +	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); +	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); +	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); +	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); +	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); +	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); +	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); +	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); +	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); +	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); +	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); +	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); +	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); +	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); +	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); +	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); +	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); +	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); +	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); +	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); +	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); +	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); +	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); +	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); +	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); +	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); +	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); +	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); +	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + +	/* Diagnostic */ +	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); +	for (i = 0; i < 8; i++) +		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); +	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); +	for (i = 0; i < 4; i++) +		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); +	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); +	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); +	for (i = 0; i < 8; i++) +		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); +	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); +	for (i = 0; i < 4; i++) +		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); +	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); +	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); +	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); +	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); +	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); +	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); +	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); +	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); +	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); +	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); +	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); +	for (i = 0; i < 8; i++) +		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); +	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); +	regs_buff[1120] = 
IXGBE_READ_REG(hw, IXGBE_MDFTC1); +	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); +	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); +	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); +	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); +	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); +	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); +	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); +} + +static int ixgbe_get_eeprom_len(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	return adapter->hw.eeprom.word_size * 2; +} + +static int ixgbe_get_eeprom(struct net_device *netdev, +                            struct ethtool_eeprom *eeprom, u8 *bytes) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	u16 *eeprom_buff; +	int first_word, last_word, eeprom_len; +	int ret_val = 0; +	u16 i; + +	if (eeprom->len == 0) +		return -EINVAL; + +	eeprom->magic = hw->vendor_id | (hw->device_id << 16); + +	first_word = eeprom->offset >> 1; +	last_word = (eeprom->offset + eeprom->len - 1) >> 1; +	eeprom_len = last_word - first_word + 1; + +	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); +	if (!eeprom_buff) +		return -ENOMEM; + +	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, +					     eeprom_buff); + +	/* Device's eeprom is always little-endian, word addressable */ +	for (i = 0; i < eeprom_len; i++) +		le16_to_cpus(&eeprom_buff[i]); + +	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); +	kfree(eeprom_buff); + +	return ret_val; +} + +static void ixgbe_get_drvinfo(struct net_device *netdev, +                              struct ethtool_drvinfo *drvinfo) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	char firmware_version[32]; + +	strncpy(drvinfo->driver, ixgbe_driver_name, +	        sizeof(drvinfo->driver) - 1); +	strncpy(drvinfo->version, ixgbe_driver_version, +	        sizeof(drvinfo->version) - 1); + +	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", +	         (adapter->eeprom_version & 0xF000) >> 12, +	         (adapter->eeprom_version & 0x0FF0) >> 4, +	         adapter->eeprom_version & 0x000F); + +	strncpy(drvinfo->fw_version, firmware_version, +	        sizeof(drvinfo->fw_version)); +	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), +	        sizeof(drvinfo->bus_info)); +	drvinfo->n_stats = IXGBE_STATS_LEN; +	drvinfo->testinfo_len = IXGBE_TEST_LEN; +	drvinfo->regdump_len = ixgbe_get_regs_len(netdev); +} + +static void ixgbe_get_ringparam(struct net_device *netdev, +                                struct ethtool_ringparam *ring) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; +	struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; + +	ring->rx_max_pending = IXGBE_MAX_RXD; +	ring->tx_max_pending = IXGBE_MAX_TXD; +	ring->rx_mini_max_pending = 0; +	ring->rx_jumbo_max_pending = 0; +	ring->rx_pending = rx_ring->count; +	ring->tx_pending = tx_ring->count; +	ring->rx_mini_pending = 0; +	ring->rx_jumbo_pending = 0; +} + +static int ixgbe_set_ringparam(struct net_device *netdev, +                               struct ethtool_ringparam *ring) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; +	int i, err = 0; +	u32 new_rx_count, new_tx_count; +	bool need_update = false; + +	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) +		return -EINVAL; + +	new_rx_count = max(ring->rx_pending, 
(u32)IXGBE_MIN_RXD); +	new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); +	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + +	new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); +	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); +	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + +	if ((new_tx_count == adapter->tx_ring[0]->count) && +	    (new_rx_count == adapter->rx_ring[0]->count)) { +		/* nothing to do */ +		return 0; +	} + +	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) +		usleep_range(1000, 2000); + +	if (!netif_running(adapter->netdev)) { +		for (i = 0; i < adapter->num_tx_queues; i++) +			adapter->tx_ring[i]->count = new_tx_count; +		for (i = 0; i < adapter->num_rx_queues; i++) +			adapter->rx_ring[i]->count = new_rx_count; +		adapter->tx_ring_count = new_tx_count; +		adapter->rx_ring_count = new_rx_count; +		goto clear_reset; +	} + +	temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); +	if (!temp_tx_ring) { +		err = -ENOMEM; +		goto clear_reset; +	} + +	if (new_tx_count != adapter->tx_ring_count) { +		for (i = 0; i < adapter->num_tx_queues; i++) { +			memcpy(&temp_tx_ring[i], adapter->tx_ring[i], +			       sizeof(struct ixgbe_ring)); +			temp_tx_ring[i].count = new_tx_count; +			err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); +			if (err) { +				while (i) { +					i--; +					ixgbe_free_tx_resources(&temp_tx_ring[i]); +				} +				goto clear_reset; +			} +		} +		need_update = true; +	} + +	temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); +	if (!temp_rx_ring) { +		err = -ENOMEM; +		goto err_setup; +	} + +	if (new_rx_count != adapter->rx_ring_count) { +		for (i = 0; i < adapter->num_rx_queues; i++) { +			memcpy(&temp_rx_ring[i], adapter->rx_ring[i], +			       sizeof(struct ixgbe_ring)); +			temp_rx_ring[i].count = new_rx_count; +			err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); +			if (err) { +				while (i) { +					i--; +					ixgbe_free_rx_resources(&temp_rx_ring[i]); +				} +				goto err_setup; +			} +		} +		need_update = true; +	} + +	/* if rings need to be updated, here's the place to do it in one shot */ +	if (need_update) { +		ixgbe_down(adapter); + +		/* tx */ +		if (new_tx_count != adapter->tx_ring_count) { +			for (i = 0; i < adapter->num_tx_queues; i++) { +				ixgbe_free_tx_resources(adapter->tx_ring[i]); +				memcpy(adapter->tx_ring[i], &temp_tx_ring[i], +				       sizeof(struct ixgbe_ring)); +			} +			adapter->tx_ring_count = new_tx_count; +		} + +		/* rx */ +		if (new_rx_count != adapter->rx_ring_count) { +			for (i = 0; i < adapter->num_rx_queues; i++) { +				ixgbe_free_rx_resources(adapter->rx_ring[i]); +				memcpy(adapter->rx_ring[i], &temp_rx_ring[i], +				       sizeof(struct ixgbe_ring)); +			} +			adapter->rx_ring_count = new_rx_count; +		} +		ixgbe_up(adapter); +	} + +	vfree(temp_rx_ring); +err_setup: +	vfree(temp_tx_ring); +clear_reset: +	clear_bit(__IXGBE_RESETTING, &adapter->state); +	return err; +} + +static int ixgbe_get_sset_count(struct net_device *netdev, int sset) +{ +	switch (sset) { +	case ETH_SS_TEST: +		return IXGBE_TEST_LEN; +	case ETH_SS_STATS: +		return IXGBE_STATS_LEN; +	default: +		return -EOPNOTSUPP; +	} +} + +static void ixgbe_get_ethtool_stats(struct net_device *netdev, +                                    struct ethtool_stats *stats, u64 *data) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct rtnl_link_stats64 temp; +	const struct rtnl_link_stats64 *net_stats; +	unsigned int start; +	struct 
ixgbe_ring *ring; +	int i, j; +	char *p = NULL; + +	ixgbe_update_stats(adapter); +	net_stats = dev_get_stats(netdev, &temp); +	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { +		switch (ixgbe_gstrings_stats[i].type) { +		case NETDEV_STATS: +			p = (char *) net_stats + +					ixgbe_gstrings_stats[i].stat_offset; +			break; +		case IXGBE_STATS: +			p = (char *) adapter + +					ixgbe_gstrings_stats[i].stat_offset; +			break; +		} + +		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == +		           sizeof(u64)) ? *(u64 *)p : *(u32 *)p; +	} +	for (j = 0; j < adapter->num_tx_queues; j++) { +		ring = adapter->tx_ring[j]; +		do { +			start = u64_stats_fetch_begin_bh(&ring->syncp); +			data[i]   = ring->stats.packets; +			data[i+1] = ring->stats.bytes; +		} while (u64_stats_fetch_retry_bh(&ring->syncp, start)); +		i += 2; +	} +	for (j = 0; j < adapter->num_rx_queues; j++) { +		ring = adapter->rx_ring[j]; +		do { +			start = u64_stats_fetch_begin_bh(&ring->syncp); +			data[i]   = ring->stats.packets; +			data[i+1] = ring->stats.bytes; +		} while (u64_stats_fetch_retry_bh(&ring->syncp, start)); +		i += 2; +	} +	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { +		for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { +			data[i++] = adapter->stats.pxontxc[j]; +			data[i++] = adapter->stats.pxofftxc[j]; +		} +		for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) { +			data[i++] = adapter->stats.pxonrxc[j]; +			data[i++] = adapter->stats.pxoffrxc[j]; +		} +	} +} + +static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, +                              u8 *data) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	char *p = (char *)data; +	int i; + +	switch (stringset) { +	case ETH_SS_TEST: +		memcpy(data, *ixgbe_gstrings_test, +		       IXGBE_TEST_LEN * ETH_GSTRING_LEN); +		break; +	case ETH_SS_STATS: +		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { +			memcpy(p, ixgbe_gstrings_stats[i].stat_string, +			       ETH_GSTRING_LEN); +			p += ETH_GSTRING_LEN; +		} +		for (i = 0; i < adapter->num_tx_queues; i++) { +			sprintf(p, "tx_queue_%u_packets", i); +			p += ETH_GSTRING_LEN; +			sprintf(p, "tx_queue_%u_bytes", i); +			p += ETH_GSTRING_LEN; +		} +		for (i = 0; i < adapter->num_rx_queues; i++) { +			sprintf(p, "rx_queue_%u_packets", i); +			p += ETH_GSTRING_LEN; +			sprintf(p, "rx_queue_%u_bytes", i); +			p += ETH_GSTRING_LEN; +		} +		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { +			for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { +				sprintf(p, "tx_pb_%u_pxon", i); +				p += ETH_GSTRING_LEN; +				sprintf(p, "tx_pb_%u_pxoff", i); +				p += ETH_GSTRING_LEN; +			} +			for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) { +				sprintf(p, "rx_pb_%u_pxon", i); +				p += ETH_GSTRING_LEN; +				sprintf(p, "rx_pb_%u_pxoff", i); +				p += ETH_GSTRING_LEN; +			} +		} +		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ +		break; +	} +} + +static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	bool link_up; +	u32 link_speed = 0; +	*data = 0; + +	hw->mac.ops.check_link(hw, &link_speed, &link_up, true); +	if (link_up) +		return *data; +	else +		*data = 1; +	return *data; +} + +/* ethtool register test data */ +struct ixgbe_reg_test { +	u16 reg; +	u8  array_len; +	u8  test_type; +	u32 mask; +	u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables.  
We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST	1 +#define SET_READ_TEST	2 +#define WRITE_NO_TEST	3 +#define TABLE32_TEST	4 +#define TABLE64_TEST_LO	5 +#define TABLE64_TEST_HI	6 + +/* default 82599 register test */ +static const struct ixgbe_reg_test reg_test_82599[] = { +	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, +	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, +	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, +	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, +	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, +	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, +	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, +	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, +	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, +	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, +	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ 0, 0, 0, 0 } +}; + +/* default 82598 register test */ +static const struct ixgbe_reg_test reg_test_82598[] = { +	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, +	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, +	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, +	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +	/* Enable all four RX queues before testing. */ +	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, +	/* RDH is read-only for 82598, only test RDT. 
*/ +	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, +	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, +	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, +	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, +	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, +	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, +	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +	{ 0, 0, 0, 0 } +}; + +static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, +			     u32 mask, u32 write) +{ +	u32 pat, val, before; +	static const u32 test_pattern[] = { +		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + +	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { +		before = readl(adapter->hw.hw_addr + reg); +		writel((test_pattern[pat] & write), +		       (adapter->hw.hw_addr + reg)); +		val = readl(adapter->hw.hw_addr + reg); +		if (val != (test_pattern[pat] & write & mask)) { +			e_err(drv, "pattern test reg %04X failed: got " +			      "0x%08X expected 0x%08X\n", +			      reg, val, (test_pattern[pat] & write & mask)); +			*data = reg; +			writel(before, adapter->hw.hw_addr + reg); +			return 1; +		} +		writel(before, adapter->hw.hw_addr + reg); +	} +	return 0; +} + +static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, +			      u32 mask, u32 write) +{ +	u32 val, before; +	before = readl(adapter->hw.hw_addr + reg); +	writel((write & mask), (adapter->hw.hw_addr + reg)); +	val = readl(adapter->hw.hw_addr + reg); +	if ((write & mask) != (val & mask)) { +		e_err(drv, "set/check reg %04X test failed: got 0x%08X " +		      "expected 0x%08X\n", reg, (val & mask), (write & mask)); +		*data = reg; +		writel(before, (adapter->hw.hw_addr + reg)); +		return 1; +	} +	writel(before, (adapter->hw.hw_addr + reg)); +	return 0; +} + +#define REG_PATTERN_TEST(reg, mask, write)				      \ +	do {								      \ +		if (reg_pattern_test(adapter, data, reg, mask, write))	      \ +			return 1;					      \ +	} while (0)							      \ + + +#define REG_SET_AND_CHECK(reg, mask, write)				      \ +	do {								      \ +		if (reg_set_and_check(adapter, data, reg, mask, write))	      \ +			return 1;					      \ +	} while (0)							      \ + +static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) +{ +	const struct ixgbe_reg_test *test; +	u32 value, before, after; +	u32 i, toggle; + +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82598EB: +		toggle = 0x7FFFF3FF; +		test = reg_test_82598; +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		toggle = 0x7FFFF30F; +		test = reg_test_82599; +		break; +	default: +		*data = 1; +		return 1; +		break; +	} + +	/* +	 * Because the status register is such a special case, +	 * we handle it separately from the rest of the register +	 * tests.  Some bits are read-only, some toggle, and some +	 * are writeable on newer MACs. 
+	 */ +	before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); +	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); +	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; +	if (value != after) { +		e_err(drv, "failed STATUS register test got: 0x%08X " +		      "expected: 0x%08X\n", after, value); +		*data = 1; +		return 1; +	} +	/* restore previous status */ +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); + +	/* +	 * Perform the remainder of the register test, looping through +	 * the test table until we either fail or reach the null entry. +	 */ +	while (test->reg) { +		for (i = 0; i < test->array_len; i++) { +			switch (test->test_type) { +			case PATTERN_TEST: +				REG_PATTERN_TEST(test->reg + (i * 0x40), +						 test->mask, +						 test->write); +				break; +			case SET_READ_TEST: +				REG_SET_AND_CHECK(test->reg + (i * 0x40), +						  test->mask, +						  test->write); +				break; +			case WRITE_NO_TEST: +				writel(test->write, +				       (adapter->hw.hw_addr + test->reg) +				       + (i * 0x40)); +				break; +			case TABLE32_TEST: +				REG_PATTERN_TEST(test->reg + (i * 4), +						 test->mask, +						 test->write); +				break; +			case TABLE64_TEST_LO: +				REG_PATTERN_TEST(test->reg + (i * 8), +						 test->mask, +						 test->write); +				break; +			case TABLE64_TEST_HI: +				REG_PATTERN_TEST((test->reg + 4) + (i * 8), +						 test->mask, +						 test->write); +				break; +			} +		} +		test++; +	} + +	*data = 0; +	return 0; +} + +static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	if (hw->eeprom.ops.validate_checksum(hw, NULL)) +		*data = 1; +	else +		*data = 0; +	return *data; +} + +static irqreturn_t ixgbe_test_intr(int irq, void *data) +{ +	struct net_device *netdev = (struct net_device *) data; +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); + +	return IRQ_HANDLED; +} + +static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) +{ +	struct net_device *netdev = adapter->netdev; +	u32 mask, i = 0, shared_int = true; +	u32 irq = adapter->pdev->irq; + +	*data = 0; + +	/* Hook up test interrupt handler just for this test */ +	if (adapter->msix_entries) { +		/* NOTE: we don't test MSI-X interrupts here, yet */ +		return 0; +	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { +		shared_int = false; +		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, +				netdev)) { +			*data = 1; +			return -1; +		} +	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, +	                        netdev->name, netdev)) { +		shared_int = false; +	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, +	                       netdev->name, netdev)) { +		*data = 1; +		return -1; +	} +	e_info(hw, "testing %s interrupt\n", shared_int ? +	       "shared" : "unshared"); + +	/* Disable all the interrupts */ +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); +	IXGBE_WRITE_FLUSH(&adapter->hw); +	usleep_range(10000, 20000); + +	/* Test each interrupt */ +	for (; i < 10; i++) { +		/* Interrupt to test */ +		mask = 1 << i; + +		if (!shared_int) { +			/* +			 * Disable the interrupts to be reported in +			 * the cause register and then force the same +			 * interrupt and see if one gets posted.  If +			 * an interrupt was posted to the bus, the +			 * test failed. 
+			 */ +			adapter->test_icr = 0; +			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, +			                ~mask & 0x00007FFF); +			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, +			                ~mask & 0x00007FFF); +			IXGBE_WRITE_FLUSH(&adapter->hw); +			usleep_range(10000, 20000); + +			if (adapter->test_icr & mask) { +				*data = 3; +				break; +			} +		} + +		/* +		 * Enable the interrupt to be reported in the cause +		 * register and then force the same interrupt and see +		 * if one gets posted.  If an interrupt was not posted +		 * to the bus, the test failed. +		 */ +		adapter->test_icr = 0; +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); +		IXGBE_WRITE_FLUSH(&adapter->hw); +		usleep_range(10000, 20000); + +		if (!(adapter->test_icr &mask)) { +			*data = 4; +			break; +		} + +		if (!shared_int) { +			/* +			 * Disable the other interrupts to be reported in +			 * the cause register and then force the other +			 * interrupts and see if any get posted.  If +			 * an interrupt was posted to the bus, the +			 * test failed. +			 */ +			adapter->test_icr = 0; +			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, +			                ~mask & 0x00007FFF); +			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, +			                ~mask & 0x00007FFF); +			IXGBE_WRITE_FLUSH(&adapter->hw); +			usleep_range(10000, 20000); + +			if (adapter->test_icr) { +				*data = 5; +				break; +			} +		} +	} + +	/* Disable all the interrupts */ +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); +	IXGBE_WRITE_FLUSH(&adapter->hw); +	usleep_range(10000, 20000); + +	/* Unhook test interrupt handler */ +	free_irq(irq, netdev); + +	return *data; +} + +static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; +	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; +	struct ixgbe_hw *hw = &adapter->hw; +	u32 reg_ctl; + +	/* shut down the DMA engines now so they can be reinitialized later */ + +	/* first Rx */ +	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +	reg_ctl &= ~IXGBE_RXCTRL_RXEN; +	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); +	ixgbe_disable_rx_queue(adapter, rx_ring); + +	/* now Tx */ +	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); +	reg_ctl &= ~IXGBE_TXDCTL_ENABLE; +	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); + +	switch (hw->mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); +		reg_ctl &= ~IXGBE_DMATXCTL_TE; +		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); +		break; +	default: +		break; +	} + +	ixgbe_reset(adapter); + +	ixgbe_free_tx_resources(&adapter->test_tx_ring); +	ixgbe_free_rx_resources(&adapter->test_rx_ring); +} + +static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; +	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; +	u32 rctl, reg_data; +	int ret_val; +	int err; + +	/* Setup Tx descriptor ring and Tx buffers */ +	tx_ring->count = IXGBE_DEFAULT_TXD; +	tx_ring->queue_index = 0; +	tx_ring->dev = &adapter->pdev->dev; +	tx_ring->netdev = adapter->netdev; +	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; +	tx_ring->numa_node = adapter->node; + +	err = ixgbe_setup_tx_resources(tx_ring); +	if (err) +		return 1; + +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); +		reg_data |= IXGBE_DMATXCTL_TE; +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, 
reg_data); +		break; +	default: +		break; +	} + +	ixgbe_configure_tx_ring(adapter, tx_ring); + +	/* Setup Rx Descriptor ring and Rx buffers */ +	rx_ring->count = IXGBE_DEFAULT_RXD; +	rx_ring->queue_index = 0; +	rx_ring->dev = &adapter->pdev->dev; +	rx_ring->netdev = adapter->netdev; +	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; +	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; +	rx_ring->numa_node = adapter->node; + +	err = ixgbe_setup_rx_resources(rx_ring); +	if (err) { +		ret_val = 4; +		goto err_nomem; +	} + +	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); + +	ixgbe_configure_rx_ring(adapter, rx_ring); + +	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); + +	return 0; + +err_nomem: +	ixgbe_free_desc_rings(adapter); +	return ret_val; +} + +static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 reg_data; + +	/* X540 needs to set the MACC.FLU bit to force link up */ +	if (adapter->hw.mac.type == ixgbe_mac_X540) { +		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC); +		reg_data |= IXGBE_MACC_FLU; +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data); +	} + +	/* right now we only support MAC loopback in the driver */ +	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); +	/* Setup MAC loopback */ +	reg_data |= IXGBE_HLREG0_LPBK; +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); + +	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); +	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data); + +	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC); +	reg_data &= ~IXGBE_AUTOC_LMS_MASK; +	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); +	IXGBE_WRITE_FLUSH(&adapter->hw); +	usleep_range(10000, 20000); + +	/* Disable Atlas Tx lanes; re-enabled in reset path */ +	if (hw->mac.type == ixgbe_mac_82598EB) { +		u8 atlas; + +		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); +		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; +		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); + +		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); +		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; +		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); + +		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); +		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; +		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); + +		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); +		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; +		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); +	} + +	return 0; +} + +static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) +{ +	u32 reg_data; + +	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); +	reg_data &= ~IXGBE_HLREG0_LPBK; +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); +} + +static void ixgbe_create_lbtest_frame(struct sk_buff *skb, +                                      unsigned int frame_size) +{ +	memset(skb->data, 0xFF, frame_size); +	frame_size &= ~1; +	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); +	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); +	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int ixgbe_check_lbtest_frame(struct sk_buff *skb, +                                    unsigned int frame_size) +{ +	frame_size &= ~1; +	if 
(*(skb->data + 3) == 0xFF) { +		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && +		    (*(skb->data + frame_size / 2 + 12) == 0xAF)) { +			return 0; +		} +	} +	return 13; +} + +static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, +                                  struct ixgbe_ring *tx_ring, +                                  unsigned int size) +{ +	union ixgbe_adv_rx_desc *rx_desc; +	struct ixgbe_rx_buffer *rx_buffer_info; +	struct ixgbe_tx_buffer *tx_buffer_info; +	const int bufsz = rx_ring->rx_buf_len; +	u32 staterr; +	u16 rx_ntc, tx_ntc, count = 0; + +	/* initialize next to clean and descriptor values */ +	rx_ntc = rx_ring->next_to_clean; +	tx_ntc = tx_ring->next_to_clean; +	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); +	staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + +	while (staterr & IXGBE_RXD_STAT_DD) { +		/* check Rx buffer */ +		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + +		/* unmap Rx buffer, will be remapped by alloc_rx_buffers */ +		dma_unmap_single(rx_ring->dev, +		                 rx_buffer_info->dma, +				 bufsz, +				 DMA_FROM_DEVICE); +		rx_buffer_info->dma = 0; + +		/* verify contents of skb */ +		if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size)) +			count++; + +		/* unmap buffer on Tx side */ +		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; +		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + +		/* increment Rx/Tx next to clean counters */ +		rx_ntc++; +		if (rx_ntc == rx_ring->count) +			rx_ntc = 0; +		tx_ntc++; +		if (tx_ntc == tx_ring->count) +			tx_ntc = 0; + +		/* fetch next descriptor */ +		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); +		staterr = le32_to_cpu(rx_desc->wb.upper.status_error); +	} + +	/* re-map buffers to ring, store next to clean values */ +	ixgbe_alloc_rx_buffers(rx_ring, count); +	rx_ring->next_to_clean = rx_ntc; +	tx_ring->next_to_clean = tx_ntc; + +	return count; +} + +static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; +	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; +	int i, j, lc, good_cnt, ret_val = 0; +	unsigned int size = 1024; +	netdev_tx_t tx_ret_val; +	struct sk_buff *skb; + +	/* allocate test skb */ +	skb = alloc_skb(size, GFP_KERNEL); +	if (!skb) +		return 11; + +	/* place data into test skb */ +	ixgbe_create_lbtest_frame(skb, size); +	skb_put(skb, size); + +	/* +	 * Calculate the loop count based on the largest descriptor ring +	 * The idea is to wrap the largest ring a number of times using 64 +	 * send/receive pairs during each loop +	 */ + +	if (rx_ring->count <= tx_ring->count) +		lc = ((tx_ring->count / 64) * 2) + 1; +	else +		lc = ((rx_ring->count / 64) * 2) + 1; + +	for (j = 0; j <= lc; j++) { +		/* reset count of good packets */ +		good_cnt = 0; + +		/* place 64 packets on the transmit queue*/ +		for (i = 0; i < 64; i++) { +			skb_get(skb); +			tx_ret_val = ixgbe_xmit_frame_ring(skb, +							   adapter, +							   tx_ring); +			if (tx_ret_val == NETDEV_TX_OK) +				good_cnt++; +		} + +		if (good_cnt != 64) { +			ret_val = 12; +			break; +		} + +		/* allow 200 milliseconds for packets to go from Tx to Rx */ +		msleep(200); + +		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); +		if (good_cnt != 64) { +			ret_val = 13; +			break; +		} +	} + +	/* free the original skb */ +	kfree_skb(skb); + +	return ret_val; +} + +static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) +{ +	*data = ixgbe_setup_desc_rings(adapter); +	if (*data) +		goto out; +	*data = 
ixgbe_setup_loopback_test(adapter); +	if (*data) +		goto err_loopback; +	*data = ixgbe_run_loopback_test(adapter); +	ixgbe_loopback_cleanup(adapter); + +err_loopback: +	ixgbe_free_desc_rings(adapter); +out: +	return *data; +} + +static void ixgbe_diag_test(struct net_device *netdev, +                            struct ethtool_test *eth_test, u64 *data) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	bool if_running = netif_running(netdev); + +	set_bit(__IXGBE_TESTING, &adapter->state); +	if (eth_test->flags == ETH_TEST_FL_OFFLINE) { +		/* Offline tests */ + +		e_info(hw, "offline testing starting\n"); + +		/* Link test performed before hardware reset so autoneg doesn't +		 * interfere with test result */ +		if (ixgbe_link_test(adapter, &data[4])) +			eth_test->flags |= ETH_TEST_FL_FAILED; + +		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +			int i; +			for (i = 0; i < adapter->num_vfs; i++) { +				if (adapter->vfinfo[i].clear_to_send) { +					netdev_warn(netdev, "%s", +						    "offline diagnostic is not " +						    "supported when VFs are " +						    "present\n"); +					data[0] = 1; +					data[1] = 1; +					data[2] = 1; +					data[3] = 1; +					eth_test->flags |= ETH_TEST_FL_FAILED; +					clear_bit(__IXGBE_TESTING, +						  &adapter->state); +					goto skip_ol_tests; +				} +			} +		} + +		if (if_running) +			/* indicate we're in test mode */ +			dev_close(netdev); +		else +			ixgbe_reset(adapter); + +		e_info(hw, "register testing starting\n"); +		if (ixgbe_reg_test(adapter, &data[0])) +			eth_test->flags |= ETH_TEST_FL_FAILED; + +		ixgbe_reset(adapter); +		e_info(hw, "eeprom testing starting\n"); +		if (ixgbe_eeprom_test(adapter, &data[1])) +			eth_test->flags |= ETH_TEST_FL_FAILED; + +		ixgbe_reset(adapter); +		e_info(hw, "interrupt testing starting\n"); +		if (ixgbe_intr_test(adapter, &data[2])) +			eth_test->flags |= ETH_TEST_FL_FAILED; + +		/* If SRIOV or VMDq is enabled then skip MAC +		 * loopback diagnostic. 
*/ +		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | +				      IXGBE_FLAG_VMDQ_ENABLED)) { +			e_info(hw, "Skip MAC loopback diagnostic in VT " +			       "mode\n"); +			data[3] = 0; +			goto skip_loopback; +		} + +		ixgbe_reset(adapter); +		e_info(hw, "loopback testing starting\n"); +		if (ixgbe_loopback_test(adapter, &data[3])) +			eth_test->flags |= ETH_TEST_FL_FAILED; + +skip_loopback: +		ixgbe_reset(adapter); + +		clear_bit(__IXGBE_TESTING, &adapter->state); +		if (if_running) +			dev_open(netdev); +	} else { +		e_info(hw, "online testing starting\n"); +		/* Online tests */ +		if (ixgbe_link_test(adapter, &data[4])) +			eth_test->flags |= ETH_TEST_FL_FAILED; + +		/* Online tests aren't run; pass by default */ +		data[0] = 0; +		data[1] = 0; +		data[2] = 0; +		data[3] = 0; + +		clear_bit(__IXGBE_TESTING, &adapter->state); +	} +skip_ol_tests: +	msleep_interruptible(4 * 1000); +} + +static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, +                               struct ethtool_wolinfo *wol) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int retval = 1; + +	/* WOL not supported except for the following */ +	switch(hw->device_id) { +	case IXGBE_DEV_ID_82599_SFP: +		/* Only this subdevice supports WOL */ +		if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) { +			wol->supported = 0; +			break; +		} +		retval = 0; +		break; +	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: +		/* All except this subdevice support WOL */ +		if (hw->subsystem_device_id == +		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { +			wol->supported = 0; +			break; +		} +		retval = 0; +		break; +	case IXGBE_DEV_ID_82599_KX4: +		retval = 0; +		break; +	default: +		wol->supported = 0; +	} + +	return retval; +} + +static void ixgbe_get_wol(struct net_device *netdev, +                          struct ethtool_wolinfo *wol) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	wol->supported = WAKE_UCAST | WAKE_MCAST | +	                 WAKE_BCAST | WAKE_MAGIC; +	wol->wolopts = 0; + +	if (ixgbe_wol_exclusion(adapter, wol) || +	    !device_can_wakeup(&adapter->pdev->dev)) +		return; + +	if (adapter->wol & IXGBE_WUFC_EX) +		wol->wolopts |= WAKE_UCAST; +	if (adapter->wol & IXGBE_WUFC_MC) +		wol->wolopts |= WAKE_MCAST; +	if (adapter->wol & IXGBE_WUFC_BC) +		wol->wolopts |= WAKE_BCAST; +	if (adapter->wol & IXGBE_WUFC_MAG) +		wol->wolopts |= WAKE_MAGIC; +} + +static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) +		return -EOPNOTSUPP; + +	if (ixgbe_wol_exclusion(adapter, wol)) +		return wol->wolopts ? 
-EOPNOTSUPP : 0; + +	adapter->wol = 0; + +	if (wol->wolopts & WAKE_UCAST) +		adapter->wol |= IXGBE_WUFC_EX; +	if (wol->wolopts & WAKE_MCAST) +		adapter->wol |= IXGBE_WUFC_MC; +	if (wol->wolopts & WAKE_BCAST) +		adapter->wol |= IXGBE_WUFC_BC; +	if (wol->wolopts & WAKE_MAGIC) +		adapter->wol |= IXGBE_WUFC_MAG; + +	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + +	return 0; +} + +static int ixgbe_nway_reset(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	if (netif_running(netdev)) +		ixgbe_reinit_locked(adapter); + +	return 0; +} + +static int ixgbe_set_phys_id(struct net_device *netdev, +			     enum ethtool_phys_id_state state) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; + +	switch (state) { +	case ETHTOOL_ID_ACTIVE: +		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +		return 2; + +	case ETHTOOL_ID_ON: +		hw->mac.ops.led_on(hw, IXGBE_LED_ON); +		break; + +	case ETHTOOL_ID_OFF: +		hw->mac.ops.led_off(hw, IXGBE_LED_ON); +		break; + +	case ETHTOOL_ID_INACTIVE: +		/* Restore LED settings */ +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); +		break; +	} + +	return 0; +} + +static int ixgbe_get_coalesce(struct net_device *netdev, +                              struct ethtool_coalesce *ec) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + +	/* only valid if in constant ITR mode */ +	switch (adapter->rx_itr_setting) { +	case 0: +		/* throttling disabled */ +		ec->rx_coalesce_usecs = 0; +		break; +	case 1: +		/* dynamic ITR mode */ +		ec->rx_coalesce_usecs = 1; +		break; +	default: +		/* fixed interrupt rate mode */ +		ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param; +		break; +	} + +	/* if in mixed tx/rx queues per vector mode, report only rx settings */ +	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) +		return 0; + +	/* only valid if in constant ITR mode */ +	switch (adapter->tx_itr_setting) { +	case 0: +		/* throttling disabled */ +		ec->tx_coalesce_usecs = 0; +		break; +	case 1: +		/* dynamic ITR mode */ +		ec->tx_coalesce_usecs = 1; +		break; +	default: +		ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param; +		break; +	} + +	return 0; +} + +/* + * this function must be called before setting the new value of + * rx_itr_setting + */ +static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter, +			     struct ethtool_coalesce *ec) +{ +	struct net_device *netdev = adapter->netdev; + +	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) +		return false; + +	/* if interrupt rate is too high then disable RSC */ +	if (ec->rx_coalesce_usecs != 1 && +	    ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) { +		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { +			e_info(probe, "rx-usecs set too low, " +				      "disabling RSC\n"); +			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; +			return true; +		} +	} else { +		/* check the feature flag value and enable RSC if necessary */ +		if ((netdev->features & NETIF_F_LRO) && +		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { +			e_info(probe, "rx-usecs set to %d, " +				      "re-enabling RSC\n", +			       ec->rx_coalesce_usecs); +			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; +			return true; +		} +	} +	return false; +} + +static int ixgbe_set_coalesce(struct net_device *netdev, +                              struct ethtool_coalesce *ec) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_q_vector 
*q_vector; +	int i; +	bool need_reset = false; + +	/* don't accept tx specific changes if we've got mixed RxTx vectors */ +	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count +	   && ec->tx_coalesce_usecs) +		return -EINVAL; + +	if (ec->tx_max_coalesced_frames_irq) +		adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + +	if (ec->rx_coalesce_usecs > 1) { +		/* check the limits */ +		if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) || +		    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) +			return -EINVAL; + +		/* check the old value and enable RSC if necessary */ +		need_reset = ixgbe_update_rsc(adapter, ec); + +		/* store the value in ints/second */ +		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; + +		/* static value of interrupt rate */ +		adapter->rx_itr_setting = adapter->rx_eitr_param; +		/* clear the lower bit as its used for dynamic state */ +		adapter->rx_itr_setting &= ~1; +	} else if (ec->rx_coalesce_usecs == 1) { +		/* check the old value and enable RSC if necessary */ +		need_reset = ixgbe_update_rsc(adapter, ec); + +		/* 1 means dynamic mode */ +		adapter->rx_eitr_param = 20000; +		adapter->rx_itr_setting = 1; +	} else { +		/* check the old value and enable RSC if necessary */ +		need_reset = ixgbe_update_rsc(adapter, ec); +		/* +		 * any other value means disable eitr, which is best +		 * served by setting the interrupt rate very high +		 */ +		adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; +		adapter->rx_itr_setting = 0; +	} + +	if (ec->tx_coalesce_usecs > 1) { +		/* +		 * don't have to worry about max_int as above because +		 * tx vectors don't do hardware RSC (an rx function) +		 */ +		/* check the limits */ +		if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) || +		    (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE)) +			return -EINVAL; + +		/* store the value in ints/second */ +		adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs; + +		/* static value of interrupt rate */ +		adapter->tx_itr_setting = adapter->tx_eitr_param; + +		/* clear the lower bit as its used for dynamic state */ +		adapter->tx_itr_setting &= ~1; +	} else if (ec->tx_coalesce_usecs == 1) { +		/* 1 means dynamic mode */ +		adapter->tx_eitr_param = 10000; +		adapter->tx_itr_setting = 1; +	} else { +		adapter->tx_eitr_param = IXGBE_MAX_INT_RATE; +		adapter->tx_itr_setting = 0; +	} + +	/* MSI/MSIx Interrupt Mode */ +	if (adapter->flags & +	    (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) { +		int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; +		for (i = 0; i < num_vectors; i++) { +			q_vector = adapter->q_vector[i]; +			if (q_vector->tx.count && !q_vector->rx.count) +				/* tx only */ +				q_vector->eitr = adapter->tx_eitr_param; +			else +				/* rx only or mixed */ +				q_vector->eitr = adapter->rx_eitr_param; +			q_vector->tx.work_limit = adapter->tx_work_limit; +			ixgbe_write_eitr(q_vector); +		} +	/* Legacy Interrupt Mode */ +	} else { +		q_vector = adapter->q_vector[0]; +		q_vector->eitr = adapter->rx_eitr_param; +		q_vector->tx.work_limit = adapter->tx_work_limit; +		ixgbe_write_eitr(q_vector); +	} + +	/* +	 * do reset here at the end to make sure EITR==0 case is handled +	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings +	 * also locks in RSC enable/disable which requires reset +	 */ +	if (need_reset) +		ixgbe_do_reset(netdev); + +	return 0; +} + +static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, +					struct ethtool_rxnfc *cmd) +{ +	union ixgbe_atr_input *mask = 
&adapter->fdir_mask; +	struct ethtool_rx_flow_spec *fsp = +		(struct ethtool_rx_flow_spec *)&cmd->fs; +	struct hlist_node *node, *node2; +	struct ixgbe_fdir_filter *rule = NULL; + +	/* report total rule count */ +	cmd->data = (1024 << adapter->fdir_pballoc) - 2; + +	hlist_for_each_entry_safe(rule, node, node2, +				  &adapter->fdir_filter_list, fdir_node) { +		if (fsp->location <= rule->sw_idx) +			break; +	} + +	if (!rule || fsp->location != rule->sw_idx) +		return -EINVAL; + +	/* fill out the flow spec entry */ + +	/* set flow type field */ +	switch (rule->filter.formatted.flow_type) { +	case IXGBE_ATR_FLOW_TYPE_TCPV4: +		fsp->flow_type = TCP_V4_FLOW; +		break; +	case IXGBE_ATR_FLOW_TYPE_UDPV4: +		fsp->flow_type = UDP_V4_FLOW; +		break; +	case IXGBE_ATR_FLOW_TYPE_SCTPV4: +		fsp->flow_type = SCTP_V4_FLOW; +		break; +	case IXGBE_ATR_FLOW_TYPE_IPV4: +		fsp->flow_type = IP_USER_FLOW; +		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; +		fsp->h_u.usr_ip4_spec.proto = 0; +		fsp->m_u.usr_ip4_spec.proto = 0; +		break; +	default: +		return -EINVAL; +	} + +	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; +	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; +	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; +	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; +	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; +	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; +	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; +	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; +	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; +	fsp->m_ext.vlan_tci = mask->formatted.vlan_id; +	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; +	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; +	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); +	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); +	fsp->flow_type |= FLOW_EXT; + +	/* record action */ +	if (rule->action == IXGBE_FDIR_DROP_QUEUE) +		fsp->ring_cookie = RX_CLS_FLOW_DISC; +	else +		fsp->ring_cookie = rule->action; + +	return 0; +} + +static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, +				      struct ethtool_rxnfc *cmd, +				      u32 *rule_locs) +{ +	struct hlist_node *node, *node2; +	struct ixgbe_fdir_filter *rule; +	int cnt = 0; + +	/* report total rule count */ +	cmd->data = (1024 << adapter->fdir_pballoc) - 2; + +	hlist_for_each_entry_safe(rule, node, node2, +				  &adapter->fdir_filter_list, fdir_node) { +		if (cnt == cmd->rule_cnt) +			return -EMSGSIZE; +		rule_locs[cnt] = rule->sw_idx; +		cnt++; +	} + +	return 0; +} + +static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +			   void *rule_locs) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	int ret = -EOPNOTSUPP; + +	switch (cmd->cmd) { +	case ETHTOOL_GRXRINGS: +		cmd->data = adapter->num_rx_queues; +		ret = 0; +		break; +	case ETHTOOL_GRXCLSRLCNT: +		cmd->rule_cnt = adapter->fdir_filter_count; +		ret = 0; +		break; +	case ETHTOOL_GRXCLSRULE: +		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); +		break; +	case ETHTOOL_GRXCLSRLALL: +		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, +						 (u32 *)rule_locs); +		break; +	default: +		break; +	} + +	return ret; +} + +static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, +					   struct ixgbe_fdir_filter *input, +					   u16 sw_idx) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	struct hlist_node *node, *node2, *parent; +	struct ixgbe_fdir_filter *rule; +	int err = -EINVAL; + +	parent = 
NULL; +	rule = NULL; + +	hlist_for_each_entry_safe(rule, node, node2, +				  &adapter->fdir_filter_list, fdir_node) { +		/* hash found, or no matching entry */ +		if (rule->sw_idx >= sw_idx) +			break; +		parent = node; +	} + +	/* if there is an old rule occupying our place remove it */ +	if (rule && (rule->sw_idx == sw_idx)) { +		if (!input || (rule->filter.formatted.bkt_hash != +			       input->filter.formatted.bkt_hash)) { +			err = ixgbe_fdir_erase_perfect_filter_82599(hw, +								&rule->filter, +								sw_idx); +		} + +		hlist_del(&rule->fdir_node); +		kfree(rule); +		adapter->fdir_filter_count--; +	} + +	/* +	 * If no input this was a delete, err should be 0 if a rule was +	 * successfully found and removed from the list else -EINVAL +	 */ +	if (!input) +		return err; + +	/* initialize node and set software index */ +	INIT_HLIST_NODE(&input->fdir_node); + +	/* add filter to the list */ +	if (parent) +		hlist_add_after(parent, &input->fdir_node); +	else +		hlist_add_head(&input->fdir_node, +			       &adapter->fdir_filter_list); + +	/* update counts */ +	adapter->fdir_filter_count++; + +	return 0; +} + +static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, +				       u8 *flow_type) +{ +	switch (fsp->flow_type & ~FLOW_EXT) { +	case TCP_V4_FLOW: +		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; +		break; +	case UDP_V4_FLOW: +		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; +		break; +	case SCTP_V4_FLOW: +		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; +		break; +	case IP_USER_FLOW: +		switch (fsp->h_u.usr_ip4_spec.proto) { +		case IPPROTO_TCP: +			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; +			break; +		case IPPROTO_UDP: +			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; +			break; +		case IPPROTO_SCTP: +			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; +			break; +		case 0: +			if (!fsp->m_u.usr_ip4_spec.proto) { +				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; +				break; +			} +		default: +			return 0; +		} +		break; +	default: +		return 0; +	} + +	return 1; +} + +static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, +					struct ethtool_rxnfc *cmd) +{ +	struct ethtool_rx_flow_spec *fsp = +		(struct ethtool_rx_flow_spec *)&cmd->fs; +	struct ixgbe_hw *hw = &adapter->hw; +	struct ixgbe_fdir_filter *input; +	union ixgbe_atr_input mask; +	int err; + +	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) +		return -EOPNOTSUPP; + +	/* +	 * Don't allow programming if the action is a queue greater than +	 * the number of online Rx queues. 
+	 */ +	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && +	    (fsp->ring_cookie >= adapter->num_rx_queues)) +		return -EINVAL; + +	/* Don't allow indexes to exist outside of available space */ +	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { +		e_err(drv, "Location out of range\n"); +		return -EINVAL; +	} + +	input = kzalloc(sizeof(*input), GFP_ATOMIC); +	if (!input) +		return -ENOMEM; + +	memset(&mask, 0, sizeof(union ixgbe_atr_input)); + +	/* set SW index */ +	input->sw_idx = fsp->location; + +	/* record flow type */ +	if (!ixgbe_flowspec_to_flow_type(fsp, +					 &input->filter.formatted.flow_type)) { +		e_err(drv, "Unrecognized flow type\n"); +		goto err_out; +	} + +	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | +				   IXGBE_ATR_L4TYPE_MASK; + +	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) +		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + +	/* Copy input into formatted structures */ +	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; +	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; +	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; +	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; +	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; +	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; +	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; +	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + +	if (fsp->flow_type & FLOW_EXT) { +		input->filter.formatted.vm_pool = +				(unsigned char)ntohl(fsp->h_ext.data[1]); +		mask.formatted.vm_pool = +				(unsigned char)ntohl(fsp->m_ext.data[1]); +		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; +		mask.formatted.vlan_id = fsp->m_ext.vlan_tci; +		input->filter.formatted.flex_bytes = +						fsp->h_ext.vlan_etype; +		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; +	} + +	/* determine if we need to drop or route the packet */ +	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) +		input->action = IXGBE_FDIR_DROP_QUEUE; +	else +		input->action = fsp->ring_cookie; + +	spin_lock(&adapter->fdir_perfect_lock); + +	if (hlist_empty(&adapter->fdir_filter_list)) { +		/* save mask and program input mask into HW */ +		memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); +		err = ixgbe_fdir_set_input_mask_82599(hw, &mask); +		if (err) { +			e_err(drv, "Error writing mask\n"); +			goto err_out_w_lock; +		} +	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { +		e_err(drv, "Only one mask supported per port\n"); +		goto err_out_w_lock; +	} + +	/* apply mask and compute/store hash */ +	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + +	/* program filters to filter memory */ +	err = ixgbe_fdir_write_perfect_filter_82599(hw, +				&input->filter, input->sw_idx, +				(input->action == IXGBE_FDIR_DROP_QUEUE) ? 
+				IXGBE_FDIR_DROP_QUEUE : +				adapter->rx_ring[input->action]->reg_idx); +	if (err) +		goto err_out_w_lock; + +	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + +	spin_unlock(&adapter->fdir_perfect_lock); + +	return err; +err_out_w_lock: +	spin_unlock(&adapter->fdir_perfect_lock); +err_out: +	kfree(input); +	return -EINVAL; +} + +static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, +					struct ethtool_rxnfc *cmd) +{ +	struct ethtool_rx_flow_spec *fsp = +		(struct ethtool_rx_flow_spec *)&cmd->fs; +	int err; + +	spin_lock(&adapter->fdir_perfect_lock); +	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); +	spin_unlock(&adapter->fdir_perfect_lock); + +	return err; +} + +static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	int ret = -EOPNOTSUPP; + +	switch (cmd->cmd) { +	case ETHTOOL_SRXCLSRLINS: +		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); +		break; +	case ETHTOOL_SRXCLSRLDEL: +		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); +		break; +	default: +		break; +	} + +	return ret; +} + +static const struct ethtool_ops ixgbe_ethtool_ops = { +	.get_settings           = ixgbe_get_settings, +	.set_settings           = ixgbe_set_settings, +	.get_drvinfo            = ixgbe_get_drvinfo, +	.get_regs_len           = ixgbe_get_regs_len, +	.get_regs               = ixgbe_get_regs, +	.get_wol                = ixgbe_get_wol, +	.set_wol                = ixgbe_set_wol, +	.nway_reset             = ixgbe_nway_reset, +	.get_link               = ethtool_op_get_link, +	.get_eeprom_len         = ixgbe_get_eeprom_len, +	.get_eeprom             = ixgbe_get_eeprom, +	.get_ringparam          = ixgbe_get_ringparam, +	.set_ringparam          = ixgbe_set_ringparam, +	.get_pauseparam         = ixgbe_get_pauseparam, +	.set_pauseparam         = ixgbe_set_pauseparam, +	.get_msglevel           = ixgbe_get_msglevel, +	.set_msglevel           = ixgbe_set_msglevel, +	.self_test              = ixgbe_diag_test, +	.get_strings            = ixgbe_get_strings, +	.set_phys_id            = ixgbe_set_phys_id, +	.get_sset_count         = ixgbe_get_sset_count, +	.get_ethtool_stats      = ixgbe_get_ethtool_stats, +	.get_coalesce           = ixgbe_get_coalesce, +	.set_coalesce           = ixgbe_set_coalesce, +	.get_rxnfc		= ixgbe_get_rxnfc, +	.set_rxnfc		= ixgbe_set_rxnfc, +}; + +void ixgbe_set_ethtool_ops(struct net_device *netdev) +{ +	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c new file mode 100644 index 00000000000..824edae7786 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -0,0 +1,836 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. 
+ +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include <linux/if_ether.h> +#include <linux/gfp.h> +#include <linux/if_vlan.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/fc/fc_fs.h> +#include <scsi/fc/fc_fcoe.h> +#include <scsi/libfc.h> +#include <scsi/libfcoe.h> + +/** + * ixgbe_fcoe_clear_ddp - clear the given ddp context + * @ddp - ptr to the ixgbe_fcoe_ddp + * + * Returns : none + * + */ +static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) +{ +	ddp->len = 0; +	ddp->err = 1; +	ddp->udl = NULL; +	ddp->udp = 0UL; +	ddp->sgl = NULL; +	ddp->sgc = 0; +} + +/** + * ixgbe_fcoe_ddp_put - free the ddp context for a given xid + * @netdev: the corresponding net_device + * @xid: the xid whose corresponding ddp context will be freed + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_done + * and it is expected to be called by ULD, i.e., FCP layer of libfc + * to release the corresponding ddp context when the I/O is done. + * + * Returns : data length already ddp-ed in bytes + */ +int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) +{ +	int len = 0; +	struct ixgbe_fcoe *fcoe; +	struct ixgbe_adapter *adapter; +	struct ixgbe_fcoe_ddp *ddp; +	u32 fcbuff; + +	if (!netdev) +		goto out_ddp_put; + +	if (xid >= IXGBE_FCOE_DDP_MAX) +		goto out_ddp_put; + +	adapter = netdev_priv(netdev); +	fcoe = &adapter->fcoe; +	ddp = &fcoe->ddp[xid]; +	if (!ddp->udl) +		goto out_ddp_put; + +	len = ddp->len; +	/* if there is an error, force invalidation of the ddp context */ +	if (ddp->err) { +		spin_lock_bh(&fcoe->lock); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW, +				(xid | IXGBE_FCFLTRW_WE)); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, +				(xid | IXGBE_FCDMARW_WE)); + +		/* guaranteed to be invalidated after 100us */ +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, +				(xid | IXGBE_FCDMARW_RE)); +		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF); +		spin_unlock_bh(&fcoe->lock); +		if (fcbuff & IXGBE_FCBUFF_VALID) +			udelay(100); +	} +	if (ddp->sgl) +		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, +			     DMA_FROM_DEVICE); +	if (ddp->pool) { +		pci_pool_free(ddp->pool, ddp->udl, ddp->udp); +		ddp->pool = NULL; +	} + +	ixgbe_fcoe_clear_ddp(ddp); + +out_ddp_put: +	return len; +} + +/** + * ixgbe_fcoe_ddp_setup - called to set up ddp context + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * Returns : 1 for success and 0 for no ddp + */ +static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, +				struct scatterlist *sgl, unsigned int sgc, +				int target_mode) +{ +	struct ixgbe_adapter *adapter; +	struct ixgbe_hw *hw; +	struct ixgbe_fcoe *fcoe; +	struct ixgbe_fcoe_ddp *ddp; +	struct scatterlist *sg; +	unsigned int i, j, dmacount; +	unsigned int len; +	static const unsigned 
int bufflen = IXGBE_FCBUFF_MIN; +	unsigned int firstoff = 0; +	unsigned int lastsize; +	unsigned int thisoff = 0; +	unsigned int thislen = 0; +	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; +	dma_addr_t addr = 0; +	struct pci_pool *pool; + +	if (!netdev || !sgl) +		return 0; + +	adapter = netdev_priv(netdev); +	if (xid >= IXGBE_FCOE_DDP_MAX) { +		e_warn(drv, "xid=0x%x out-of-range\n", xid); +		return 0; +	} + +	/* no DDP if we are already down or resetting */ +	if (test_bit(__IXGBE_DOWN, &adapter->state) || +	    test_bit(__IXGBE_RESETTING, &adapter->state)) +		return 0; + +	fcoe = &adapter->fcoe; +	if (!fcoe->pool) { +		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); +		return 0; +	} + +	ddp = &fcoe->ddp[xid]; +	if (ddp->sgl) { +		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", +		      xid, ddp->sgl, ddp->sgc); +		return 0; +	} +	ixgbe_fcoe_clear_ddp(ddp); + +	/* setup dma from scsi command sgl */ +	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); +	if (dmacount == 0) { +		e_err(drv, "xid 0x%x DMA map error\n", xid); +		return 0; +	} + +	/* alloc the udl from per cpu ddp pool */ +	pool = *per_cpu_ptr(fcoe->pool, get_cpu()); +	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp); +	if (!ddp->udl) { +		e_err(drv, "failed allocated ddp context\n"); +		goto out_noddp_unmap; +	} +	ddp->pool = pool; +	ddp->sgl = sgl; +	ddp->sgc = sgc; + +	j = 0; +	for_each_sg(sgl, sg, dmacount, i) { +		addr = sg_dma_address(sg); +		len = sg_dma_len(sg); +		while (len) { +			/* max number of buffers allowed in one DDP context */ +			if (j >= IXGBE_BUFFCNT_MAX) { +				e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " +				      "not enough descriptors\n", +				      xid, i, j, dmacount, (u64)addr); +				goto out_noddp_free; +			} + +			/* get the offset and length of the current buffer */ +			thisoff = addr & ((dma_addr_t)bufflen - 1); +			thislen = min((bufflen - thisoff), len); +			/* +			 * all but the 1st buffer (j == 0) +			 * must be aligned on bufflen +			 */ +			if ((j != 0) && (thisoff)) +				goto out_noddp_free; +			/* +			 * all but the last buffer +			 * ((i == (dmacount - 1)) && (thislen == len)) +			 * must end at bufflen +			 */ +			if (((i != (dmacount - 1)) || (thislen != len)) +			    && ((thislen + thisoff) != bufflen)) +				goto out_noddp_free; + +			ddp->udl[j] = (u64)(addr - thisoff); +			/* only the first buffer may have a non-zero offset */ +			if (j == 0) +				firstoff = thisoff; +			len -= thislen; +			addr += thislen; +			j++; +		} +	} +	/* only the last buffer may have non-full bufflen */ +	lastsize = thisoff + thislen; + +	/* +	 * lastsize cannot be bufflen. +	 * If it is, add another buffer with lastsize = 1. +	 */ +	if (lastsize == bufflen) { +		if (j >= IXGBE_BUFFCNT_MAX) { +			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " +				"not enough user buffers. 
We need an extra " +				"buffer because lastsize is bufflen.\n", +				xid, i, j, dmacount, (u64)addr); +			goto out_noddp_free; +		} + +		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); +		j++; +		lastsize = 1; +	} +	put_cpu(); + +	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); +	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); +	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); +	/* Set WRCONTX bit to allow DDP for target */ +	if (target_mode) +		fcbuff |= (IXGBE_FCBUFF_WRCONTX); +	fcbuff |= (IXGBE_FCBUFF_VALID); + +	fcdmarw = xid; +	fcdmarw |= IXGBE_FCDMARW_WE; +	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); + +	fcfltrw = xid; +	fcfltrw |= IXGBE_FCFLTRW_WE; + +	/* program DMA context */ +	hw = &adapter->hw; +	spin_lock_bh(&fcoe->lock); + +	/* turn on last frame indication for target mode as FCP_RSPtarget is +	 * supposed to send FCP_RSP when it is done. */ +	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) { +		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode); +		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL); +		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH; +		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); +	} + +	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); +	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); +	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); +	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); +	/* program filter context */ +	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); +	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); +	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); + +	spin_unlock_bh(&fcoe->lock); + +	return 1; + +out_noddp_free: +	pci_pool_free(pool, ddp->udl, ddp->udp); +	ixgbe_fcoe_clear_ddp(ddp); + +out_noddp_unmap: +	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); +	put_cpu(); +	return 0; +} + +/** + * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. + * + * Returns : 1 for success and 0 for no ddp + */ +int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, +		       struct scatterlist *sgl, unsigned int sgc) +{ +	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); +} + +/** + * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_target + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. The DDP in target mode is a write I/O request + * from the initiator. + * + * Returns : 1 for success and 0 for no ddp + */ +int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, +			    struct scatterlist *sgl, unsigned int sgc) +{ +	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); +} + +/** + * ixgbe_fcoe_ddp - check ddp status and mark it done + * @adapter: ixgbe adapter + * @rx_desc: advanced rx descriptor + * @skb: the skb holding the received data + * + * This checks ddp status. 
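
To make the placement rules in the loop above concrete — only the first udl entry may start at a non-zero offset inside a 4 KB buffer, and only the last may end short of one — here is a standalone sketch, not driver code, that carves a single scatter-gather entry the same way. The DMA address and length are invented for illustration, and IXGBE_FCBUFF_MIN is hard-coded to its 4 KB value.

#include <stdio.h>

#define BUFFLEN 4096u	/* stands in for IXGBE_FCBUFF_MIN */

int main(void)
{
	unsigned long long addr = 0x100000f00ULL;	/* hypothetical DMA address */
	unsigned int len = 6000;			/* hypothetical SG length   */
	unsigned int j = 0;

	while (len) {
		unsigned int thisoff = addr & (BUFFLEN - 1);
		unsigned int thislen = BUFFLEN - thisoff < len ?
				       BUFFLEN - thisoff : len;

		/* udl[j] stores the 4 KB-aligned base; only entry 0 may carry
		 * a non-zero offset, only the final entry a partial length */
		printf("udl[%u]: base=%#llx off=%u len=%u\n",
		       j++, addr - thisoff, thisoff, thislen);

		addr += thislen;
		len -= thislen;
	}
	return 0;
}
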
+ * + * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates + * not passing the skb to ULD, > 0 indicates is the length of data + * being ddped. + */ +int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, +		   union ixgbe_adv_rx_desc *rx_desc, +		   struct sk_buff *skb, +		   u32 staterr) +{ +	u16 xid; +	u32 fctl; +	u32 fceofe, fcerr, fcstat; +	int rc = -EINVAL; +	struct ixgbe_fcoe *fcoe; +	struct ixgbe_fcoe_ddp *ddp; +	struct fc_frame_header *fh; +	struct fcoe_crc_eof *crc; + +	fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR); +	fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE); +	if (fcerr == IXGBE_FCERR_BADCRC) +		skb_checksum_none_assert(skb); +	else +		skb->ip_summed = CHECKSUM_UNNECESSARY; + +	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) +		fh = (struct fc_frame_header *)(skb->data + +			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); +	else +		fh = (struct fc_frame_header *)(skb->data + +			sizeof(struct fcoe_hdr)); +	fctl = ntoh24(fh->fh_f_ctl); +	if (fctl & FC_FC_EX_CTX) +		xid =  be16_to_cpu(fh->fh_ox_id); +	else +		xid =  be16_to_cpu(fh->fh_rx_id); + +	if (xid >= IXGBE_FCOE_DDP_MAX) +		goto ddp_out; + +	fcoe = &adapter->fcoe; +	ddp = &fcoe->ddp[xid]; +	if (!ddp->udl) +		goto ddp_out; + +	if (fcerr | fceofe) +		goto ddp_out; + +	fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT); +	if (fcstat) { +		/* update length of DDPed data */ +		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); +		/* unmap the sg list when FCP_RSP is received */ +		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) { +			pci_unmap_sg(adapter->pdev, ddp->sgl, +				     ddp->sgc, DMA_FROM_DEVICE); +			ddp->err = (fcerr | fceofe); +			ddp->sgl = NULL; +			ddp->sgc = 0; +		} +		/* return 0 to bypass going to ULD for DDPed data */ +		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) +			rc = 0; +		else if (ddp->len) +			rc = ddp->len; +	} +	/* In target mode, check the last data frame of the sequence. +	 * For DDP in target mode, data is already DDPed but the header +	 * indication of the last data frame ould allow is to tell if we +	 * got all the data and the ULP can send FCP_RSP back, as this is +	 * not a full fcoe frame, we fill the trailer here so it won't be +	 * dropped by the ULP stack. 
+	 */ +	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && +	    (fctl & FC_FC_END_SEQ)) { +		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); +		crc->fcoe_eof = FC_EOF_T; +	} +ddp_out: +	return rc; +} + +/** + * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) + * @adapter: ixgbe adapter + * @tx_ring: tx desc ring + * @skb: associated skb + * @tx_flags: tx flags + * @hdr_len: hdr_len to be returned + * + * This sets up large send offload for FCoE + * + * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error + */ +int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, +              u32 tx_flags, u8 *hdr_len) +{ +	struct fc_frame_header *fh; +	u32 vlan_macip_lens; +	u32 fcoe_sof_eof = 0; +	u32 mss_l4len_idx; +	u8 sof, eof; + +	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { +		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", +			skb_shinfo(skb)->gso_type); +		return -EINVAL; +	} + +	/* resets the header to point fcoe/fc */ +	skb_set_network_header(skb, skb->mac_len); +	skb_set_transport_header(skb, skb->mac_len + +				 sizeof(struct fcoe_hdr)); + +	/* sets up SOF and ORIS */ +	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; +	switch (sof) { +	case FC_SOF_I2: +		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; +		break; +	case FC_SOF_I3: +		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | +			       IXGBE_ADVTXD_FCOEF_ORIS; +		break; +	case FC_SOF_N2: +		break; +	case FC_SOF_N3: +		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; +		break; +	default: +		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); +		return -EINVAL; +	} + +	/* the first byte of the last dword is EOF */ +	skb_copy_bits(skb, skb->len - 4, &eof, 1); +	/* sets up EOF and ORIE */ +	switch (eof) { +	case FC_EOF_N: +		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; +		break; +	case FC_EOF_T: +		/* lso needs ORIE */ +		if (skb_is_gso(skb)) +			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | +					IXGBE_ADVTXD_FCOEF_ORIE; +		else +			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; +		break; +	case FC_EOF_NI: +		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; +		break; +	case FC_EOF_A: +		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; +		break; +	default: +		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); +		return -EINVAL; +	} + +	/* sets up PARINC indicating data offset */ +	fh = (struct fc_frame_header *)skb_transport_header(skb); +	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) +		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; + +	/* include trailer in headlen as it is replicated per frame */ +	*hdr_len = sizeof(struct fcoe_crc_eof); + +	/* hdr_len includes fc_hdr if FCoE LSO is enabled */ +	if (skb_is_gso(skb)) +		*hdr_len += (skb_transport_offset(skb) + +			     sizeof(struct fc_frame_header)); + +	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ +	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; +	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + +	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ +	vlan_macip_lens = skb_transport_offset(skb) + +			  sizeof(struct fc_frame_header); +	vlan_macip_lens |= (skb_transport_offset(skb) - 4) +			   << IXGBE_ADVTXD_MACLEN_SHIFT; +	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + +	/* write context desc */ +	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, +			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); + +	return skb_is_gso(skb); +} + +static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) +{ +	unsigned int cpu; +	struct pci_pool **pool; + +	for_each_possible_cpu(cpu) { +		pool = per_cpu_ptr(fcoe->pool, cpu); +		if (*pool) +	
		pci_pool_destroy(*pool); +	} +	free_percpu(fcoe->pool); +	fcoe->pool = NULL; +} + +static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_fcoe *fcoe = &adapter->fcoe; +	unsigned int cpu; +	struct pci_pool **pool; +	char pool_name[32]; + +	fcoe->pool = alloc_percpu(struct pci_pool *); +	if (!fcoe->pool) +		return; + +	/* allocate pci pool for each cpu */ +	for_each_possible_cpu(cpu) { +		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); +		pool = per_cpu_ptr(fcoe->pool, cpu); +		*pool = pci_pool_create(pool_name, +					adapter->pdev, IXGBE_FCPTR_MAX, +					IXGBE_FCPTR_ALIGN, PAGE_SIZE); +		if (!*pool) { +			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); +			ixgbe_fcoe_ddp_pools_free(fcoe); +			return; +		} +	} +} + +/** + * ixgbe_configure_fcoe - configures registers for fcoe at start + * @adapter: ptr to ixgbe adapter + * + * This sets up FCoE related registers + * + * Returns : none + */ +void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) +{ +	int i, fcoe_q, fcoe_i; +	struct ixgbe_hw *hw = &adapter->hw; +	struct ixgbe_fcoe *fcoe = &adapter->fcoe; +	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + +	if (!fcoe->pool) { +		spin_lock_init(&fcoe->lock); + +		ixgbe_fcoe_ddp_pools_alloc(adapter); +		if (!fcoe->pool) { +			e_err(drv, "failed to alloc percpu fcoe DDP pools\n"); +			return; +		} + +		/* Extra buffer to be shared by all DDPs for HW work around */ +		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); +		if (fcoe->extra_ddp_buffer == NULL) { +			e_err(drv, "failed to allocated extra DDP buffer\n"); +			goto out_ddp_pools; +		} + +		fcoe->extra_ddp_buffer_dma = +			dma_map_single(&adapter->pdev->dev, +				       fcoe->extra_ddp_buffer, +				       IXGBE_FCBUFF_MIN, +				       DMA_FROM_DEVICE); +		if (dma_mapping_error(&adapter->pdev->dev, +				      fcoe->extra_ddp_buffer_dma)) { +			e_err(drv, "failed to map extra DDP buffer\n"); +			goto out_extra_ddp_buffer; +		} +	} + +	/* Enable L2 eth type filter for FCoE */ +	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), +			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); +	/* Enable L2 eth type filter for FIP */ +	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), +			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); +	if (adapter->ring_feature[RING_F_FCOE].indices) { +		/* Use multiple rx queues for FCoE by redirection table */ +		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { +			fcoe_i = f->mask + i % f->indices; +			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; +			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; +			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); +		} +		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); +		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); +	} else  { +		/* Use single rx queue for FCoE */ +		fcoe_i = f->mask; +		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; +		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); +		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), +				IXGBE_ETQS_QUEUE_EN | +				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); +	} +	/* send FIP frames to the first FCoE queue */ +	fcoe_i = f->mask; +	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; +	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), +			IXGBE_ETQS_QUEUE_EN | +			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); + +	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, +			IXGBE_FCRXCTRL_FCOELLI | +			IXGBE_FCRXCTRL_FCCRCBO | +			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); +	return; + +out_extra_ddp_buffer: +	kfree(fcoe->extra_ddp_buffer); +out_ddp_pools: +	
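
As an aside on the redirection-table loop above: FCoE receive traffic is spread over the reserved ring range by simple modulo arithmetic. The helper below is only an illustrative restatement of that index calculation, assuming (as the loop appears to) that f->mask is the offset of the first FCoE ring and f->indices the number of FCoE rings; the entry mask applied in the driver is omitted for brevity.

/* Illustrative only: ring index programmed into redirection entry i,
 * mirroring "fcoe_i = f->mask + i % f->indices" above. With a hypothetical
 * first_ring of 8 and nr_rings of 4, entries map to rings 8, 9, 10, 11,
 * 8, 9, ... around the FCoE ring set.
 */
static unsigned int fcoe_reta_ring(unsigned int i, unsigned int first_ring,
				   unsigned int nr_rings)
{
	return first_ring + i % nr_rings;
}
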
ixgbe_fcoe_ddp_pools_free(fcoe); +} + +/** + * ixgbe_cleanup_fcoe - release all fcoe ddp context resources + * @adapter : ixgbe adapter + * + * Cleans up outstanding ddp context resources + * + * Returns : none + */ +void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) +{ +	int i; +	struct ixgbe_fcoe *fcoe = &adapter->fcoe; + +	if (!fcoe->pool) +		return; + +	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) +		ixgbe_fcoe_ddp_put(adapter->netdev, i); +	dma_unmap_single(&adapter->pdev->dev, +			 fcoe->extra_ddp_buffer_dma, +			 IXGBE_FCBUFF_MIN, +			 DMA_FROM_DEVICE); +	kfree(fcoe->extra_ddp_buffer); +	ixgbe_fcoe_ddp_pools_free(fcoe); +} + +/** + * ixgbe_fcoe_enable - turn on FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns on FCoE offload feature in 82599. + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_fcoe_enable(struct net_device *netdev) +{ +	int rc = -EINVAL; +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + +	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) +		goto out_enable; + +	atomic_inc(&fcoe->refcnt); +	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) +		goto out_enable; + +	e_info(drv, "Enabling FCoE offload features.\n"); +	if (netif_running(netdev)) +		netdev->netdev_ops->ndo_stop(netdev); + +	ixgbe_clear_interrupt_scheme(adapter); + +	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; +	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; +	netdev->features |= NETIF_F_FCOE_CRC; +	netdev->features |= NETIF_F_FSO; +	netdev->features |= NETIF_F_FCOE_MTU; +	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + +	ixgbe_init_interrupt_scheme(adapter); +	netdev_features_change(netdev); + +	if (netif_running(netdev)) +		netdev->netdev_ops->ndo_open(netdev); +	rc = 0; + +out_enable: +	return rc; +} + +/** + * ixgbe_fcoe_disable - turn off FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns off FCoE offload feature in 82599. + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_fcoe_disable(struct net_device *netdev) +{ +	int rc = -EINVAL; +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_fcoe *fcoe = &adapter->fcoe; + +	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) +		goto out_disable; + +	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) +		goto out_disable; + +	if (!atomic_dec_and_test(&fcoe->refcnt)) +		goto out_disable; + +	e_info(drv, "Disabling FCoE offload features.\n"); +	netdev->features &= ~NETIF_F_FCOE_CRC; +	netdev->features &= ~NETIF_F_FSO; +	netdev->features &= ~NETIF_F_FCOE_MTU; +	netdev->fcoe_ddp_xid = 0; +	netdev_features_change(netdev); + +	if (netif_running(netdev)) +		netdev->netdev_ops->ndo_stop(netdev); + +	ixgbe_clear_interrupt_scheme(adapter); +	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +	adapter->ring_feature[RING_F_FCOE].indices = 0; +	ixgbe_cleanup_fcoe(adapter); +	ixgbe_init_interrupt_scheme(adapter); + +	if (netif_running(netdev)) +		netdev->netdev_ops->ndo_open(netdev); +	rc = 0; + +out_disable: +	return rc; +} + +/** + * ixgbe_fcoe_get_wwn - get world wide name for the node or the port + * @netdev : ixgbe adapter + * @wwn : the world wide name + * @type: the type of world wide name + * + * Returns the node or port world wide name if both the prefix and the san + * mac address are valid, then the wwn is formed based on the NAA-2 for + * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 
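
The description above maps onto a single 64-bit value: a 16-bit prefix followed by the six bytes of the SAN MAC address. The helper below is only an illustrative restatement of the shift/or sequence used in the function that follows; the prefix and MAC values quoted in the comment are hypothetical.

/* Illustrative only: assemble a WWN from a 16-bit prefix and a 6-byte MAC.
 * With a hypothetical prefix of 0x2000 and MAC 00:1b:21:aa:bb:cc the
 * result is 0x2000001b21aabbcc.
 */
static u64 wwn_from_prefix_and_mac(u16 prefix, const u8 mac[6])
{
	u64 wwn = (u64)prefix << 48;
	int i;

	for (i = 0; i < 6; i++)
		wwn |= (u64)mac[i] << (8 * (5 - i));

	return wwn;
}
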
+ * + * Returns : 0 on success + */ +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) +{ +	int rc = -EINVAL; +	u16 prefix = 0xffff; +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_mac_info *mac = &adapter->hw.mac; + +	switch (type) { +	case NETDEV_FCOE_WWNN: +		prefix = mac->wwnn_prefix; +		break; +	case NETDEV_FCOE_WWPN: +		prefix = mac->wwpn_prefix; +		break; +	default: +		break; +	} + +	if ((prefix != 0xffff) && +	    is_valid_ether_addr(mac->san_addr)) { +		*wwn = ((u64) prefix << 48) | +		       ((u64) mac->san_addr[0] << 40) | +		       ((u64) mac->san_addr[1] << 32) | +		       ((u64) mac->san_addr[2] << 24) | +		       ((u64) mac->san_addr[3] << 16) | +		       ((u64) mac->san_addr[4] << 8)  | +		       ((u64) mac->san_addr[5]); +		rc = 0; +	} +	return rc; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h new file mode 100644 index 00000000000..99de145e290 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -0,0 +1,81 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_FCOE_H +#define _IXGBE_FCOE_H + +#include <scsi/fc/fc_fs.h> +#include <scsi/fc/fc_fcoe.h> + +/* shift bits within STAT fo FCSTAT */ +#define IXGBE_RXDADV_FCSTAT_SHIFT	4 + +/* ddp user buffer */ +#define IXGBE_BUFFCNT_MAX	256	/* 8 bits bufcnt */ +#define IXGBE_FCPTR_ALIGN	16 +#define IXGBE_FCPTR_MAX	(IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define IXGBE_FCBUFF_4KB	0x0 +#define IXGBE_FCBUFF_8KB	0x1 +#define IXGBE_FCBUFF_16KB	0x2 +#define IXGBE_FCBUFF_64KB	0x3 +#define IXGBE_FCBUFF_MAX	65536	/* 64KB max */ +#define IXGBE_FCBUFF_MIN	4096	/* 4KB min */ +#define IXGBE_FCOE_DDP_MAX	512	/* 9 bits xid */ + +/* Default traffic class to use for FCoE */ +#define IXGBE_FCOE_DEFTC	3 + +/* fcerr */ +#define IXGBE_FCERR_BADCRC       0x00100000 + +/* FCoE DDP for target mode */ +#define __IXGBE_FCOE_TARGET	1 + +struct ixgbe_fcoe_ddp { +	int len; +	u32 err; +	unsigned int sgc; +	struct scatterlist *sgl; +	dma_addr_t udp; +	u64 *udl; +	struct pci_pool *pool; +}; + +struct ixgbe_fcoe { +	struct pci_pool **pool; +	atomic_t refcnt; +	spinlock_t lock; +	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; +	unsigned char *extra_ddp_buffer; +	dma_addr_t extra_ddp_buffer_dma; +	unsigned long mode; +#ifdef CONFIG_IXGBE_DCB +	u8 up; +#endif +}; + +#endif /* _IXGBE_FCOE_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c new file mode 100644 index 00000000000..e86297b3273 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -0,0 +1,7934 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/string.h> +#include <linux/in.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/pkt_sched.h> +#include <linux/ipv6.h> +#include <linux/slab.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/prefetch.h> +#include <scsi/fc/fc_fcoe.h> + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_sriov.h" + +char ixgbe_driver_name[] = "ixgbe"; +static const char ixgbe_driver_string[] = +			      "Intel(R) 10 Gigabit PCI Express Network Driver"; +#define MAJ 3 +#define MIN 4 +#define BUILD 8 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ +	__stringify(BUILD) "-k" +const char ixgbe_driver_version[] = DRV_VERSION; +static const char ixgbe_copyright[] = +				"Copyright (c) 1999-2011 Intel Corporation."; + +static const struct ixgbe_info *ixgbe_info_tbl[] = { +	[board_82598] = &ixgbe_82598_info, +	[board_82599] = &ixgbe_82599_info, +	[board_X540] = &ixgbe_X540_info, +}; + +/* ixgbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + *   Class, Class Mask, private data (not used) } + */ +static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), +	 board_82598 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), +	 board_X540 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), +	 board_82599 }, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), +	 board_82599 }, + +	/* required last entry */ +	{0, } +}; +MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); + +#ifdef CONFIG_IXGBE_DCA 
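
A side note on the DRV_VERSION macro defined above: it relies on the kernel's __stringify helper, which needs the usual two-level expansion so that MAJ, MIN and BUILD are macro-expanded before being turned into string literals, and on adjacent string literals concatenating. A minimal userspace sketch of the same idiom (modeled on, not copied from, include/linux/stringify.h):

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define MAJ 3
#define MIN 4
#define BUILD 8
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"

int main(void)
{
	puts(DRV_VERSION);	/* prints "3.4.8-k" */
	return 0;
}
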
+static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, +			    void *p); +static struct notifier_block dca_notifier = { +	.notifier_call = ixgbe_notify_dca, +	.next          = NULL, +	.priority      = 0 +}; +#endif + +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, +		 "Maximum number of virtual functions to allocate per physical function"); +#endif /* CONFIG_PCI_IOV */ + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 gcr; +	u32 gpie; +	u32 vmdctl; + +#ifdef CONFIG_PCI_IOV +	/* disable iov and allow time for transactions to clear */ +	pci_disable_sriov(adapter->pdev); +#endif + +	/* turn off device IOV mode */ +	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); +	gcr &= ~(IXGBE_GCR_EXT_SRIOV); +	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr); +	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); +	gpie &= ~IXGBE_GPIE_VTMODE_MASK; +	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + +	/* set default pool back to 0 */ +	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); +	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; +	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); +	IXGBE_WRITE_FLUSH(hw); + +	/* take a breather then clean up driver data */ +	msleep(100); + +	kfree(adapter->vfinfo); +	adapter->vfinfo = NULL; + +	adapter->num_vfs = 0; +	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; +} + +static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) +{ +	if (!test_bit(__IXGBE_DOWN, &adapter->state) && +	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) +		schedule_work(&adapter->service_task); +} + +static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) +{ +	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); + +	/* flush memory to make sure state is correct before next watchog */ +	smp_mb__before_clear_bit(); +	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); +} + +struct ixgbe_reg_info { +	u32 ofs; +	char *name; +}; + +static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { + +	/* General Registers */ +	{IXGBE_CTRL, "CTRL"}, +	{IXGBE_STATUS, "STATUS"}, +	{IXGBE_CTRL_EXT, "CTRL_EXT"}, + +	/* Interrupt Registers */ +	{IXGBE_EICR, "EICR"}, + +	/* RX Registers */ +	{IXGBE_SRRCTL(0), "SRRCTL"}, +	{IXGBE_DCA_RXCTRL(0), "DRXCTL"}, +	{IXGBE_RDLEN(0), "RDLEN"}, +	{IXGBE_RDH(0), "RDH"}, +	{IXGBE_RDT(0), "RDT"}, +	{IXGBE_RXDCTL(0), "RXDCTL"}, +	{IXGBE_RDBAL(0), "RDBAL"}, +	{IXGBE_RDBAH(0), "RDBAH"}, + +	/* TX Registers */ +	{IXGBE_TDBAL(0), "TDBAL"}, +	{IXGBE_TDBAH(0), "TDBAH"}, +	{IXGBE_TDLEN(0), "TDLEN"}, +	{IXGBE_TDH(0), "TDH"}, +	{IXGBE_TDT(0), "TDT"}, +	{IXGBE_TXDCTL(0), "TXDCTL"}, + +	/* List Terminator */ +	{} +}; + + +/* + * ixgbe_regdump - register printout routine + */ +static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) +{ +	int i = 0, j = 0; +	char rname[16]; +	u32 regs[64]; + +	switch (reginfo->ofs) { +	case IXGBE_SRRCTL(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); +		break; +	case IXGBE_DCA_RXCTRL(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +		break; +	case IXGBE_RDLEN(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); +		break; +	case IXGBE_RDH(0): +		for (i = 0; i < 64; i++) +			
regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); +		break; +	case IXGBE_RDT(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); +		break; +	case IXGBE_RXDCTL(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); +		break; +	case IXGBE_RDBAL(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); +		break; +	case IXGBE_RDBAH(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); +		break; +	case IXGBE_TDBAL(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); +		break; +	case IXGBE_TDBAH(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); +		break; +	case IXGBE_TDLEN(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); +		break; +	case IXGBE_TDH(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); +		break; +	case IXGBE_TDT(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); +		break; +	case IXGBE_TXDCTL(0): +		for (i = 0; i < 64; i++) +			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); +		break; +	default: +		pr_info("%-15s %08x\n", reginfo->name, +			IXGBE_READ_REG(hw, reginfo->ofs)); +		return; +	} + +	for (i = 0; i < 8; i++) { +		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); +		pr_err("%-15s", rname); +		for (j = 0; j < 8; j++) +			pr_cont(" %08x", regs[i*8+j]); +		pr_cont("\n"); +	} + +} + +/* + * ixgbe_dump - Print registers, tx-rings and rx-rings + */ +static void ixgbe_dump(struct ixgbe_adapter *adapter) +{ +	struct net_device *netdev = adapter->netdev; +	struct ixgbe_hw *hw = &adapter->hw; +	struct ixgbe_reg_info *reginfo; +	int n = 0; +	struct ixgbe_ring *tx_ring; +	struct ixgbe_tx_buffer *tx_buffer_info; +	union ixgbe_adv_tx_desc *tx_desc; +	struct my_u0 { u64 a; u64 b; } *u0; +	struct ixgbe_ring *rx_ring; +	union ixgbe_adv_rx_desc *rx_desc; +	struct ixgbe_rx_buffer *rx_buffer_info; +	u32 staterr; +	int i = 0; + +	if (!netif_msg_hw(adapter)) +		return; + +	/* Print netdevice Info */ +	if (netdev) { +		dev_info(&adapter->pdev->dev, "Net device Info\n"); +		pr_info("Device Name     state            " +			"trans_start      last_rx\n"); +		pr_info("%-15s %016lX %016lX %016lX\n", +			netdev->name, +			netdev->state, +			netdev->trans_start, +			netdev->last_rx); +	} + +	/* Print Registers */ +	dev_info(&adapter->pdev->dev, "Register Dump\n"); +	pr_info(" Register Name   Value\n"); +	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; +	     reginfo->name; reginfo++) { +		ixgbe_regdump(hw, reginfo); +	} + +	/* Print TX Ring Summary */ +	if (!netdev || !netif_running(netdev)) +		goto exit; + +	dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); +	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n"); +	for (n = 0; n < adapter->num_tx_queues; n++) { +		tx_ring = adapter->tx_ring[n]; +		tx_buffer_info = +			&tx_ring->tx_buffer_info[tx_ring->next_to_clean]; +		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", +			   n, tx_ring->next_to_use, tx_ring->next_to_clean, +			   (u64)tx_buffer_info->dma, +			   tx_buffer_info->length, +			   tx_buffer_info->next_to_watch, +			   (u64)tx_buffer_info->time_stamp); +	} + +	/* Print TX Rings */ +	if (!netif_msg_tx_done(adapter)) +		goto rx_ring_summary; + +	dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + +	/* Transmit Descriptor Formats +	 * +	 * Advanced Transmit Descriptor +	 *   +--------------------------------------------------------------+ +	 * 0 |         
Buffer Address [63:0]                                | +	 *   +--------------------------------------------------------------+ +	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN | +	 *   +--------------------------------------------------------------+ +	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0 +	 */ + +	for (n = 0; n < adapter->num_tx_queues; n++) { +		tx_ring = adapter->tx_ring[n]; +		pr_info("------------------------------------\n"); +		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); +		pr_info("------------------------------------\n"); +		pr_info("T [desc]     [address 63:0  ] " +			"[PlPOIdStDDt Ln] [bi->dma       ] " +			"leng  ntw timestamp        bi->skb\n"); + +		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { +			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); +			tx_buffer_info = &tx_ring->tx_buffer_info[i]; +			u0 = (struct my_u0 *)tx_desc; +			pr_info("T [0x%03X]    %016llX %016llX %016llX" +				" %04X  %3X %016llX %p", i, +				le64_to_cpu(u0->a), +				le64_to_cpu(u0->b), +				(u64)tx_buffer_info->dma, +				tx_buffer_info->length, +				tx_buffer_info->next_to_watch, +				(u64)tx_buffer_info->time_stamp, +				tx_buffer_info->skb); +			if (i == tx_ring->next_to_use && +				i == tx_ring->next_to_clean) +				pr_cont(" NTC/U\n"); +			else if (i == tx_ring->next_to_use) +				pr_cont(" NTU\n"); +			else if (i == tx_ring->next_to_clean) +				pr_cont(" NTC\n"); +			else +				pr_cont("\n"); + +			if (netif_msg_pktdata(adapter) && +				tx_buffer_info->dma != 0) +				print_hex_dump(KERN_INFO, "", +					DUMP_PREFIX_ADDRESS, 16, 1, +					phys_to_virt(tx_buffer_info->dma), +					tx_buffer_info->length, true); +		} +	} + +	/* Print RX Rings Summary */ +rx_ring_summary: +	dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); +	pr_info("Queue [NTU] [NTC]\n"); +	for (n = 0; n < adapter->num_rx_queues; n++) { +		rx_ring = adapter->rx_ring[n]; +		pr_info("%5d %5X %5X\n", +			n, rx_ring->next_to_use, rx_ring->next_to_clean); +	} + +	/* Print RX Rings */ +	if (!netif_msg_rx_status(adapter)) +		goto exit; + +	dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + +	/* Advanced Receive Descriptor (Read) Format +	 *    63                                           1        0 +	 *    +-----------------------------------------------------+ +	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE| +	 *    +----------------------------------------------+------+ +	 *  8 |       Header Buffer Address [63:1]           |  DD  | +	 *    +-----------------------------------------------------+ +	 * +	 * +	 * Advanced Receive Descriptor (Write-Back) Format +	 * +	 *   63       48 47    32 31  30      21 20 16 15   4 3     0 +	 *   +------------------------------------------------------+ +	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS | +	 *   | Checksum   Ident  |   |           |    | Type | Type | +	 *   +------------------------------------------------------+ +	 * 8 | VLAN Tag | Length | Extended Error | Extended Status | +	 *   +------------------------------------------------------+ +	 *   63       48 47    32 31            20 19               0 +	 */ +	for (n = 0; n < adapter->num_rx_queues; n++) { +		rx_ring = adapter->rx_ring[n]; +		pr_info("------------------------------------\n"); +		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); +		pr_info("------------------------------------\n"); +		pr_info("R  [desc]      [ PktBuf     A0] " +			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] " +			"<-- Adv Rx Read format\n"); +		
pr_info("RWB[desc]      [PcsmIpSHl PtRs] " +			"[vl er S cks ln] ---------------- [bi->skb] " +			"<-- Adv Rx Write-Back format\n"); + +		for (i = 0; i < rx_ring->count; i++) { +			rx_buffer_info = &rx_ring->rx_buffer_info[i]; +			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); +			u0 = (struct my_u0 *)rx_desc; +			staterr = le32_to_cpu(rx_desc->wb.upper.status_error); +			if (staterr & IXGBE_RXD_STAT_DD) { +				/* Descriptor Done */ +				pr_info("RWB[0x%03X]     %016llX " +					"%016llX ---------------- %p", i, +					le64_to_cpu(u0->a), +					le64_to_cpu(u0->b), +					rx_buffer_info->skb); +			} else { +				pr_info("R  [0x%03X]     %016llX " +					"%016llX %016llX %p", i, +					le64_to_cpu(u0->a), +					le64_to_cpu(u0->b), +					(u64)rx_buffer_info->dma, +					rx_buffer_info->skb); + +				if (netif_msg_pktdata(adapter)) { +					print_hex_dump(KERN_INFO, "", +					   DUMP_PREFIX_ADDRESS, 16, 1, +					   phys_to_virt(rx_buffer_info->dma), +					   rx_ring->rx_buf_len, true); + +					if (rx_ring->rx_buf_len +						< IXGBE_RXBUFFER_2048) +						print_hex_dump(KERN_INFO, "", +						  DUMP_PREFIX_ADDRESS, 16, 1, +						  phys_to_virt( +						    rx_buffer_info->page_dma + +						    rx_buffer_info->page_offset +						  ), +						  PAGE_SIZE/2, true); +				} +			} + +			if (i == rx_ring->next_to_use) +				pr_cont(" NTU\n"); +			else if (i == rx_ring->next_to_clean) +				pr_cont(" NTC\n"); +			else +				pr_cont("\n"); + +		} +	} + +exit: +	return; +} + +static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) +{ +	u32 ctrl_ext; + +	/* Let firmware take over control of h/w */ +	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, +			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); +} + +static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) +{ +	u32 ctrl_ext; + +	/* Let firmware know the driver has taken over */ +	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, +			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); +} + +/* + * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, +			   u8 queue, u8 msix_vector) +{ +	u32 ivar, index; +	struct ixgbe_hw *hw = &adapter->hw; +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		msix_vector |= IXGBE_IVAR_ALLOC_VAL; +		if (direction == -1) +			direction = 0; +		index = (((direction * 64) + queue) >> 2) & 0x1F; +		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); +		ivar &= ~(0xFF << (8 * (queue & 0x3))); +		ivar |= (msix_vector << (8 * (queue & 0x3))); +		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		if (direction == -1) { +			/* other causes */ +			msix_vector |= IXGBE_IVAR_ALLOC_VAL; +			index = ((queue & 1) * 8); +			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); +			ivar &= ~(0xFF << index); +			ivar |= (msix_vector << index); +			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); +			break; +		} else { +			/* tx or rx causes */ +			msix_vector |= IXGBE_IVAR_ALLOC_VAL; +			index = ((16 * (queue & 1)) + (8 * direction)); +			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); +			ivar &= ~(0xFF << index); +			ivar |= (msix_vector << index); +			IXGBE_WRITE_REG(hw, 
IXGBE_IVAR(queue >> 1), ivar); +			break; +		} +	default: +		break; +	} +} + +static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, +					  u64 qmask) +{ +	u32 mask; + +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82598EB: +		mask = (IXGBE_EIMS_RTX_QUEUE & qmask); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		mask = (qmask & 0xFFFFFFFF); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); +		mask = (qmask >> 32); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); +		break; +	default: +		break; +	} +} + +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, +				      struct ixgbe_tx_buffer *tx_buffer_info) +{ +	if (tx_buffer_info->dma) { +		if (tx_buffer_info->mapped_as_page) +			dma_unmap_page(tx_ring->dev, +				       tx_buffer_info->dma, +				       tx_buffer_info->length, +				       DMA_TO_DEVICE); +		else +			dma_unmap_single(tx_ring->dev, +					 tx_buffer_info->dma, +					 tx_buffer_info->length, +					 DMA_TO_DEVICE); +		tx_buffer_info->dma = 0; +	} +	if (tx_buffer_info->skb) { +		dev_kfree_skb_any(tx_buffer_info->skb); +		tx_buffer_info->skb = NULL; +	} +	tx_buffer_info->time_stamp = 0; +	/* tx_buffer_info must be completely set up in the transmit path */ +} + +static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	struct ixgbe_hw_stats *hwstats = &adapter->stats; +	u32 data = 0; +	u32 xoff[8] = {0}; +	int i; + +	if ((hw->fc.current_mode == ixgbe_fc_full) || +	    (hw->fc.current_mode == ixgbe_fc_rx_pause)) { +		switch (hw->mac.type) { +		case ixgbe_mac_82598EB: +			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); +			break; +		default: +			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); +		} +		hwstats->lxoffrxc += data; + +		/* refill credits (no tx hang) if we received xoff */ +		if (!data) +			return; + +		for (i = 0; i < adapter->num_tx_queues; i++) +			clear_bit(__IXGBE_HANG_CHECK_ARMED, +				  &adapter->tx_ring[i]->state); +		return; +	} else if (!(adapter->dcb_cfg.pfc_mode_enable)) +		return; + +	/* update stats for each tc, only valid with PFC enabled */ +	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { +		switch (hw->mac.type) { +		case ixgbe_mac_82598EB: +			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); +			break; +		default: +			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); +		} +		hwstats->pxoffrxc[i] += xoff[i]; +	} + +	/* disarm tx queues that have received xoff frames */ +	for (i = 0; i < adapter->num_tx_queues; i++) { +		struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; +		u8 tc = tx_ring->dcb_tc; + +		if (xoff[tc]) +			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); +	} +} + +static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) +{ +	return ring->tx_stats.completed; +} + +static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) +{ +	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); +	struct ixgbe_hw *hw = &adapter->hw; + +	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); +	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + +	if (head != tail) +		return (head < tail) ? +			tail - head : (tail + ring->count - head); + +	return 0; +} + +static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +{ +	u32 tx_done = ixgbe_get_tx_completed(tx_ring); +	u32 tx_done_old = tx_ring->tx_stats.tx_done_old; +	u32 tx_pending = ixgbe_get_tx_pending(tx_ring); +	bool ret = false; + +	clear_check_for_tx_hang(tx_ring); + +	/* +	 * Check for a hung queue, but be thorough. 
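
The comment above describes a two-pass ("arm, then report") scheme. The standalone sketch below restates that control flow outside of the ring and bit-ops plumbing; it is illustrative only and not part of the driver.

/* Illustrative only: report a hang only when two consecutive checks see no
 * completed transmits while work is still pending, mirroring the ARMED-bit
 * logic in ixgbe_check_tx_hang().
 */
static bool check_tx_progress(unsigned int done, unsigned int *done_old,
			      unsigned int pending, bool *armed)
{
	if (done == *done_old && pending) {
		bool was_armed = *armed;

		*armed = true;		/* first strike arms ...             */
		return was_armed;	/* ... second strike reports the hang */
	}

	*done_old = done;		/* progress was made                 */
	*armed = false;			/* disarm the countdown              */
	return false;
}
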
This verifies +	 * that a transmit has been completed since the previous +	 * check AND there is at least one packet pending. The +	 * ARMED bit is set to indicate a potential hang. The +	 * bit is cleared if a pause frame is received to remove +	 * false hang detection due to PFC or 802.3x frames. By +	 * requiring this to fail twice we avoid races with +	 * pfc clearing the ARMED bit and conditions where we +	 * run the check_tx_hang logic with a transmit completion +	 * pending but without time to complete it yet. +	 */ +	if ((tx_done_old == tx_done) && tx_pending) { +		/* make sure it is true for two checks in a row */ +		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, +				       &tx_ring->state); +	} else { +		/* update completed stats and continue */ +		tx_ring->tx_stats.tx_done_old = tx_done; +		/* reset the countdown */ +		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); +	} + +	return ret; +} + +/** + * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) +{ + +	/* Do the reset outside of interrupt context */ +	if (!test_bit(__IXGBE_DOWN, &adapter->state)) { +		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; +		ixgbe_service_event_schedule(adapter); +	} +} + +/** + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, +			       struct ixgbe_ring *tx_ring) +{ +	struct ixgbe_adapter *adapter = q_vector->adapter; +	union ixgbe_adv_tx_desc *tx_desc, *eop_desc; +	struct ixgbe_tx_buffer *tx_buffer_info; +	unsigned int total_bytes = 0, total_packets = 0; +	u16 i, eop, count = 0; + +	i = tx_ring->next_to_clean; +	eop = tx_ring->tx_buffer_info[i].next_to_watch; +	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); + +	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && +	       (count < q_vector->tx.work_limit)) { +		bool cleaned = false; +		rmb(); /* read buffer_info after eop_desc */ +		for ( ; !cleaned; count++) { +			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); +			tx_buffer_info = &tx_ring->tx_buffer_info[i]; + +			tx_desc->wb.status = 0; +			cleaned = (i == eop); + +			i++; +			if (i == tx_ring->count) +				i = 0; + +			if (cleaned && tx_buffer_info->skb) { +				total_bytes += tx_buffer_info->bytecount; +				total_packets += tx_buffer_info->gso_segs; +			} + +			ixgbe_unmap_and_free_tx_resource(tx_ring, +							 tx_buffer_info); +		} + +		tx_ring->tx_stats.completed++; +		eop = tx_ring->tx_buffer_info[i].next_to_watch; +		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); +	} + +	tx_ring->next_to_clean = i; +	tx_ring->stats.bytes += total_bytes; +	tx_ring->stats.packets += total_packets; +	u64_stats_update_begin(&tx_ring->syncp); +	q_vector->tx.total_bytes += total_bytes; +	q_vector->tx.total_packets += total_packets; +	u64_stats_update_end(&tx_ring->syncp); + +	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { +		/* schedule immediate reset if we believe we hung */ +		struct ixgbe_hw *hw = &adapter->hw; +		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); +		e_err(drv, "Detected Tx Unit Hang\n" +			"  Tx Queue             <%d>\n" +			"  TDH, TDT             <%x>, <%x>\n" +			"  next_to_use          <%x>\n" +			"  next_to_clean        <%x>\n" +			"tx_buffer_info[next_to_clean]\n" +			"  time_stamp           <%lx>\n" +			"  jiffies              <%lx>\n", +			tx_ring->queue_index, +			
IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), +			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), +			tx_ring->next_to_use, eop, +			tx_ring->tx_buffer_info[eop].time_stamp, jiffies); + +		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + +		e_info(probe, +		       "tx hang %d detected on queue %d, resetting adapter\n", +			adapter->tx_timeout_count + 1, tx_ring->queue_index); + +		/* schedule immediate reset if we believe we hung */ +		ixgbe_tx_timeout_reset(adapter); + +		/* the adapter is about to reset, no point in enabling stuff */ +		return true; +	} + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) +	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && +		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { +		/* Make sure that anybody stopping the queue after this +		 * sees the new next_to_clean. +		 */ +		smp_mb(); +		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && +		    !test_bit(__IXGBE_DOWN, &adapter->state)) { +			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); +			++tx_ring->tx_stats.restart_queue; +		} +	} + +	return count < q_vector->tx.work_limit; +} + +#ifdef CONFIG_IXGBE_DCA +static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, +				struct ixgbe_ring *rx_ring, +				int cpu) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 rxctrl; +	u8 reg_idx = rx_ring->reg_idx; + +	rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; +		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; +		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << +			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); +		break; +	default: +		break; +	} +	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; +	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; +	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); +	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); +} + +static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, +				struct ixgbe_ring *tx_ring, +				int cpu) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 txctrl; +	u8 reg_idx = tx_ring->reg_idx; + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); +		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; +		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); +		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; +		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); +		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; +		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << +			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); +		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; +		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); +		break; +	default: +		break; +	} +} + +static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) +{ +	struct ixgbe_adapter *adapter = q_vector->adapter; +	int cpu = get_cpu(); +	long r_idx; +	int i; + +	if (q_vector->cpu == cpu) +		goto out_no_update; + +	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); +	for (i = 0; i < q_vector->tx.count; i++) { +		ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); +		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, +				      r_idx + 1); +	} + +	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); +	for (i = 0; i < q_vector->rx.count; i++) { +		
ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu); +		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, +				      r_idx + 1); +	} + +	q_vector->cpu = cpu; +out_no_update: +	put_cpu(); +} + +static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) +{ +	int num_q_vectors; +	int i; + +	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) +		return; + +	/* always use CB2 mode, difference is masked in the CB driver */ +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); + +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) +		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; +	else +		num_q_vectors = 1; + +	for (i = 0; i < num_q_vectors; i++) { +		adapter->q_vector[i]->cpu = -1; +		ixgbe_update_dca(adapter->q_vector[i]); +	} +} + +static int __ixgbe_notify_dca(struct device *dev, void *data) +{ +	struct ixgbe_adapter *adapter = dev_get_drvdata(dev); +	unsigned long event = *(unsigned long *)data; + +	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) +		return 0; + +	switch (event) { +	case DCA_PROVIDER_ADD: +		/* if we're already enabled, don't do it again */ +		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) +			break; +		if (dca_add_requester(dev) == 0) { +			adapter->flags |= IXGBE_FLAG_DCA_ENABLED; +			ixgbe_setup_dca(adapter); +			break; +		} +		/* Fall Through since DCA is disabled. */ +	case DCA_PROVIDER_REMOVE: +		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { +			dca_remove_requester(dev); +			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; +			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); +		} +		break; +	} + +	return 0; +} +#endif /* CONFIG_IXGBE_DCA */ + +static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc, +				 struct sk_buff *skb) +{ +	skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); +} + +/** + * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type + * @adapter: address of board private structure + * @rx_desc: advanced rx descriptor + * + * Returns : true if it is FCoE pkt + */ +static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter, +				    union ixgbe_adv_rx_desc *rx_desc) +{ +	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + +	return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && +	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == +		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << +			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); +} + +/** + * ixgbe_receive_skb - Send a completed packet up the stack + * @adapter: board private structure + * @skb: packet to send up + * @status: hardware indication of status of receive + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * @rx_desc: rx descriptor + **/ +static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, +			      struct sk_buff *skb, u8 status, +			      struct ixgbe_ring *ring, +			      union ixgbe_adv_rx_desc *rx_desc) +{ +	struct ixgbe_adapter *adapter = q_vector->adapter; +	struct napi_struct *napi = &q_vector->napi; +	bool is_vlan = (status & IXGBE_RXD_STAT_VP); +	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); + +	if (is_vlan && (tag & VLAN_VID_MASK)) +		__vlan_hwaccel_put_tag(skb, tag); + +	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) +		napi_gro_receive(napi, skb); +	else +		netif_rx(skb); +} + +/** + * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @adapter: address of board private structure + * @status_err: hardware indication of status of receive + * @skb: skb currently being received and modified + * @status_err: status error value of last descriptor in packet + **/ +static inline void 
ixgbe_rx_checksum(struct ixgbe_adapter *adapter, +				     union ixgbe_adv_rx_desc *rx_desc, +				     struct sk_buff *skb, +				     u32 status_err) +{ +	skb->ip_summed = CHECKSUM_NONE; + +	/* Rx csum disabled */ +	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) +		return; + +	/* if IP and error */ +	if ((status_err & IXGBE_RXD_STAT_IPCS) && +	    (status_err & IXGBE_RXDADV_ERR_IPE)) { +		adapter->hw_csum_rx_error++; +		return; +	} + +	if (!(status_err & IXGBE_RXD_STAT_L4CS)) +		return; + +	if (status_err & IXGBE_RXDADV_ERR_TCPE) { +		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + +		/* +		 * 82599 errata, UDP frames with a 0 checksum can be marked as +		 * checksum errors. +		 */ +		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && +		    (adapter->hw.mac.type == ixgbe_mac_82599EB)) +			return; + +		adapter->hw_csum_rx_error++; +		return; +	} + +	/* It must be a TCP or UDP packet with a valid checksum */ +	skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) +{ +	/* +	 * Force memory writes to complete before letting h/w +	 * know there are new descriptors to fetch.  (Only +	 * applicable for weak-ordered memory model archs, +	 * such as IA-64). +	 */ +	wmb(); +	writel(val, rx_ring->tail); +} + +/** + * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) +{ +	union ixgbe_adv_rx_desc *rx_desc; +	struct ixgbe_rx_buffer *bi; +	struct sk_buff *skb; +	u16 i = rx_ring->next_to_use; + +	/* do nothing if no valid netdev defined */ +	if (!rx_ring->netdev) +		return; + +	while (cleaned_count--) { +		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); +		bi = &rx_ring->rx_buffer_info[i]; +		skb = bi->skb; + +		if (!skb) { +			skb = netdev_alloc_skb_ip_align(rx_ring->netdev, +							rx_ring->rx_buf_len); +			if (!skb) { +				rx_ring->rx_stats.alloc_rx_buff_failed++; +				goto no_buffers; +			} +			/* initialize queue mapping */ +			skb_record_rx_queue(skb, rx_ring->queue_index); +			bi->skb = skb; +		} + +		if (!bi->dma) { +			bi->dma = dma_map_single(rx_ring->dev, +						 skb->data, +						 rx_ring->rx_buf_len, +						 DMA_FROM_DEVICE); +			if (dma_mapping_error(rx_ring->dev, bi->dma)) { +				rx_ring->rx_stats.alloc_rx_buff_failed++; +				bi->dma = 0; +				goto no_buffers; +			} +		} + +		if (ring_is_ps_enabled(rx_ring)) { +			if (!bi->page) { +				bi->page = netdev_alloc_page(rx_ring->netdev); +				if (!bi->page) { +					rx_ring->rx_stats.alloc_rx_page_failed++; +					goto no_buffers; +				} +			} + +			if (!bi->page_dma) { +				/* use a half page if we're re-using */ +				bi->page_offset ^= PAGE_SIZE / 2; +				bi->page_dma = dma_map_page(rx_ring->dev, +							    bi->page, +							    bi->page_offset, +							    PAGE_SIZE / 2, +							    DMA_FROM_DEVICE); +				if (dma_mapping_error(rx_ring->dev, +						      bi->page_dma)) { +					rx_ring->rx_stats.alloc_rx_page_failed++; +					bi->page_dma = 0; +					goto no_buffers; +				} +			} + +			/* Refresh the desc even if buffer_addrs didn't change +			 * because each write-back erases this info. 
*/ +			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); +			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); +		} else { +			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); +			rx_desc->read.hdr_addr = 0; +		} + +		i++; +		if (i == rx_ring->count) +			i = 0; +	} + +no_buffers: +	if (rx_ring->next_to_use != i) { +		rx_ring->next_to_use = i; +		ixgbe_release_rx_desc(rx_ring, i); +	} +} + +static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) +{ +	/* HW will not DMA in data larger than the given buffer, even if it +	 * parses the (NFS, of course) header to be larger.  In that case, it +	 * fills the header buffer and spills the rest into the page. +	 */ +	u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); +	u16 hlen = (hdr_info &  IXGBE_RXDADV_HDRBUFLEN_MASK) >> +		    IXGBE_RXDADV_HDRBUFLEN_SHIFT; +	if (hlen > IXGBE_RX_HDR_SIZE) +		hlen = IXGBE_RX_HDR_SIZE; +	return hlen; +} + +/** + * ixgbe_transform_rsc_queue - change rsc queue into a full packet + * @skb: pointer to the last skb in the rsc queue + * + * This function changes a queue full of hw rsc buffers into a completed + * packet.  It uses the ->prev pointers to find the first packet and then + * turns it into the frag list owner. + **/ +static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) +{ +	unsigned int frag_list_size = 0; +	unsigned int skb_cnt = 1; + +	while (skb->prev) { +		struct sk_buff *prev = skb->prev; +		frag_list_size += skb->len; +		skb->prev = NULL; +		skb = prev; +		skb_cnt++; +	} + +	skb_shinfo(skb)->frag_list = skb->next; +	skb->next = NULL; +	skb->len += frag_list_size; +	skb->data_len += frag_list_size; +	skb->truesize += frag_list_size; +	IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; + +	return skb; +} + +static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) +{ +	return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & +		IXGBE_RXDADV_RSCCNT_MASK); +} + +static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, +			       struct ixgbe_ring *rx_ring, +			       int *work_done, int work_to_do) +{ +	struct ixgbe_adapter *adapter = q_vector->adapter; +	union ixgbe_adv_rx_desc *rx_desc, *next_rxd; +	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; +	struct sk_buff *skb; +	unsigned int total_rx_bytes = 0, total_rx_packets = 0; +	const int current_node = numa_node_id(); +#ifdef IXGBE_FCOE +	int ddp_bytes = 0; +#endif /* IXGBE_FCOE */ +	u32 staterr; +	u16 i; +	u16 cleaned_count = 0; +	bool pkt_is_rsc = false; + +	i = rx_ring->next_to_clean; +	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); +	staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + +	while (staterr & IXGBE_RXD_STAT_DD) { +		u32 upper_len = 0; + +		rmb(); /* read descriptor and rx_buffer_info after status DD */ + +		rx_buffer_info = &rx_ring->rx_buffer_info[i]; + +		skb = rx_buffer_info->skb; +		rx_buffer_info->skb = NULL; +		prefetch(skb->data); + +		if (ring_is_rsc_enabled(rx_ring)) +			pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); + +		/* if this is a skb from previous receive DMA will be 0 */ +		if (rx_buffer_info->dma) { +			u16 hlen; +			if (pkt_is_rsc && +			    !(staterr & IXGBE_RXD_STAT_EOP) && +			    !skb->prev) { +				/* +				 * When HWRSC is enabled, delay unmapping +				 * of the first packet. It carries the +				 * header information, HW may still +				 * access the header after the writeback. 
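
For reference, ixgbe_transform_rsc_queue() above does the interesting part of RSC completion: it walks the ->prev links back to the first buffer of the hardware-coalesced chain and makes that buffer own the rest of the chain. The toy structure below (not driver code, with simplified stand-in fields) shows the same pointer walk and length accounting.

/* Toy illustration only: walk back to the head of a coalesced chain,
 * detach each link, and fold the accumulated length into the head,
 * which then represents the whole packet.
 */
struct rsc_buf {
	struct rsc_buf *prev;
	unsigned int len;		/* bytes carried by this buffer        */
	unsigned int folded_len;	/* stands in for frag_list accounting  */
};

static struct rsc_buf *rsc_fold_chain(struct rsc_buf *last)
{
	unsigned int extra = 0;

	while (last->prev) {
		struct rsc_buf *prev = last->prev;

		extra += last->len;
		last->prev = NULL;
		last = prev;
	}

	last->folded_len += extra;
	return last;
}
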
+				 * Only unmap it when EOP is reached +				 */ +				IXGBE_RSC_CB(skb)->delay_unmap = true; +				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; +			} else { +				dma_unmap_single(rx_ring->dev, +						 rx_buffer_info->dma, +						 rx_ring->rx_buf_len, +						 DMA_FROM_DEVICE); +			} +			rx_buffer_info->dma = 0; + +			if (ring_is_ps_enabled(rx_ring)) { +				hlen = ixgbe_get_hlen(rx_desc); +				upper_len = le16_to_cpu(rx_desc->wb.upper.length); +			} else { +				hlen = le16_to_cpu(rx_desc->wb.upper.length); +			} + +			skb_put(skb, hlen); +		} else { +			/* assume packet split since header is unmapped */ +			upper_len = le16_to_cpu(rx_desc->wb.upper.length); +		} + +		if (upper_len) { +			dma_unmap_page(rx_ring->dev, +				       rx_buffer_info->page_dma, +				       PAGE_SIZE / 2, +				       DMA_FROM_DEVICE); +			rx_buffer_info->page_dma = 0; +			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, +					   rx_buffer_info->page, +					   rx_buffer_info->page_offset, +					   upper_len); + +			if ((page_count(rx_buffer_info->page) == 1) && +			    (page_to_nid(rx_buffer_info->page) == current_node)) +				get_page(rx_buffer_info->page); +			else +				rx_buffer_info->page = NULL; + +			skb->len += upper_len; +			skb->data_len += upper_len; +			skb->truesize += upper_len; +		} + +		i++; +		if (i == rx_ring->count) +			i = 0; + +		next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i); +		prefetch(next_rxd); +		cleaned_count++; + +		if (pkt_is_rsc) { +			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> +				     IXGBE_RXDADV_NEXTP_SHIFT; +			next_buffer = &rx_ring->rx_buffer_info[nextp]; +		} else { +			next_buffer = &rx_ring->rx_buffer_info[i]; +		} + +		if (!(staterr & IXGBE_RXD_STAT_EOP)) { +			if (ring_is_ps_enabled(rx_ring)) { +				rx_buffer_info->skb = next_buffer->skb; +				rx_buffer_info->dma = next_buffer->dma; +				next_buffer->skb = skb; +				next_buffer->dma = 0; +			} else { +				skb->next = next_buffer->skb; +				skb->next->prev = skb; +			} +			rx_ring->rx_stats.non_eop_descs++; +			goto next_desc; +		} + +		if (skb->prev) { +			skb = ixgbe_transform_rsc_queue(skb); +			/* if we got here without RSC the packet is invalid */ +			if (!pkt_is_rsc) { +				__pskb_trim(skb, 0); +				rx_buffer_info->skb = skb; +				goto next_desc; +			} +		} + +		if (ring_is_rsc_enabled(rx_ring)) { +			if (IXGBE_RSC_CB(skb)->delay_unmap) { +				dma_unmap_single(rx_ring->dev, +						 IXGBE_RSC_CB(skb)->dma, +						 rx_ring->rx_buf_len, +						 DMA_FROM_DEVICE); +				IXGBE_RSC_CB(skb)->dma = 0; +				IXGBE_RSC_CB(skb)->delay_unmap = false; +			} +		} +		if (pkt_is_rsc) { +			if (ring_is_ps_enabled(rx_ring)) +				rx_ring->rx_stats.rsc_count += +					skb_shinfo(skb)->nr_frags; +			else +				rx_ring->rx_stats.rsc_count += +					IXGBE_RSC_CB(skb)->skb_cnt; +			rx_ring->rx_stats.rsc_flush++; +		} + +		/* ERR_MASK will only have valid bits if EOP set */ +		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { +			dev_kfree_skb_any(skb); +			goto next_desc; +		} + +		ixgbe_rx_checksum(adapter, rx_desc, skb, staterr); +		if (adapter->netdev->features & NETIF_F_RXHASH) +			ixgbe_rx_hash(rx_desc, skb); + +		/* probably a little skewed due to removing CRC */ +		total_rx_bytes += skb->len; +		total_rx_packets++; + +		skb->protocol = eth_type_trans(skb, rx_ring->netdev); +#ifdef IXGBE_FCOE +		/* if ddp, not passing to ULD unless for FCP_RSP or error */ +		if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { +			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, +						   staterr); +			if (!ddp_bytes) +				goto next_desc; +		} +#endif /* 
IXGBE_FCOE */ +		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); + +next_desc: +		rx_desc->wb.upper.status_error = 0; + +		(*work_done)++; +		if (*work_done >= work_to_do) +			break; + +		/* return some buffers to hardware, one at a time is too slow */ +		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { +			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); +			cleaned_count = 0; +		} + +		/* use prefetched values */ +		rx_desc = next_rxd; +		staterr = le32_to_cpu(rx_desc->wb.upper.status_error); +	} + +	rx_ring->next_to_clean = i; +	cleaned_count = ixgbe_desc_unused(rx_ring); + +	if (cleaned_count) +		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + +#ifdef IXGBE_FCOE +	/* include DDPed FCoE data */ +	if (ddp_bytes > 0) { +		unsigned int mss; + +		mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - +			sizeof(struct fc_frame_header) - +			sizeof(struct fcoe_crc_eof); +		if (mss > 512) +			mss &= ~511; +		total_rx_bytes += ddp_bytes; +		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); +	} +#endif /* IXGBE_FCOE */ + +	u64_stats_update_begin(&rx_ring->syncp); +	rx_ring->stats.packets += total_rx_packets; +	rx_ring->stats.bytes += total_rx_bytes; +	u64_stats_update_end(&rx_ring->syncp); +	q_vector->rx.total_packets += total_rx_packets; +	q_vector->rx.total_bytes += total_rx_bytes; +} + +static int ixgbe_clean_rxonly(struct napi_struct *, int); +/** + * ixgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ixgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_q_vector *q_vector; +	int i, q_vectors, v_idx, r_idx; +	u32 mask; + +	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + +	/* +	 * Populate the IVAR table and set the ITR values to the +	 * corresponding register. +	 */ +	for (v_idx = 0; v_idx < q_vectors; v_idx++) { +		q_vector = adapter->q_vector[v_idx]; +		/* XXX for_each_set_bit(...) */ +		r_idx = find_first_bit(q_vector->rx.idx, +				       adapter->num_rx_queues); + +		for (i = 0; i < q_vector->rx.count; i++) { +			u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; +			ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); +			r_idx = find_next_bit(q_vector->rx.idx, +					      adapter->num_rx_queues, +					      r_idx + 1); +		} +		r_idx = find_first_bit(q_vector->tx.idx, +				       adapter->num_tx_queues); + +		for (i = 0; i < q_vector->tx.count; i++) { +			u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; +			ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); +			r_idx = find_next_bit(q_vector->tx.idx, +					      adapter->num_tx_queues, +					      r_idx + 1); +		} + +		if (q_vector->tx.count && !q_vector->rx.count) +			/* tx only */ +			q_vector->eitr = adapter->tx_eitr_param; +		else if (q_vector->rx.count) +			/* rx or mixed */ +			q_vector->eitr = adapter->rx_eitr_param; + +		ixgbe_write_eitr(q_vector); +		/* If ATR is enabled, set interrupt affinity */ +		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { +			/* +			 * Allocate the affinity_hint cpumask, assign the mask +			 * for this vector, and set our affinity_hint for +			 * this irq. 
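+			 * The hint is a one-to-one mapping of vector index
+			 * to CPU index (vector 0 -> CPU 0, and so on).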
+			 */ +			if (!alloc_cpumask_var(&q_vector->affinity_mask, +			                       GFP_KERNEL)) +				return; +			cpumask_set_cpu(v_idx, q_vector->affinity_mask); +			irq_set_affinity_hint(adapter->msix_entries[v_idx].vector, +			                      q_vector->affinity_mask); +		} +	} + +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82598EB: +		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, +			       v_idx); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		ixgbe_set_ivar(adapter, -1, 1, v_idx); +		break; + +	default: +		break; +	} +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); + +	/* set up to autoclear timer, and the vectors */ +	mask = IXGBE_EIMS_ENABLE_MASK; +	if (adapter->num_vfs) +		mask &= ~(IXGBE_EIMS_OTHER | +			  IXGBE_EIMS_MAILBOX | +			  IXGBE_EIMS_LSC); +	else +		mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); +} + +enum latency_range { +	lowest_latency = 0, +	low_latency = 1, +	bulk_latency = 2, +	latency_invalid = 255 +}; + +/** + * ixgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + *      Stores a new ITR value based on packets and byte + *      counts during the last interrupt.  The advantage of per interrupt + *      computation is faster updates and more accurate ITR for the current + *      traffic pattern.  Constants in this function were computed + *      based on theoretical maximum wire speed and thresholds were set based + *      on testing data as well as attempting to minimize response time + *      while increasing bulk throughput. + *      this functionality is controlled by the InterruptThrottleRate module + *      parameter (see ixgbe_param.c) + **/ +static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, +			     struct ixgbe_ring_container *ring_container) +{ +	u64 bytes_perint; +	struct ixgbe_adapter *adapter = q_vector->adapter; +	int bytes = ring_container->total_bytes; +	int packets = ring_container->total_packets; +	u32 timepassed_us; +	u8 itr_setting = ring_container->itr; + +	if (packets == 0) +		return; + +	/* simple throttlerate management +	 *    0-20MB/s lowest (100000 ints/s) +	 *   20-100MB/s low   (20000 ints/s) +	 *  100-1249MB/s bulk (8000 ints/s) +	 */ +	/* what was last interrupt timeslice? */ +	timepassed_us = 1000000/q_vector->eitr; +	bytes_perint = bytes / timepassed_us; /* bytes/usec */ + +	switch (itr_setting) { +	case lowest_latency: +		if (bytes_perint > adapter->eitr_low) +			itr_setting = low_latency; +		break; +	case low_latency: +		if (bytes_perint > adapter->eitr_high) +			itr_setting = bulk_latency; +		else if (bytes_perint <= adapter->eitr_low) +			itr_setting = lowest_latency; +		break; +	case bulk_latency: +		if (bytes_perint <= adapter->eitr_high) +			itr_setting = low_latency; +		break; +	} + +	/* clear work counters since we have the values we need */ +	ring_container->total_bytes = 0; +	ring_container->total_packets = 0; + +	/* write updated itr to ring container */ +	ring_container->itr = itr_setting; +} + +/** + * ixgbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime.  Hardware + * specific quirks/differences are taken care of here. 
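+ * 82598 needs the interval mirrored into both halves of the register,
+ * while 82599/X540 set the WDIS bit so the write itself does not
+ * trigger an immediate interrupt.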
+ */ +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) +{ +	struct ixgbe_adapter *adapter = q_vector->adapter; +	struct ixgbe_hw *hw = &adapter->hw; +	int v_idx = q_vector->v_idx; +	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); + +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82598EB: +		/* must write high and low 16 bits to reset counter */ +		itr_reg |= (itr_reg << 16); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		/* +		 * 82599 and X540 can support a value of zero, so allow it for +		 * max interrupt rate, but there is an errata where it can +		 * not be zero with RSC +		 */ +		if (itr_reg == 8 && +		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) +			itr_reg = 0; + +		/* +		 * set the WDIS bit to not clear the timer bits and cause an +		 * immediate assertion of the interrupt +		 */ +		itr_reg |= IXGBE_EITR_CNT_WDIS; +		break; +	default: +		break; +	} +	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); +} + +static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) +{ +	u32 new_itr = q_vector->eitr; +	u8 current_itr; + +	ixgbe_update_itr(q_vector, &q_vector->tx); +	ixgbe_update_itr(q_vector, &q_vector->rx); + +	current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + +	switch (current_itr) { +	/* counts and packets in update_itr are dependent on these numbers */ +	case lowest_latency: +		new_itr = 100000; +		break; +	case low_latency: +		new_itr = 20000; /* aka hwitr = ~200 */ +		break; +	case bulk_latency: +		new_itr = 8000; +		break; +	default: +		break; +	} + +	if (new_itr != q_vector->eitr) { +		/* do an exponential smoothing */ +		new_itr = ((q_vector->eitr * 9) + new_itr)/10; + +		/* save the algorithm value here */ +		q_vector->eitr = new_itr; + +		ixgbe_write_eitr(q_vector); +	} +} + +/** + * ixgbe_check_overtemp_subtask - check for over tempurature + * @adapter: pointer to adapter + **/ +static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 eicr = adapter->interrupt_event; + +	if (test_bit(__IXGBE_DOWN, &adapter->state)) +		return; + +	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && +	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) +		return; + +	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; + +	switch (hw->device_id) { +	case IXGBE_DEV_ID_82599_T3_LOM: +		/* +		 * Since the warning interrupt is for both ports +		 * we don't have to check if: +		 *  - This interrupt wasn't for our port. +		 *  - We may have missed the interrupt so always have to +		 *    check if we  got a LSC +		 */ +		if (!(eicr & IXGBE_EICR_GPI_SDP0) && +		    !(eicr & IXGBE_EICR_LSC)) +			return; + +		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { +			u32 autoneg; +			bool link_up = false; + +			hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + +			if (link_up) +				return; +		} + +		/* Check if this is not due to overtemp */ +		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) +			return; + +		break; +	default: +		if (!(eicr & IXGBE_EICR_GPI_SDP0)) +			return; +		break; +	} +	e_crit(drv, +	       "Network adapter has been stopped because it has over heated. " +	       "Restart the computer. 
If the problem persists, " +	       "power off the system and replace the adapter\n"); + +	adapter->interrupt_event = 0; +} + +static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) +{ +	struct ixgbe_hw *hw = &adapter->hw; + +	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && +	    (eicr & IXGBE_EICR_GPI_SDP1)) { +		e_crit(probe, "Fan has stopped, replace the adapter\n"); +		/* write to clear the interrupt */ +		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); +	} +} + +static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) +{ +	struct ixgbe_hw *hw = &adapter->hw; + +	if (eicr & IXGBE_EICR_GPI_SDP2) { +		/* Clear the interrupt */ +		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); +		if (!test_bit(__IXGBE_DOWN, &adapter->state)) { +			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; +			ixgbe_service_event_schedule(adapter); +		} +	} + +	if (eicr & IXGBE_EICR_GPI_SDP1) { +		/* Clear the interrupt */ +		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); +		if (!test_bit(__IXGBE_DOWN, &adapter->state)) { +			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; +			ixgbe_service_event_schedule(adapter); +		} +	} +} + +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; + +	adapter->lsc_int++; +	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; +	adapter->link_check_timeout = jiffies; +	if (!test_bit(__IXGBE_DOWN, &adapter->state)) { +		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); +		IXGBE_WRITE_FLUSH(hw); +		ixgbe_service_event_schedule(adapter); +	} +} + +static irqreturn_t ixgbe_msix_lsc(int irq, void *data) +{ +	struct ixgbe_adapter *adapter = data; +	struct ixgbe_hw *hw = &adapter->hw; +	u32 eicr; + +	/* +	 * Workaround for Silicon errata.  Use clear-by-write instead +	 * of clear-by-read.  Reading with EICS will return the +	 * interrupt causes without clearing, which later be done +	 * with the write to EICR. 
+	 */ +	eicr = IXGBE_READ_REG(hw, IXGBE_EICS); +	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); + +	if (eicr & IXGBE_EICR_LSC) +		ixgbe_check_lsc(adapter); + +	if (eicr & IXGBE_EICR_MAILBOX) +		ixgbe_msg_task(adapter); + +	switch (hw->mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		/* Handle Flow Director Full threshold interrupt */ +		if (eicr & IXGBE_EICR_FLOW_DIR) { +			int reinit_count = 0; +			int i; +			for (i = 0; i < adapter->num_tx_queues; i++) { +				struct ixgbe_ring *ring = adapter->tx_ring[i]; +				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, +						       &ring->state)) +					reinit_count++; +			} +			if (reinit_count) { +				/* no more flow director interrupts until after init */ +				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); +				eicr &= ~IXGBE_EICR_FLOW_DIR; +				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; +				ixgbe_service_event_schedule(adapter); +			} +		} +		ixgbe_check_sfp_event(adapter, eicr); +		if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && +		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { +			if (!test_bit(__IXGBE_DOWN, &adapter->state)) { +				adapter->interrupt_event = eicr; +				adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; +				ixgbe_service_event_schedule(adapter); +			} +		} +		break; +	default: +		break; +	} + +	ixgbe_check_fan_failure(adapter, eicr); + +	/* re-enable the original interrupt state, no lsc, no queues */ +	if (!test_bit(__IXGBE_DOWN, &adapter->state)) +		IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr & +		                ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE)); + +	return IRQ_HANDLED; +} + +static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, +					   u64 qmask) +{ +	u32 mask; +	struct ixgbe_hw *hw = &adapter->hw; + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		mask = (IXGBE_EIMS_RTX_QUEUE & qmask); +		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		mask = (qmask & 0xFFFFFFFF); +		if (mask) +			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); +		mask = (qmask >> 32); +		if (mask) +			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); +		break; +	default: +		break; +	} +	/* skip the flush */ +} + +static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, +					    u64 qmask) +{ +	u32 mask; +	struct ixgbe_hw *hw = &adapter->hw; + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		mask = (IXGBE_EIMS_RTX_QUEUE & qmask); +		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		mask = (qmask & 0xFFFFFFFF); +		if (mask) +			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); +		mask = (qmask >> 32); +		if (mask) +			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); +		break; +	default: +		break; +	} +	/* skip the flush */ +} + +static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) +{ +	struct ixgbe_q_vector *q_vector = data; +	struct ixgbe_adapter  *adapter = q_vector->adapter; +	struct ixgbe_ring     *tx_ring; +	int i, r_idx; + +	if (!q_vector->tx.count) +		return IRQ_HANDLED; + +	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); +	for (i = 0; i < q_vector->tx.count; i++) { +		tx_ring = adapter->tx_ring[r_idx]; +		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, +				      r_idx + 1); +	} + +	/* EIAM disabled interrupts (on this vector) for us */ +	napi_schedule(&q_vector->napi); + +	return IRQ_HANDLED; +} + +/** + * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) + * @irq: unused + * @data: pointer to our 
q_vector struct for this interrupt vector + **/ +static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) +{ +	struct ixgbe_q_vector *q_vector = data; +	struct ixgbe_adapter  *adapter = q_vector->adapter; +	struct ixgbe_ring  *rx_ring; +	int r_idx; +	int i; + +#ifdef CONFIG_IXGBE_DCA +	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) +		ixgbe_update_dca(q_vector); +#endif + +	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); +	for (i = 0; i < q_vector->rx.count; i++) { +		rx_ring = adapter->rx_ring[r_idx]; +		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, +				      r_idx + 1); +	} + +	if (!q_vector->rx.count) +		return IRQ_HANDLED; + +	/* EIAM disabled interrupts (on this vector) for us */ +	napi_schedule(&q_vector->napi); + +	return IRQ_HANDLED; +} + +static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) +{ +	struct ixgbe_q_vector *q_vector = data; +	struct ixgbe_adapter  *adapter = q_vector->adapter; +	struct ixgbe_ring  *ring; +	int r_idx; +	int i; + +	if (!q_vector->tx.count && !q_vector->rx.count) +		return IRQ_HANDLED; + +	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); +	for (i = 0; i < q_vector->tx.count; i++) { +		ring = adapter->tx_ring[r_idx]; +		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, +				      r_idx + 1); +	} + +	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); +	for (i = 0; i < q_vector->rx.count; i++) { +		ring = adapter->rx_ring[r_idx]; +		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, +				      r_idx + 1); +	} + +	/* EIAM disabled interrupts (on this vector) for us */ +	napi_schedule(&q_vector->napi); + +	return IRQ_HANDLED; +} + +/** + * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function is optimized for cleaning one queue only on a single + * q_vector!!! + **/ +static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) +{ +	struct ixgbe_q_vector *q_vector = +			       container_of(napi, struct ixgbe_q_vector, napi); +	struct ixgbe_adapter *adapter = q_vector->adapter; +	struct ixgbe_ring *rx_ring = NULL; +	int work_done = 0; +	long r_idx; + +#ifdef CONFIG_IXGBE_DCA +	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) +		ixgbe_update_dca(q_vector); +#endif + +	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); +	rx_ring = adapter->rx_ring[r_idx]; + +	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); + +	/* If all Rx work done, exit the polling mode */ +	if (work_done < budget) { +		napi_complete(napi); +		if (adapter->rx_itr_setting & 1) +			ixgbe_set_itr(q_vector); +		if (!test_bit(__IXGBE_DOWN, &adapter->state)) +			ixgbe_irq_enable_queues(adapter, +						((u64)1 << q_vector->v_idx)); +	} + +	return work_done; +} + +/** + * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean more than one rx queue associated with a + * q_vector. 
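+ * The budget is split evenly across the rx rings on this vector
+ * (never below 1 per ring) so a busy ring cannot starve the rest.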
+ **/ +static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) +{ +	struct ixgbe_q_vector *q_vector = +			       container_of(napi, struct ixgbe_q_vector, napi); +	struct ixgbe_adapter *adapter = q_vector->adapter; +	struct ixgbe_ring *ring = NULL; +	int work_done = 0, i; +	long r_idx; +	bool tx_clean_complete = true; + +#ifdef CONFIG_IXGBE_DCA +	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) +		ixgbe_update_dca(q_vector); +#endif + +	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); +	for (i = 0; i < q_vector->tx.count; i++) { +		ring = adapter->tx_ring[r_idx]; +		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); +		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, +				      r_idx + 1); +	} + +	/* attempt to distribute budget to each queue fairly, but don't allow +	 * the budget to go below 1 because we'll exit polling */ +	budget /= (q_vector->rx.count ?: 1); +	budget = max(budget, 1); +	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); +	for (i = 0; i < q_vector->rx.count; i++) { +		ring = adapter->rx_ring[r_idx]; +		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); +		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, +				      r_idx + 1); +	} + +	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); +	ring = adapter->rx_ring[r_idx]; +	/* If all Rx work done, exit the polling mode */ +	if (work_done < budget) { +		napi_complete(napi); +		if (adapter->rx_itr_setting & 1) +			ixgbe_set_itr(q_vector); +		if (!test_bit(__IXGBE_DOWN, &adapter->state)) +			ixgbe_irq_enable_queues(adapter, +						((u64)1 << q_vector->v_idx)); +		return 0; +	} + +	return work_done; +} + +/** + * ixgbe_clean_txonly - msix (aka one shot) tx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function is optimized for cleaning one queue only on a single + * q_vector!!! 
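+ * If the tx cleanup does not complete, the full budget is reported
+ * back so NAPI keeps polling this vector.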
+ **/ +static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) +{ +	struct ixgbe_q_vector *q_vector = +			       container_of(napi, struct ixgbe_q_vector, napi); +	struct ixgbe_adapter *adapter = q_vector->adapter; +	struct ixgbe_ring *tx_ring = NULL; +	int work_done = 0; +	long r_idx; + +#ifdef CONFIG_IXGBE_DCA +	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) +		ixgbe_update_dca(q_vector); +#endif + +	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); +	tx_ring = adapter->tx_ring[r_idx]; + +	if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) +		work_done = budget; + +	/* If all Tx work done, exit the polling mode */ +	if (work_done < budget) { +		napi_complete(napi); +		if (adapter->tx_itr_setting & 1) +			ixgbe_set_itr(q_vector); +		if (!test_bit(__IXGBE_DOWN, &adapter->state)) +			ixgbe_irq_enable_queues(adapter, +						((u64)1 << q_vector->v_idx)); +	} + +	return work_done; +} + +static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, +				     int r_idx) +{ +	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; +	struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; + +	set_bit(r_idx, q_vector->rx.idx); +	q_vector->rx.count++; +	rx_ring->q_vector = q_vector; +} + +static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, +				     int t_idx) +{ +	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; +	struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; + +	set_bit(t_idx, q_vector->tx.idx); +	q_vector->tx.count++; +	tx_ring->q_vector = q_vector; +	q_vector->tx.work_limit = a->tx_work_limit; +} + +/** + * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code.  Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible.  You would add new + * mapping configurations in here. + **/ +static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter) +{ +	int q_vectors; +	int v_start = 0; +	int rxr_idx = 0, txr_idx = 0; +	int rxr_remaining = adapter->num_rx_queues; +	int txr_remaining = adapter->num_tx_queues; +	int i, j; +	int rqpv, tqpv; +	int err = 0; + +	/* No mapping required if MSI-X is disabled. */ +	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) +		goto out; + +	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + +	/* +	 * The ideal configuration... +	 * We have enough vectors to map one per queue. +	 */ +	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { +		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) +			map_vector_to_rxq(adapter, v_start, rxr_idx); + +		for (; txr_idx < txr_remaining; v_start++, txr_idx++) +			map_vector_to_txq(adapter, v_start, txr_idx); + +		goto out; +	} + +	/* +	 * If we don't have enough vectors for a 1-to-1 +	 * mapping, we'll have to group them so there are +	 * multiple queues per vector. +	 */ +	/* Re-adjusting *qpv takes care of the remainder. 
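+	 * For example, 16 rx queues spread over 6 remaining vectors come
+	 * out as groups of 3, 3, 3, 3, 2 and 2 rings.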
*/ +	for (i = v_start; i < q_vectors; i++) { +		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); +		for (j = 0; j < rqpv; j++) { +			map_vector_to_rxq(adapter, i, rxr_idx); +			rxr_idx++; +			rxr_remaining--; +		} +		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); +		for (j = 0; j < tqpv; j++) { +			map_vector_to_txq(adapter, i, txr_idx); +			txr_idx++; +			txr_remaining--; +		} +	} +out: +	return err; +} + +/** + * ixgbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ixgbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) +{ +	struct net_device *netdev = adapter->netdev; +	irqreturn_t (*handler)(int, void *); +	int i, vector, q_vectors, err; +	int ri = 0, ti = 0; + +	/* Decrement for Other and TCP Timer vectors */ +	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + +	err = ixgbe_map_rings_to_vectors(adapter); +	if (err) +		return err; + +#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count)        \ +					  ? &ixgbe_msix_clean_many : \ +			  (_v)->rx.count ? &ixgbe_msix_clean_rx   : \ +			  (_v)->tx.count ? &ixgbe_msix_clean_tx   : \ +			  NULL) +	for (vector = 0; vector < q_vectors; vector++) { +		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; +		handler = SET_HANDLER(q_vector); + +		if (handler == &ixgbe_msix_clean_rx) { +			snprintf(q_vector->name, sizeof(q_vector->name) - 1, +			         "%s-%s-%d", netdev->name, "rx", ri++); +		} else if (handler == &ixgbe_msix_clean_tx) { +			snprintf(q_vector->name, sizeof(q_vector->name) - 1, +			         "%s-%s-%d", netdev->name, "tx", ti++); +		} else if (handler == &ixgbe_msix_clean_many) { +			snprintf(q_vector->name, sizeof(q_vector->name) - 1, +			         "%s-%s-%d", netdev->name, "TxRx", ri++); +			ti++; +		} else { +			/* skip this unused q_vector */ +			continue; +		} +		err = request_irq(adapter->msix_entries[vector].vector, +				  handler, 0, q_vector->name, +				  q_vector); +		if (err) { +			e_err(probe, "request_irq failed for MSIX interrupt " +			      "Error: %d\n", err); +			goto free_queue_irqs; +		} +	} + +	sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name); +	err = request_irq(adapter->msix_entries[vector].vector, +			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter); +	if (err) { +		e_err(probe, "request_irq for msix_lsc failed: %d\n", err); +		goto free_queue_irqs; +	} + +	return 0; + +free_queue_irqs: +	for (i = vector - 1; i >= 0; i--) +		free_irq(adapter->msix_entries[--vector].vector, +			 adapter->q_vector[i]); +	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; +	pci_disable_msix(adapter->pdev); +	kfree(adapter->msix_entries); +	adapter->msix_entries = NULL; +	return err; +} + +/** + * ixgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, +				    bool flush) +{ +	u32 mask; + +	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); +	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) +		mask |= IXGBE_EIMS_GPI_SDP0; +	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) +		mask |= IXGBE_EIMS_GPI_SDP1; +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		mask |= IXGBE_EIMS_ECC; +		mask |= IXGBE_EIMS_GPI_SDP1; +		mask |= IXGBE_EIMS_GPI_SDP2; +		if (adapter->num_vfs) +			mask |= IXGBE_EIMS_MAILBOX; +		break; +	default: +		break; +	} +	if (adapter->flags & 
IXGBE_FLAG_FDIR_HASH_CAPABLE) +		mask |= IXGBE_EIMS_FLOW_DIR; + +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); +	if (queues) +		ixgbe_irq_enable_queues(adapter, ~0); +	if (flush) +		IXGBE_WRITE_FLUSH(&adapter->hw); + +	if (adapter->num_vfs > 32) { +		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); +	} +} + +/** + * ixgbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ixgbe_intr(int irq, void *data) +{ +	struct ixgbe_adapter *adapter = data; +	struct ixgbe_hw *hw = &adapter->hw; +	struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; +	u32 eicr; + +	/* +	 * Workaround for silicon errata on 82598.  Mask the interrupts +	 * before the read of EICR. +	 */ +	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + +	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read +	 * therefore no explict interrupt disable is necessary */ +	eicr = IXGBE_READ_REG(hw, IXGBE_EICR); +	if (!eicr) { +		/* +		 * shared interrupt alert! +		 * make sure interrupts are enabled because the read will +		 * have disabled interrupts due to EIAM +		 * finish the workaround of silicon errata on 82598.  Unmask +		 * the interrupt that we masked before the EICR read. +		 */ +		if (!test_bit(__IXGBE_DOWN, &adapter->state)) +			ixgbe_irq_enable(adapter, true, true); +		return IRQ_NONE;	/* Not our interrupt */ +	} + +	if (eicr & IXGBE_EICR_LSC) +		ixgbe_check_lsc(adapter); + +	switch (hw->mac.type) { +	case ixgbe_mac_82599EB: +		ixgbe_check_sfp_event(adapter, eicr); +		if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && +		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { +			if (!test_bit(__IXGBE_DOWN, &adapter->state)) { +				adapter->interrupt_event = eicr; +				adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; +				ixgbe_service_event_schedule(adapter); +			} +		} +		break; +	default: +		break; +	} + +	ixgbe_check_fan_failure(adapter, eicr); + +	if (napi_schedule_prep(&(q_vector->napi))) { +		/* would disable interrupts here but EIAM disabled it */ +		__napi_schedule(&(q_vector->napi)); +	} + +	/* +	 * re-enable link(maybe) and non-queue interrupts, no flush. +	 * ixgbe_poll will re-enable the queue interrupts +	 */ + +	if (!test_bit(__IXGBE_DOWN, &adapter->state)) +		ixgbe_irq_enable(adapter, false, false); + +	return IRQ_HANDLED; +} + +static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) +{ +	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + +	for (i = 0; i < q_vectors; i++) { +		struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; +		bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES); +		bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES); +		q_vector->rx.count = 0; +		q_vector->tx.count = 0; +	} +} + +/** + * ixgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
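+ * Order of preference: MSI-X, then MSI, then legacy (shared) INTx.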
+ **/ +static int ixgbe_request_irq(struct ixgbe_adapter *adapter) +{ +	struct net_device *netdev = adapter->netdev; +	int err; + +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { +		err = ixgbe_request_msix_irqs(adapter); +	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { +		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, +				  netdev->name, adapter); +	} else { +		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, +				  netdev->name, adapter); +	} + +	if (err) +		e_err(probe, "request_irq failed, Error %d\n", err); + +	return err; +} + +static void ixgbe_free_irq(struct ixgbe_adapter *adapter) +{ +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { +		int i, q_vectors; + +		q_vectors = adapter->num_msix_vectors; + +		i = q_vectors - 1; +		free_irq(adapter->msix_entries[i].vector, adapter); + +		i--; +		for (; i >= 0; i--) { +			/* free only the irqs that were actually requested */ +			if (!adapter->q_vector[i]->rx.count && +			    !adapter->q_vector[i]->tx.count) +				continue; + +			free_irq(adapter->msix_entries[i].vector, +				 adapter->q_vector[i]); +		} + +		ixgbe_reset_q_vectors(adapter); +	} else { +		free_irq(adapter->pdev->irq, adapter); +	} +} + +/** + * ixgbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) +{ +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82598EB: +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); +		if (adapter->num_vfs > 32) +			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); +		break; +	default: +		break; +	} +	IXGBE_WRITE_FLUSH(&adapter->hw); +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { +		int i; +		for (i = 0; i < adapter->num_msix_vectors; i++) +			synchronize_irq(adapter->msix_entries[i].vector); +	} else { +		synchronize_irq(adapter->pdev->irq); +	} +} + +/** + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; + +	IXGBE_WRITE_REG(hw, IXGBE_EITR(0), +			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); + +	ixgbe_set_ivar(adapter, 0, 0, 0); +	ixgbe_set_ivar(adapter, 1, 0, 0); + +	map_vector_to_rxq(adapter, 0, 0); +	map_vector_to_txq(adapter, 0, 0); + +	e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
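+ * The queue is disabled while the base address, length and head/tail
+ * registers are programmed, then re-enabled and polled until
+ * TXDCTL.ENABLE reads back as set.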
+ **/ +void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, +			     struct ixgbe_ring *ring) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u64 tdba = ring->dma; +	int wait_loop = 10; +	u32 txdctl; +	u8 reg_idx = ring->reg_idx; + +	/* disable queue to avoid issues while updating state */ +	txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); +	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), +			txdctl & ~IXGBE_TXDCTL_ENABLE); +	IXGBE_WRITE_FLUSH(hw); + +	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), +			(tdba & DMA_BIT_MASK(32))); +	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); +	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), +			ring->count * sizeof(union ixgbe_adv_tx_desc)); +	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); +	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); +	ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); + +	/* configure fetching thresholds */ +	if (adapter->rx_itr_setting == 0) { +		/* cannot set wthresh when itr==0 */ +		txdctl &= ~0x007F0000; +	} else { +		/* enable WTHRESH=8 descriptors, to encourage burst writeback */ +		txdctl |= (8 << 16); +	} +	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { +		/* PThresh workaround for Tx hang with DFP enabled. */ +		txdctl |= 32; +	} + +	/* reinitialize flowdirector state */ +	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && +	    adapter->atr_sample_rate) { +		ring->atr_sample_rate = adapter->atr_sample_rate; +		ring->atr_count = 0; +		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); +	} else { +		ring->atr_sample_rate = 0; +	} + +	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); + +	/* enable queue */ +	txdctl |= IXGBE_TXDCTL_ENABLE; +	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); + +	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ +	if (hw->mac.type == ixgbe_mac_82598EB && +	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) +		return; + +	/* poll to verify queue is enabled */ +	do { +		usleep_range(1000, 2000); +		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); +	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); +	if (!wait_loop) +		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + +static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 rttdcs; +	u32 reg; +	u8 tcs = netdev_get_num_tc(adapter->netdev); + +	if (hw->mac.type == ixgbe_mac_82598EB) +		return; + +	/* disable the arbiter while setting MTQC */ +	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); +	rttdcs |= IXGBE_RTTDCS_ARBDIS; +	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + +	/* set transmit pool layout */ +	switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +	case (IXGBE_FLAG_SRIOV_ENABLED): +		IXGBE_WRITE_REG(hw, IXGBE_MTQC, +				(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); +		break; +	default: +		if (!tcs) +			reg = IXGBE_MTQC_64Q_1PB; +		else if (tcs <= 4) +			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; +		else +			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + +		IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + +		/* Enable Security TX Buffer IFG for multiple pb */ +		if (tcs) { +			reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); +			reg |= IXGBE_SECTX_DCB; +			IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); +		} +		break; +	} + +	/* re-enable the arbiter */ +	rttdcs &= ~IXGBE_RTTDCS_ARBDIS; +	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); +} + +/** + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
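+ * On non-82598 parts the global DMA Tx enable (DMATXCTL) is set before
+ * any queue is brought up; each ring is then programmed through
+ * ixgbe_configure_tx_ring().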
+ **/ +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 dmatxctl; +	u32 i; + +	ixgbe_setup_mtqc(adapter); + +	if (hw->mac.type != ixgbe_mac_82598EB) { +		/* DMATXCTL.EN must be before Tx queues are enabled */ +		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); +		dmatxctl |= IXGBE_DMATXCTL_TE; +		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); +	} + +	/* Setup the HW Tx Head and Tail descriptor pointers */ +	for (i = 0; i < adapter->num_tx_queues; i++) +		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, +				   struct ixgbe_ring *rx_ring) +{ +	u32 srrctl; +	u8 reg_idx = rx_ring->reg_idx; + +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82598EB: { +		struct ixgbe_ring_feature *feature = adapter->ring_feature; +		const int mask = feature[RING_F_RSS].mask; +		reg_idx = reg_idx & mask; +	} +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +	default: +		break; +	} + +	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); + +	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; +	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; +	if (adapter->num_vfs) +		srrctl |= IXGBE_SRRCTL_DROP_EN; + +	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & +		  IXGBE_SRRCTL_BSIZEHDR_MASK; + +	if (ring_is_ps_enabled(rx_ring)) { +#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER +		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#else +		srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#endif +		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; +	} else { +		srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> +			  IXGBE_SRRCTL_BSIZEPKT_SHIFT; +		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; +	} + +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, +			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, +			  0x6A3E67EA, 0x14364D17, 0x3BED200D}; +	u32 mrqc = 0, reta = 0; +	u32 rxcsum; +	int i, j; +	u8 tcs = netdev_get_num_tc(adapter->netdev); +	int maxq = adapter->ring_feature[RING_F_RSS].indices; + +	if (tcs) +		maxq = min(maxq, adapter->num_tx_queues / tcs); + +	/* Fill out hash function seeds */ +	for (i = 0; i < 10; i++) +		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); + +	/* Fill out redirection table */ +	for (i = 0, j = 0; i < 128; i++, j++) { +		if (j == maxq) +			j = 0; +		/* reta = 4-byte sliding window of +		 * 0x00..(indices-1)(indices-1)00..etc. 
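+		 * e.g. with maxq = 4 the first register written is 0x00112233.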
*/ +		reta = (reta << 8) | (j * 0x11); +		if ((i & 3) == 3) +			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); +	} + +	/* Disable indicating checksum in descriptor, enables RSS hash */ +	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); +	rxcsum |= IXGBE_RXCSUM_PCSD; +	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + +	if (adapter->hw.mac.type == ixgbe_mac_82598EB && +	    (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { +		mrqc = IXGBE_MRQC_RSSEN; +	} else { +		int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED +					     | IXGBE_FLAG_SRIOV_ENABLED); + +		switch (mask) { +		case (IXGBE_FLAG_RSS_ENABLED): +			if (!tcs) +				mrqc = IXGBE_MRQC_RSSEN; +			else if (tcs <= 4) +				mrqc = IXGBE_MRQC_RTRSS4TCEN; +			else +				mrqc = IXGBE_MRQC_RTRSS8TCEN; +			break; +		case (IXGBE_FLAG_SRIOV_ENABLED): +			mrqc = IXGBE_MRQC_VMDQEN; +			break; +		default: +			break; +		} +	} + +	/* Perform hash on these packet types */ +	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 +	      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP +	      | IXGBE_MRQC_RSS_FIELD_IPV6 +	      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + +	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); +} + +/** + * ixgbe_configure_rscctl - enable RSC for the indicated ring + * @adapter:    address of board private structure + * @index:      index of ring to set + **/ +static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, +				   struct ixgbe_ring *ring) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 rscctrl; +	int rx_buf_len; +	u8 reg_idx = ring->reg_idx; + +	if (!ring_is_rsc_enabled(ring)) +		return; + +	rx_buf_len = ring->rx_buf_len; +	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); +	rscctrl |= IXGBE_RSCCTL_RSCEN; +	/* +	 * we must limit the number of descriptors so that the +	 * total size of max desc * buf_len is not greater +	 * than 65535 +	 */ +	if (ring_is_ps_enabled(ring)) { +#if (MAX_SKB_FRAGS > 16) +		rscctrl |= IXGBE_RSCCTL_MAXDESC_16; +#elif (MAX_SKB_FRAGS > 8) +		rscctrl |= IXGBE_RSCCTL_MAXDESC_8; +#elif (MAX_SKB_FRAGS > 4) +		rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +#else +		rscctrl |= IXGBE_RSCCTL_MAXDESC_1; +#endif +	} else { +		if (rx_buf_len < IXGBE_RXBUFFER_4096) +			rscctrl |= IXGBE_RSCCTL_MAXDESC_16; +		else if (rx_buf_len < IXGBE_RXBUFFER_8192) +			rscctrl |= IXGBE_RSCCTL_MAXDESC_8; +		else +			rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +	} +	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); +} + +/** + *  ixgbe_set_uta - Set unicast filter table address + *  @adapter: board private structure + * + *  The unicast table address is a register array of 32-bit registers. 
+ *  The table is meant to be used in a way similar to how the MTA is used + *  however due to certain limitations in the hardware it is necessary to + *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + *  enable bit to allow vlan tag stripping when promiscuous mode is enabled + **/ +static void ixgbe_set_uta(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int i; + +	/* The UTA table only exists on 82599 hardware and newer */ +	if (hw->mac.type < ixgbe_mac_82599EB) +		return; + +	/* we only need to do this if VMDq is enabled */ +	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) +		return; + +	for (i = 0; i < 128; i++) +		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); +} + +#define IXGBE_MAX_RX_DESC_POLL 10 +static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, +				       struct ixgbe_ring *ring) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int wait_loop = IXGBE_MAX_RX_DESC_POLL; +	u32 rxdctl; +	u8 reg_idx = ring->reg_idx; + +	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ +	if (hw->mac.type == ixgbe_mac_82598EB && +	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) +		return; + +	do { +		usleep_range(1000, 2000); +		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); +	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + +	if (!wait_loop) { +		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " +		      "the polling period\n", reg_idx); +	} +} + +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, +			    struct ixgbe_ring *ring) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int wait_loop = IXGBE_MAX_RX_DESC_POLL; +	u32 rxdctl; +	u8 reg_idx = ring->reg_idx; + +	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); +	rxdctl &= ~IXGBE_RXDCTL_ENABLE; + +	/* write value back with RXDCTL.ENABLE bit cleared */ +	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + +	if (hw->mac.type == ixgbe_mac_82598EB && +	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) +		return; + +	/* the hardware may take up to 100us to really disable the rx queue */ +	do { +		udelay(10); +		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); +	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + +	if (!wait_loop) { +		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " +		      "the polling period\n", reg_idx); +	} +} + +void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, +			     struct ixgbe_ring *ring) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u64 rdba = ring->dma; +	u32 rxdctl; +	u8 reg_idx = ring->reg_idx; + +	/* disable queue to avoid issues while updating state */ +	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); +	ixgbe_disable_rx_queue(adapter, ring); + +	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); +	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); +	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), +			ring->count * sizeof(union ixgbe_adv_rx_desc)); +	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); +	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); +	ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx); + +	ixgbe_configure_srrctl(adapter, ring); +	ixgbe_configure_rscctl(adapter, ring); + +	/* If operating in IOV mode set RLPML for X540 */ +	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && +	    hw->mac.type == ixgbe_mac_X540) { +		rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; +		rxdctl |= ((ring->netdev->mtu + ETH_HLEN + +			    ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN); +	} + +	if (hw->mac.type == ixgbe_mac_82598EB) { +		/* +		 * enable cache line friendly hardware 
writes: +		 * PTHRESH=32 descriptors (half the internal cache), +		 * this also removes ugly rx_no_buffer_count increment +		 * HTHRESH=4 descriptors (to minimize latency on fetch) +		 * WTHRESH=8 burst writeback up to two cache lines +		 */ +		rxdctl &= ~0x3FFFFF; +		rxdctl |=  0x080420; +	} + +	/* enable receive descriptor ring */ +	rxdctl |= IXGBE_RXDCTL_ENABLE; +	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + +	ixgbe_rx_desc_queue_enable(adapter, ring); +	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); +} + +static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int p; + +	/* PSRTYPE must be initialized in non 82598 adapters */ +	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | +		      IXGBE_PSRTYPE_UDPHDR | +		      IXGBE_PSRTYPE_IPV4HDR | +		      IXGBE_PSRTYPE_L2HDR | +		      IXGBE_PSRTYPE_IPV6HDR; + +	if (hw->mac.type == ixgbe_mac_82598EB) +		return; + +	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) +		psrtype |= (adapter->num_rx_queues_per_pool << 29); + +	for (p = 0; p < adapter->num_rx_pools; p++) +		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), +				psrtype); +} + +static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 gcr_ext; +	u32 vt_reg_bits; +	u32 reg_offset, vf_shift; +	u32 vmdctl; + +	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) +		return; + +	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); +	vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; +	vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); +	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); + +	vf_shift = adapter->num_vfs % 32; +	reg_offset = (adapter->num_vfs > 32) ? 1 : 0; + +	/* Enable only the PF's pool for Tx/Rx */ +	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); +	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); +	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); +	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); +	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + +	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */ +	hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); + +	/* +	 * Set up VF register offsets for selected VT Mode, +	 * i.e. 
32 or 64 VFs for SR-IOV +	 */ +	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); +	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; +	gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; +	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + +	/* enable Tx loopback for VF/PF communication */ +	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); +	/* Enable MAC Anti-Spoofing */ +	hw->mac.ops.set_mac_anti_spoofing(hw, +					  (adapter->antispoofing_enabled = +					   (adapter->num_vfs != 0)), +					  adapter->num_vfs); +} + +static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	struct net_device *netdev = adapter->netdev; +	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; +	int rx_buf_len; +	struct ixgbe_ring *rx_ring; +	int i; +	u32 mhadd, hlreg0; + +	/* Decide whether to use packet split mode or not */ +	/* On by default */ +	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + +	/* Do not use packet split if we're in SR-IOV Mode */ +	if (adapter->num_vfs) +		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + +	/* Disable packet split due to 82599 erratum #45 */ +	if (hw->mac.type == ixgbe_mac_82599EB) +		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + +	/* Set the RX buffer length according to the mode */ +	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { +		rx_buf_len = IXGBE_RX_HDR_SIZE; +	} else { +		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && +		    (netdev->mtu <= ETH_DATA_LEN)) +			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; +		else +			rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024); +	} + +#ifdef IXGBE_FCOE +	/* adjust max frame to be able to do baby jumbo for FCoE */ +	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && +	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) +		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; + +#endif /* IXGBE_FCOE */ +	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); +	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { +		mhadd &= ~IXGBE_MHADD_MFS_MASK; +		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; + +		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); +	} + +	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); +	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ +	hlreg0 |= IXGBE_HLREG0_JUMBOEN; +	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + +	/* +	 * Setup the HW Rx Head and Tail Descriptor Pointers and +	 * the Base and Length of the Rx Descriptor Ring +	 */ +	for (i = 0; i < adapter->num_rx_queues; i++) { +		rx_ring = adapter->rx_ring[i]; +		rx_ring->rx_buf_len = rx_buf_len; + +		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) +			set_ring_ps_enabled(rx_ring); +		else +			clear_ring_ps_enabled(rx_ring); + +		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) +			set_ring_rsc_enabled(rx_ring); +		else +			clear_ring_rsc_enabled(rx_ring); + +#ifdef IXGBE_FCOE +		if (netdev->features & NETIF_F_FCOE_MTU) { +			struct ixgbe_ring_feature *f; +			f = &adapter->ring_feature[RING_F_FCOE]; +			if ((i >= f->mask) && (i < f->mask + f->indices)) { +				clear_ring_ps_enabled(rx_ring); +				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) +					rx_ring->rx_buf_len = +						IXGBE_FCOE_JUMBO_FRAME_SIZE; +			} else if (!ring_is_rsc_enabled(rx_ring) && +				   !ring_is_ps_enabled(rx_ring)) { +				rx_ring->rx_buf_len = +						IXGBE_FCOE_JUMBO_FRAME_SIZE; +			} +		} +#endif /* IXGBE_FCOE */ +	} +} + +static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		/* +		 * For VMDq support of different descriptor 
types or +		 * buffer sizes through the use of multiple SRRCTL +		 * registers, RDRXCTL.MVMEN must be set to 1 +		 * +		 * also, the manual doesn't mention it clearly but DCA hints +		 * will only use queue 0's tags unless this bit is set.  Side +		 * effects of setting this bit are only that SRRCTL must be +		 * fully programmed [0..15] +		 */ +		rdrxctl |= IXGBE_RDRXCTL_MVMEN; +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		/* Disable RSC for ACK packets */ +		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, +		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); +		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; +		/* hardware requires some bits to be set by default */ +		rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); +		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; +		break; +	default: +		/* We should do nothing since we don't know this hardware */ +		return; +	} + +	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); +} + +/** + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int i; +	u32 rxctrl; + +	/* disable receives while setting up the descriptors */ +	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + +	ixgbe_setup_psrtype(adapter); +	ixgbe_setup_rdrxctl(adapter); + +	/* Program registers for the distribution of queues */ +	ixgbe_setup_mrqc(adapter); + +	ixgbe_set_uta(adapter); + +	/* set_rx_buffer_len must be called before ring initialization */ +	ixgbe_set_rx_buffer_len(adapter); + +	/* +	 * Setup the HW Rx Head and Tail Descriptor Pointers and +	 * the Base and Length of the Rx Descriptor Ring +	 */ +	for (i = 0; i < adapter->num_rx_queues; i++) +		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + +	/* disable drop enable for 82598 parts */ +	if (hw->mac.type == ixgbe_mac_82598EB) +		rxctrl |= IXGBE_RXCTRL_DMBYPS; + +	/* enable all receives */ +	rxctrl |= IXGBE_RXCTRL_RXEN; +	hw->mac.ops.enable_rx_dma(hw, rxctrl); +} + +static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	int pool_ndx = adapter->num_vfs; + +	/* add VID to filter table */ +	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); +	set_bit(vid, adapter->active_vlans); +} + +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	int pool_ndx = adapter->num_vfs; + +	/* remove VID from filter table */ +	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); +	clear_bit(vid, adapter->active_vlans); +} + +/** + * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 vlnctrl; + +	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); +	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +/** + * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 vlnctrl; + +	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +	vlnctrl |= IXGBE_VLNCTRL_VFE; +	vlnctrl &= 
~IXGBE_VLNCTRL_CFIEN; +	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +/** + * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping + * @adapter: driver data + */ +static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 vlnctrl; +	int i, j; + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +		vlnctrl &= ~IXGBE_VLNCTRL_VME; +		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		for (i = 0; i < adapter->num_rx_queues; i++) { +			j = adapter->rx_ring[i]->reg_idx; +			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); +			vlnctrl &= ~IXGBE_RXDCTL_VME; +			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); +		} +		break; +	default: +		break; +	} +} + +/** + * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping + * @adapter: driver data + */ +static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 vlnctrl; +	int i, j; + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +		vlnctrl |= IXGBE_VLNCTRL_VME; +		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		for (i = 0; i < adapter->num_rx_queues; i++) { +			j = adapter->rx_ring[i]->reg_idx; +			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); +			vlnctrl |= IXGBE_RXDCTL_VME; +			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); +		} +		break; +	default: +		break; +	} +} + +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) +{ +	u16 vid; + +	ixgbe_vlan_rx_add_vid(adapter->netdev, 0); + +	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) +		ixgbe_vlan_rx_add_vid(adapter->netdev, vid); +} + +/** + * ixgbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + *                0 on no addresses written + *                X on writing X addresses to the RAR table + **/ +static int ixgbe_write_uc_addr_list(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	unsigned int vfn = adapter->num_vfs; +	unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; +	int count = 0; + +	/* return ENOMEM indicating insufficient memory for addresses */ +	if (netdev_uc_count(netdev) > rar_entries) +		return -ENOMEM; + +	if (!netdev_uc_empty(netdev) && rar_entries) { +		struct netdev_hw_addr *ha; +		/* return error if we do not support writing to RAR table */ +		if (!hw->mac.ops.set_rar) +			return -ENOMEM; + +		netdev_for_each_uc_addr(ha, netdev) { +			if (!rar_entries) +				break; +			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, +					    vfn, IXGBE_RAH_AV); +			count++; +		} +	} +	/* write the addresses in reverse order to avoid write combining */ +	for (; rar_entries > 0 ; rar_entries--) +		hw->mac.ops.clear_rar(hw, rar_entries); + +	return count; +} + +/** + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated.  This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
+ **/ +void ixgbe_set_rx_mode(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; +	int count; + +	/* Check for Promiscuous and All Multicast modes */ + +	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + +	/* set all bits that we expect to always be set */ +	fctrl |= IXGBE_FCTRL_BAM; +	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ +	fctrl |= IXGBE_FCTRL_PMCF; + +	/* clear the bits we are changing the status of */ +	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + +	if (netdev->flags & IFF_PROMISC) { +		hw->addr_ctrl.user_set_promisc = true; +		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); +		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); +		/* don't hardware filter vlans in promisc mode */ +		ixgbe_vlan_filter_disable(adapter); +	} else { +		if (netdev->flags & IFF_ALLMULTI) { +			fctrl |= IXGBE_FCTRL_MPE; +			vmolr |= IXGBE_VMOLR_MPE; +		} else { +			/* +			 * Write addresses to the MTA, if the attempt fails +			 * then we should just turn on promiscuous mode so +			 * that we can at least receive multicast traffic +			 */ +			hw->mac.ops.update_mc_addr_list(hw, netdev); +			vmolr |= IXGBE_VMOLR_ROMPE; +		} +		ixgbe_vlan_filter_enable(adapter); +		hw->addr_ctrl.user_set_promisc = false; +		/* +		 * Write addresses to available RAR registers, if there is not +		 * sufficient space to store all the addresses then enable +		 * unicast promiscuous mode +		 */ +		count = ixgbe_write_uc_addr_list(netdev); +		if (count < 0) { +			fctrl |= IXGBE_FCTRL_UPE; +			vmolr |= IXGBE_VMOLR_ROPE; +		} +	} + +	if (adapter->num_vfs) { +		ixgbe_restore_vf_multicasts(adapter); +		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & +			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | +			   IXGBE_VMOLR_ROPE); +		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); +	} + +	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + +	if (netdev->features & NETIF_F_HW_VLAN_RX) +		ixgbe_vlan_strip_enable(adapter); +	else +		ixgbe_vlan_strip_disable(adapter); +} + +static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) +{ +	int q_idx; +	struct ixgbe_q_vector *q_vector; +	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + +	/* legacy and MSI only use one vector */ +	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) +		q_vectors = 1; + +	for (q_idx = 0; q_idx < q_vectors; q_idx++) { +		struct napi_struct *napi; +		q_vector = adapter->q_vector[q_idx]; +		napi = &q_vector->napi; +		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { +			if (!q_vector->rx.count || !q_vector->tx.count) { +				if (q_vector->tx.count == 1) +					napi->poll = &ixgbe_clean_txonly; +				else if (q_vector->rx.count == 1) +					napi->poll = &ixgbe_clean_rxonly; +			} +		} + +		napi_enable(napi); +	} +} + +static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) +{ +	int q_idx; +	struct ixgbe_q_vector *q_vector; +	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + +	/* legacy and MSI only use one vector */ +	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) +		q_vectors = 1; + +	for (q_idx = 0; q_idx < q_vectors; q_idx++) { +		q_vector = adapter->q_vector[q_idx]; +		napi_disable(&q_vector->napi); +	} +} + +#ifdef CONFIG_IXGBE_DCB +/* + * ixgbe_configure_dcb - Configure DCB hardware + * @adapter: ixgbe adapter struct + * + * This is called by the driver on open to configure the DCB hardware. + * This is also called by the gennetlink interface when reconfiguring + * the DCB state. 
+ */ +static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + +	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { +		if (hw->mac.type == ixgbe_mac_82598EB) +			netif_set_gso_max_size(adapter->netdev, 65536); +		return; +	} + +	if (hw->mac.type == ixgbe_mac_82598EB) +		netif_set_gso_max_size(adapter->netdev, 32768); + + +	/* Enable VLAN tag insert/strip */ +	adapter->netdev->features |= NETIF_F_HW_VLAN_RX; + +	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); + +	/* reconfigure the hardware */ +	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { +#ifdef CONFIG_FCOE +		if (adapter->netdev->features & NETIF_F_FCOE_MTU) +			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif +		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, +						DCB_TX_CONFIG); +		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, +						DCB_RX_CONFIG); +		ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); +	} else { +		struct net_device *dev = adapter->netdev; + +		if (adapter->ixgbe_ieee_ets) +			dev->dcbnl_ops->ieee_setets(dev, +						    adapter->ixgbe_ieee_ets); +		if (adapter->ixgbe_ieee_pfc) +			dev->dcbnl_ops->ieee_setpfc(dev, +						    adapter->ixgbe_ieee_pfc); +	} + +	/* Enable RSS Hash per TC */ +	if (hw->mac.type != ixgbe_mac_82598EB) { +		int i; +		u32 reg = 0; + +		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +			u8 msb = 0; +			u8 cnt = adapter->netdev->tc_to_txq[i].count; + +			while (cnt >>= 1) +				msb++; + +			reg |= msb << IXGBE_RQTC_SHIFT_TC(i); +		} +		IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); +	} +} + +#endif + +static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) +{ +	int hdrm = 0; +	int num_tc = netdev_get_num_tc(adapter->netdev); +	struct ixgbe_hw *hw = &adapter->hw; + +	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || +	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) +		hdrm = 64 << adapter->fdir_pballoc; + +	hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL); +} + +static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	struct hlist_node *node, *node2; +	struct ixgbe_fdir_filter *filter; + +	spin_lock(&adapter->fdir_perfect_lock); + +	if (!hlist_empty(&adapter->fdir_filter_list)) +		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); + +	hlist_for_each_entry_safe(filter, node, node2, +				  &adapter->fdir_filter_list, fdir_node) { +		ixgbe_fdir_write_perfect_filter_82599(hw, +				&filter->filter, +				filter->sw_idx, +				(filter->action == IXGBE_FDIR_DROP_QUEUE) ? 
+				IXGBE_FDIR_DROP_QUEUE : +				adapter->rx_ring[filter->action]->reg_idx); +	} + +	spin_unlock(&adapter->fdir_perfect_lock); +} + +static void ixgbe_configure(struct ixgbe_adapter *adapter) +{ +	struct net_device *netdev = adapter->netdev; +	struct ixgbe_hw *hw = &adapter->hw; +	int i; + +	ixgbe_configure_pb(adapter); +#ifdef CONFIG_IXGBE_DCB +	ixgbe_configure_dcb(adapter); +#endif + +	ixgbe_set_rx_mode(netdev); +	ixgbe_restore_vlan(adapter); + +#ifdef IXGBE_FCOE +	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) +		ixgbe_configure_fcoe(adapter); + +#endif /* IXGBE_FCOE */ +	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { +		for (i = 0; i < adapter->num_tx_queues; i++) +			adapter->tx_ring[i]->atr_sample_rate = +						       adapter->atr_sample_rate; +		ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); +	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { +		ixgbe_init_fdir_perfect_82599(&adapter->hw, +					      adapter->fdir_pballoc); +		ixgbe_fdir_filter_restore(adapter); +	} +	ixgbe_configure_virtualization(adapter); + +	ixgbe_configure_tx(adapter); +	ixgbe_configure_rx(adapter); +} + +static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) +{ +	switch (hw->phy.type) { +	case ixgbe_phy_sfp_avago: +	case ixgbe_phy_sfp_ftl: +	case ixgbe_phy_sfp_intel: +	case ixgbe_phy_sfp_unknown: +	case ixgbe_phy_sfp_passive_tyco: +	case ixgbe_phy_sfp_passive_unknown: +	case ixgbe_phy_sfp_active_unknown: +	case ixgbe_phy_sfp_ftl_active: +		return true; +	default: +		return false; +	} +} + +/** + * ixgbe_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) +{ +	/* +	 * We are assuming the worst case scenario here, and that +	 * is that an SFP was inserted/removed after the reset +	 * but before SFP detection was enabled.  
As such the best +	 * solution is to just start searching as soon as we start +	 */ +	if (adapter->hw.mac.type == ixgbe_mac_82598EB) +		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + +	adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; +} + +/** + * ixgbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) +{ +	u32 autoneg; +	bool negotiation, link_up = false; +	u32 ret = IXGBE_ERR_LINK_SETUP; + +	if (hw->mac.ops.check_link) +		ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + +	if (ret) +		goto link_cfg_out; + +	autoneg = hw->phy.autoneg_advertised; +	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) +		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, +							&negotiation); +	if (ret) +		goto link_cfg_out; + +	if (hw->mac.ops.setup_link) +		ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up); +link_cfg_out: +	return ret; +} + +static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 gpie = 0; + +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { +		gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | +		       IXGBE_GPIE_OCD; +		gpie |= IXGBE_GPIE_EIAME; +		/* +		 * use EIAM to auto-mask when MSI-X interrupt is asserted +		 * this saves a register write for every interrupt +		 */ +		switch (hw->mac.type) { +		case ixgbe_mac_82598EB: +			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); +			break; +		case ixgbe_mac_82599EB: +		case ixgbe_mac_X540: +		default: +			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); +			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); +			break; +		} +	} else { +		/* legacy interrupts, use EIAM to auto-mask when reading EICR, +		 * specifically only auto mask tx and rx interrupts */ +		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); +	} + +	/* XXX: to interrupt immediately for EICS writes, enable this */ +	/* gpie |= IXGBE_GPIE_EIMEN; */ + +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +		gpie &= ~IXGBE_GPIE_VTMODE_MASK; +		gpie |= IXGBE_GPIE_VTMODE_64; +	} + +	/* Enable fan failure interrupt */ +	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) +		gpie |= IXGBE_SDP1_GPIEN; + +	if (hw->mac.type == ixgbe_mac_82599EB) { +		gpie |= IXGBE_SDP1_GPIEN; +		gpie |= IXGBE_SDP2_GPIEN; +	} + +	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); +} + +static int ixgbe_up_complete(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int err; +	u32 ctrl_ext; + +	ixgbe_get_hw_control(adapter); +	ixgbe_setup_gpie(adapter); + +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) +		ixgbe_configure_msix(adapter); +	else +		ixgbe_configure_msi_and_legacy(adapter); + +	/* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */ +	if (hw->mac.ops.enable_tx_laser && +	    ((hw->phy.multispeed_fiber) || +	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && +	      (hw->mac.type == ixgbe_mac_82599EB)))) +		hw->mac.ops.enable_tx_laser(hw); + +	clear_bit(__IXGBE_DOWN, &adapter->state); +	ixgbe_napi_enable_all(adapter); + +	if (ixgbe_is_sfp(hw)) { +		ixgbe_sfp_link_config(adapter); +	} else { +		err = ixgbe_non_sfp_link_config(hw); +		if (err) +			e_err(probe, "link_config FAILED %d\n", err); +	} + +	/* clear any pending interrupts, may auto mask */ +	IXGBE_READ_REG(hw, IXGBE_EICR); +	ixgbe_irq_enable(adapter, true, true); + +	/* +	 * If this adapter has a fan, check to see if we had a failure +	 * before we 
enabled the interrupt. +	 */ +	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { +		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); +		if (esdp & IXGBE_ESDP_SDP1) +			e_crit(drv, "Fan has stopped, replace the adapter\n"); +	} + +	/* enable transmits */ +	netif_tx_start_all_queues(adapter->netdev); + +	/* bring the link up in the watchdog, this could race with our first +	 * link up interrupt but shouldn't be a problem */ +	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; +	adapter->link_check_timeout = jiffies; +	mod_timer(&adapter->service_timer, jiffies); + +	/* Set PF Reset Done bit so PF/VF Mail Ops can work */ +	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); +	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; +	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + +	return 0; +} + +void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) +{ +	WARN_ON(in_interrupt()); +	/* put off any impending NetWatchDogTimeout */ +	adapter->netdev->trans_start = jiffies; + +	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) +		usleep_range(1000, 2000); +	ixgbe_down(adapter); +	/* +	 * If SR-IOV enabled then wait a bit before bringing the adapter +	 * back up to give the VFs time to respond to the reset.  The +	 * two second wait is based upon the watchdog timer cycle in +	 * the VF driver. +	 */ +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) +		msleep(2000); +	ixgbe_up(adapter); +	clear_bit(__IXGBE_RESETTING, &adapter->state); +} + +int ixgbe_up(struct ixgbe_adapter *adapter) +{ +	/* hardware has been reset, we need to reload some things */ +	ixgbe_configure(adapter); + +	return ixgbe_up_complete(adapter); +} + +void ixgbe_reset(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int err; + +	/* lock SFP init bit to prevent race conditions with the watchdog */ +	while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) +		usleep_range(1000, 2000); + +	/* clear all SFP and link config related flags while holding SFP_INIT */ +	adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | +			     IXGBE_FLAG2_SFP_NEEDS_RESET); +	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + +	err = hw->mac.ops.init_hw(hw); +	switch (err) { +	case 0: +	case IXGBE_ERR_SFP_NOT_PRESENT: +	case IXGBE_ERR_SFP_NOT_SUPPORTED: +		break; +	case IXGBE_ERR_MASTER_REQUESTS_PENDING: +		e_dev_err("master disable timed out\n"); +		break; +	case IXGBE_ERR_EEPROM_VERSION: +		/* We are running on a pre-production device, log a warning */ +		e_dev_warn("This device is a pre-production adapter/LOM. " +			   "Please be aware there may be issues associated with " +			   "your hardware.  If you are experiencing problems " +			   "please contact your Intel or hardware " +			   "representative who provided you with this " +			   "hardware.\n"); +		break; +	default: +		e_dev_err("Hardware Error: %d\n", err); +	} + +	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + +	/* reprogram the RAR[0] in case user changed it. 
*/ +	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, +			    IXGBE_RAH_AV); +} + +/** + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) +{ +	struct device *dev = rx_ring->dev; +	unsigned long size; +	u16 i; + +	/* ring already cleared, nothing to do */ +	if (!rx_ring->rx_buffer_info) +		return; + +	/* Free all the Rx ring sk_buffs */ +	for (i = 0; i < rx_ring->count; i++) { +		struct ixgbe_rx_buffer *rx_buffer_info; + +		rx_buffer_info = &rx_ring->rx_buffer_info[i]; +		if (rx_buffer_info->dma) { +			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, +					 rx_ring->rx_buf_len, +					 DMA_FROM_DEVICE); +			rx_buffer_info->dma = 0; +		} +		if (rx_buffer_info->skb) { +			struct sk_buff *skb = rx_buffer_info->skb; +			rx_buffer_info->skb = NULL; +			do { +				struct sk_buff *this = skb; +				if (IXGBE_RSC_CB(this)->delay_unmap) { +					dma_unmap_single(dev, +							 IXGBE_RSC_CB(this)->dma, +							 rx_ring->rx_buf_len, +							 DMA_FROM_DEVICE); +					IXGBE_RSC_CB(this)->dma = 0; +					IXGBE_RSC_CB(skb)->delay_unmap = false; +				} +				skb = skb->prev; +				dev_kfree_skb(this); +			} while (skb); +		} +		if (!rx_buffer_info->page) +			continue; +		if (rx_buffer_info->page_dma) { +			dma_unmap_page(dev, rx_buffer_info->page_dma, +				       PAGE_SIZE / 2, DMA_FROM_DEVICE); +			rx_buffer_info->page_dma = 0; +		} +		put_page(rx_buffer_info->page); +		rx_buffer_info->page = NULL; +		rx_buffer_info->page_offset = 0; +	} + +	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; +	memset(rx_ring->rx_buffer_info, 0, size); + +	/* Zero out the descriptor ring */ +	memset(rx_ring->desc, 0, rx_ring->size); + +	rx_ring->next_to_clean = 0; +	rx_ring->next_to_use = 0; +} + +/** + * ixgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) +{ +	struct ixgbe_tx_buffer *tx_buffer_info; +	unsigned long size; +	u16 i; + +	/* ring already cleared, nothing to do */ +	if (!tx_ring->tx_buffer_info) +		return; + +	/* Free all the Tx ring sk_buffs */ +	for (i = 0; i < tx_ring->count; i++) { +		tx_buffer_info = &tx_ring->tx_buffer_info[i]; +		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); +	} + +	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; +	memset(tx_ring->tx_buffer_info, 0, size); + +	/* Zero out the descriptor ring */ +	memset(tx_ring->desc, 0, tx_ring->size); + +	tx_ring->next_to_use = 0; +	tx_ring->next_to_clean = 0; +} + +/** + * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) +{ +	int i; + +	for (i = 0; i < adapter->num_rx_queues; i++) +		ixgbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) +{ +	int i; + +	for (i = 0; i < adapter->num_tx_queues; i++) +		ixgbe_clean_tx_ring(adapter->tx_ring[i]); +} + +static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) +{ +	struct hlist_node *node, *node2; +	struct ixgbe_fdir_filter *filter; + +	spin_lock(&adapter->fdir_perfect_lock); + +	hlist_for_each_entry_safe(filter, node, node2, +				  &adapter->fdir_filter_list, fdir_node) { +		hlist_del(&filter->fdir_node); +		kfree(filter); +	} +	adapter->fdir_filter_count = 0; + +	
spin_unlock(&adapter->fdir_perfect_lock); +} + +void ixgbe_down(struct ixgbe_adapter *adapter) +{ +	struct net_device *netdev = adapter->netdev; +	struct ixgbe_hw *hw = &adapter->hw; +	u32 rxctrl; +	int i; +	int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + +	/* signal that we are down to the interrupt handler */ +	set_bit(__IXGBE_DOWN, &adapter->state); + +	/* disable receives */ +	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + +	/* disable all enabled rx queues */ +	for (i = 0; i < adapter->num_rx_queues; i++) +		/* this call also flushes the previous write */ +		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + +	usleep_range(10000, 20000); + +	netif_tx_stop_all_queues(netdev); + +	/* call carrier off first to avoid false dev_watchdog timeouts */ +	netif_carrier_off(netdev); +	netif_tx_disable(netdev); + +	ixgbe_irq_disable(adapter); + +	ixgbe_napi_disable_all(adapter); + +	adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | +			     IXGBE_FLAG2_RESET_REQUESTED); +	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + +	del_timer_sync(&adapter->service_timer); + +	/* disable receive for all VFs and wait one second */ +	if (adapter->num_vfs) { +		/* ping all the active vfs to let them know we are going down */ +		ixgbe_ping_all_vfs(adapter); + +		/* Disable all VFTE/VFRE TX/RX */ +		ixgbe_disable_tx_rx(adapter); + +		/* Mark all the VFs as inactive */ +		for (i = 0 ; i < adapter->num_vfs; i++) +			adapter->vfinfo[i].clear_to_send = 0; +	} + +	/* Cleanup the affinity_hint CPU mask memory and callback */ +	for (i = 0; i < num_q_vectors; i++) { +		struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; +		/* clear the affinity_mask in the IRQ descriptor */ +		irq_set_affinity_hint(adapter->msix_entries[i]. 
vector, NULL); +		/* release the CPU mask memory */ +		free_cpumask_var(q_vector->affinity_mask); +	} + +	/* disable transmits in the hardware now that interrupts are off */ +	for (i = 0; i < adapter->num_tx_queues; i++) { +		u8 reg_idx = adapter->tx_ring[i]->reg_idx; +		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); +	} + +	/* Disable the Tx DMA engine on 82599 and X540 */ +	switch (hw->mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, +				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & +				 ~IXGBE_DMATXCTL_TE)); +		break; +	default: +		break; +	} + +	if (!pci_channel_offline(adapter->pdev)) +		ixgbe_reset(adapter); + +	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */ +	if (hw->mac.ops.disable_tx_laser && +	    ((hw->phy.multispeed_fiber) || +	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && +	      (hw->mac.type == ixgbe_mac_82599EB)))) +		hw->mac.ops.disable_tx_laser(hw); + +	ixgbe_clean_all_tx_rings(adapter); +	ixgbe_clean_all_rx_rings(adapter); + +#ifdef CONFIG_IXGBE_DCA +	/* since we reset the hardware DCA settings were cleared */ +	ixgbe_setup_dca(adapter); +#endif +} + +/** + * ixgbe_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean + * + * This function is used for legacy and MSI, NAPI mode + **/ +static int ixgbe_poll(struct napi_struct *napi, int budget) +{ +	struct ixgbe_q_vector *q_vector = +				container_of(napi, struct ixgbe_q_vector, napi); +	struct ixgbe_adapter *adapter = q_vector->adapter; +	int tx_clean_complete, work_done = 0; + +#ifdef CONFIG_IXGBE_DCA +	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) +		ixgbe_update_dca(q_vector); +#endif + +	tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); +	ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget); + +	if (!tx_clean_complete) +		work_done = budget; + +	/* If budget not fully consumed, exit the polling mode */ +	if (work_done < budget) { +		napi_complete(napi); +		if (adapter->rx_itr_setting & 1) +			ixgbe_set_itr(q_vector); +		if (!test_bit(__IXGBE_DOWN, &adapter->state)) +			ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); +	} +	return work_done; +} + +/** + * ixgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbe_tx_timeout(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	/* Do the reset outside of interrupt context */ +	ixgbe_tx_timeout_reset(adapter); +} + +/** + * ixgbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) +{ +	bool ret = false; +	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; + +	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { +		f->mask = 0xF; +		adapter->num_rx_queues = f->indices; +		adapter->num_tx_queues = f->indices; +		ret = true; +	} else { +		ret = false; +	} + +	return ret; +} + +/** + * ixgbe_set_fdir_queues: Allocate queues for Flow Director + * @adapter: board private structure to initialize + * + * Flow Director is an advanced Rx filter, attempting to get Rx flows back + * to the original CPU that initiated the Tx session.  
This runs in addition + * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the + * Rx load across CPUs using RSS. + * + **/ +static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) +{ +	bool ret = false; +	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; + +	f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); +	f_fdir->mask = 0; + +	/* Flow Director must have RSS enabled */ +	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && +	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { +		adapter->num_tx_queues = f_fdir->indices; +		adapter->num_rx_queues = f_fdir->indices; +		ret = true; +	} else { +		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; +	} +	return ret; +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) + * @adapter: board private structure to initialize + * + * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. + * The ring feature mask is not used as a mask for FCoE, as it can take any 8 + * rx queues out of the max number of rx queues, instead, it is used as the + * index of the first rx queue used by FCoE. + * + **/ +static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + +	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) +		return false; + +	f->indices = min((int)num_online_cpus(), f->indices); + +	adapter->num_rx_queues = 1; +	adapter->num_tx_queues = 1; + +	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { +		e_info(probe, "FCoE enabled with RSS\n"); +		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) +			ixgbe_set_fdir_queues(adapter); +		else +			ixgbe_set_rss_queues(adapter); +	} + +	/* adding FCoE rx rings to the end */ +	f->mask = adapter->num_rx_queues; +	adapter->num_rx_queues += f->indices; +	adapter->num_tx_queues += f->indices; + +	return true; +} +#endif /* IXGBE_FCOE */ + +/* Artificial max queue cap per traffic class in DCB mode */ +#define DCB_QUEUE_CAP 8 + +#ifdef CONFIG_IXGBE_DCB +static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ +	int per_tc_q, q, i, offset = 0; +	struct net_device *dev = adapter->netdev; +	int tcs = netdev_get_num_tc(dev); + +	if (!tcs) +		return false; + +	/* Map queue offset and counts onto allocated tx queues */ +	per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP); +	q = min((int)num_online_cpus(), per_tc_q); + +	for (i = 0; i < tcs; i++) { +		netdev_set_prio_tc_map(dev, i, i); +		netdev_set_tc_queue(dev, i, q, offset); +		offset += q; +	} + +	adapter->num_tx_queues = q * tcs; +	adapter->num_rx_queues = q * tcs; + +#ifdef IXGBE_FCOE +	/* FCoE enabled queues require special configuration indexed +	 * by feature specific indices and mask. Here we map FCoE +	 * indices onto the DCB queue pairs allowing FCoE to own +	 * configuration later. +	 */ +	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { +		int tc; +		struct ixgbe_ring_feature *f = +					&adapter->ring_feature[RING_F_FCOE]; + +		tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); +		f->indices = dev->tc_to_txq[tc].count; +		f->mask = dev->tc_to_txq[tc].offset; +	} +#endif + +	return true; +} +#endif + +/** + * ixgbe_set_sriov_queues: Allocate queues for IOV use + * @adapter: board private structure to initialize + * + * IOV doesn't actually use anything, so just NAK the + * request for now and let the other queue routines + * figure out what to do. 
+ */ +static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) +{ +	return false; +} + +/* + * ixgbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine.  The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features.  This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +{ +	/* Start with base case */ +	adapter->num_rx_queues = 1; +	adapter->num_tx_queues = 1; +	adapter->num_rx_pools = adapter->num_rx_queues; +	adapter->num_rx_queues_per_pool = 1; + +	if (ixgbe_set_sriov_queues(adapter)) +		goto done; + +#ifdef CONFIG_IXGBE_DCB +	if (ixgbe_set_dcb_queues(adapter)) +		goto done; + +#endif +#ifdef IXGBE_FCOE +	if (ixgbe_set_fcoe_queues(adapter)) +		goto done; + +#endif /* IXGBE_FCOE */ +	if (ixgbe_set_fdir_queues(adapter)) +		goto done; + +	if (ixgbe_set_rss_queues(adapter)) +		goto done; + +	/* fallback to base case */ +	adapter->num_rx_queues = 1; +	adapter->num_tx_queues = 1; + +done: +	/* Notify the stack of the (possibly) reduced queue counts. */ +	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); +	return netif_set_real_num_rx_queues(adapter->netdev, +					    adapter->num_rx_queues); +} + +static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, +				       int vectors) +{ +	int err, vector_threshold; + +	/* We'll want at least 3 (vector_threshold): +	 * 1) TxQ[0] Cleanup +	 * 2) RxQ[0] Cleanup +	 * 3) Other (Link Status Change, etc.) +	 * 4) TCP Timer (optional) +	 */ +	vector_threshold = MIN_MSIX_COUNT; + +	/* The more we get, the more we will assign to Tx/Rx Cleanup +	 * for the separate queues...where Rx Cleanup >= Tx Cleanup. +	 * Right now, we simply care about how many we'll get; we'll +	 * set them up later while requesting irq's. +	 */ +	while (vectors >= vector_threshold) { +		err = pci_enable_msix(adapter->pdev, adapter->msix_entries, +				      vectors); +		if (!err) /* Success in acquiring all requested vectors. */ +			break; +		else if (err < 0) +			vectors = 0; /* Nasty failure, quit now */ +		else /* err == number of vectors we should try again with */ +			vectors = err; +	} + +	if (vectors < vector_threshold) { +		/* Can't allocate enough MSI-X interrupts?  Oh well. +		 * This just means we'll go with either a single MSI +		 * vector or fall back to legacy interrupts. +		 */ +		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, +			     "Unable to allocate MSI-X interrupts\n"); +		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; +		kfree(adapter->msix_entries); +		adapter->msix_entries = NULL; +	} else { +		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ +		/* +		 * Adjust for only the vectors we'll use, which is minimum +		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of +		 * vectors we were allocated. +		 */ +		adapter->num_msix_vectors = min(vectors, +				   adapter->max_msix_q_vectors + NON_Q_VECTORS); +	} +} + +/** + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS to the assigned rings. 
+ * + **/ +static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +{ +	int i; + +	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) +		return false; + +	for (i = 0; i < adapter->num_rx_queues; i++) +		adapter->rx_ring[i]->reg_idx = i; +	for (i = 0; i < adapter->num_tx_queues; i++) +		adapter->tx_ring[i]->reg_idx = i; + +	return true; +} + +#ifdef CONFIG_IXGBE_DCB + +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ +static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, +				    unsigned int *tx, unsigned int *rx) +{ +	struct net_device *dev = adapter->netdev; +	struct ixgbe_hw *hw = &adapter->hw; +	u8 num_tcs = netdev_get_num_tc(dev); + +	*tx = 0; +	*rx = 0; + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		*tx = tc << 2; +		*rx = tc << 3; +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		if (num_tcs == 8) { +			if (tc < 3) { +				*tx = tc << 5; +				*rx = tc << 4; +			} else if (tc <  5) { +				*tx = ((tc + 2) << 4); +				*rx = tc << 4; +			} else if (tc < num_tcs) { +				*tx = ((tc + 8) << 3); +				*rx = tc << 4; +			} +		} else if (num_tcs == 4) { +			*rx =  tc << 5; +			switch (tc) { +			case 0: +				*tx =  0; +				break; +			case 1: +				*tx = 64; +				break; +			case 2: +				*tx = 96; +				break; +			case 3: +				*tx = 112; +				break; +			default: +				break; +			} +		} +		break; +	default: +		break; +	} +} + +/** + * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. + * + **/ +static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) +{ +	struct net_device *dev = adapter->netdev; +	int i, j, k; +	u8 num_tcs = netdev_get_num_tc(dev); + +	if (!num_tcs) +		return false; + +	for (i = 0, k = 0; i < num_tcs; i++) { +		unsigned int tx_s, rx_s; +		u16 count = dev->tc_to_txq[i].count; + +		ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); +		for (j = 0; j < count; j++, k++) { +			adapter->tx_ring[k]->reg_idx = tx_s + j; +			adapter->rx_ring[k]->reg_idx = rx_s + j; +			adapter->tx_ring[k]->dcb_tc = i; +			adapter->rx_ring[k]->dcb_tc = i; +		} +	} + +	return true; +} +#endif + +/** + * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for Flow Director to the assigned rings. + * + **/ +static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) +{ +	int i; +	bool ret = false; + +	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && +	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { +		for (i = 0; i < adapter->num_rx_queues; i++) +			adapter->rx_ring[i]->reg_idx = i; +		for (i = 0; i < adapter->num_tx_queues; i++) +			adapter->tx_ring[i]->reg_idx = i; +		ret = true; +	} + +	return ret; +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for FCoE mode to the assigned rings. 
+ * + */ +static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; +	int i; +	u8 fcoe_rx_i = 0, fcoe_tx_i = 0; + +	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) +		return false; + +	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { +		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) +			ixgbe_cache_ring_fdir(adapter); +		else +			ixgbe_cache_ring_rss(adapter); + +		fcoe_rx_i = f->mask; +		fcoe_tx_i = f->mask; +	} +	for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { +		adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; +		adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; +	} +	return true; +} + +#endif /* IXGBE_FCOE */ +/** + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov + * @adapter: board private structure to initialize + * + * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used. + * + */ +static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) +{ +	adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; +	adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; +	if (adapter->num_vfs) +		return true; +	else +		return false; +} + +/** + * ixgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important.  It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) +{ +	/* start with default case */ +	adapter->rx_ring[0]->reg_idx = 0; +	adapter->tx_ring[0]->reg_idx = 0; + +	if (ixgbe_cache_ring_sriov(adapter)) +		return; + +#ifdef CONFIG_IXGBE_DCB +	if (ixgbe_cache_ring_dcb(adapter)) +		return; +#endif + +#ifdef IXGBE_FCOE +	if (ixgbe_cache_ring_fcoe(adapter)) +		return; +#endif /* IXGBE_FCOE */ + +	if (ixgbe_cache_ring_fdir(adapter)) +		return; + +	if (ixgbe_cache_ring_rss(adapter)) +		return; +} + +/** + * ixgbe_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time.  The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. 
+ **/ +static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) +{ +	int rx = 0, tx = 0, nid = adapter->node; + +	if (nid < 0 || !node_online(nid)) +		nid = first_online_node; + +	for (; tx < adapter->num_tx_queues; tx++) { +		struct ixgbe_ring *ring; + +		ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); +		if (!ring) +			ring = kzalloc(sizeof(*ring), GFP_KERNEL); +		if (!ring) +			goto err_allocation; +		ring->count = adapter->tx_ring_count; +		ring->queue_index = tx; +		ring->numa_node = nid; +		ring->dev = &adapter->pdev->dev; +		ring->netdev = adapter->netdev; + +		adapter->tx_ring[tx] = ring; +	} + +	for (; rx < adapter->num_rx_queues; rx++) { +		struct ixgbe_ring *ring; + +		ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); +		if (!ring) +			ring = kzalloc(sizeof(*ring), GFP_KERNEL); +		if (!ring) +			goto err_allocation; +		ring->count = adapter->rx_ring_count; +		ring->queue_index = rx; +		ring->numa_node = nid; +		ring->dev = &adapter->pdev->dev; +		ring->netdev = adapter->netdev; + +		adapter->rx_ring[rx] = ring; +	} + +	ixgbe_cache_ring_register(adapter); + +	return 0; + +err_allocation: +	while (tx) +		kfree(adapter->tx_ring[--tx]); + +	while (rx) +		kfree(adapter->rx_ring[--rx]); +	return -ENOMEM; +} + +/** + * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int err = 0; +	int vector, v_budget; + +	/* +	 * It's easy to be greedy for MSI-X vectors, but it really +	 * doesn't do us much good if we have a lot more vectors +	 * than CPU's.  So let's be conservative and only ask for +	 * (roughly) the same number of vectors as there are CPU's. +	 */ +	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, +		       (int)num_online_cpus()) + NON_Q_VECTORS; + +	/* +	 * At the same time, hardware can only support a maximum of +	 * hw.mac->max_msix_vectors vectors.  With features +	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx +	 * descriptor queues supported by our device.  Thus, we cap it off in +	 * those rare cases where the cpu count also exceeds our vector limit. +	 */ +	v_budget = min(v_budget, (int)hw->mac.max_msix_vectors); + +	/* A failure in MSI-X entry allocation isn't fatal, but it does +	 * mean we disable MSI-X capabilities of the adapter. */ +	adapter->msix_entries = kcalloc(v_budget, +					sizeof(struct msix_entry), GFP_KERNEL); +	if (adapter->msix_entries) { +		for (vector = 0; vector < v_budget; vector++) +			adapter->msix_entries[vector].entry = vector; + +		ixgbe_acquire_msix_vectors(adapter, v_budget); + +		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) +			goto out; +	} + +	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; +	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; +	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { +		e_err(probe, +		      "ATR is not supported while multiple " +		      "queues are disabled.  
Disabling Flow Director\n"); +	} +	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; +	adapter->atr_sample_rate = 0; +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) +		ixgbe_disable_sriov(adapter); + +	err = ixgbe_set_num_queues(adapter); +	if (err) +		return err; + +	err = pci_enable_msi(adapter->pdev); +	if (!err) { +		adapter->flags |= IXGBE_FLAG_MSI_ENABLED; +	} else { +		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, +			     "Unable to allocate MSI interrupt, " +			     "falling back to legacy.  Error: %d\n", err); +		/* reset err */ +		err = 0; +	} + +out: +	return err; +} + +/** + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt.  If allocation fails we + * return -ENOMEM. + **/ +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) +{ +	int q_idx, num_q_vectors; +	struct ixgbe_q_vector *q_vector; +	int (*poll)(struct napi_struct *, int); + +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { +		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; +		poll = &ixgbe_clean_rxtx_many; +	} else { +		num_q_vectors = 1; +		poll = &ixgbe_poll; +	} + +	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { +		q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), +					GFP_KERNEL, adapter->node); +		if (!q_vector) +			q_vector = kzalloc(sizeof(struct ixgbe_q_vector), +					   GFP_KERNEL); +		if (!q_vector) +			goto err_out; +		q_vector->adapter = adapter; +		if (q_vector->tx.count && !q_vector->rx.count) +			q_vector->eitr = adapter->tx_eitr_param; +		else +			q_vector->eitr = adapter->rx_eitr_param; +		q_vector->v_idx = q_idx; +		netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); +		adapter->q_vector[q_idx] = q_vector; +	} + +	return 0; + +err_out: +	while (q_idx) { +		q_idx--; +		q_vector = adapter->q_vector[q_idx]; +		netif_napi_del(&q_vector->napi); +		kfree(q_vector); +		adapter->q_vector[q_idx] = NULL; +	} +	return -ENOMEM; +} + +/** + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors.  In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) +{ +	int q_idx, num_q_vectors; + +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) +		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; +	else +		num_q_vectors = 1; + +	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { +		struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; +		adapter->q_vector[q_idx] = NULL; +		netif_napi_del(&q_vector->napi); +		kfree(q_vector); +	} +} + +static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) +{ +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { +		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; +		pci_disable_msix(adapter->pdev); +		kfree(adapter->msix_entries); +		adapter->msix_entries = NULL; +	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { +		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; +		pci_disable_msi(adapter->pdev); +	} +} + +/** + * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... 
+ * - Kernel support (MSI, MSI-X) + *   - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + *   - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) +{ +	int err; + +	/* Number of supported queues */ +	err = ixgbe_set_num_queues(adapter); +	if (err) +		return err; + +	err = ixgbe_set_interrupt_capability(adapter); +	if (err) { +		e_dev_err("Unable to setup interrupt capabilities\n"); +		goto err_set_interrupt; +	} + +	err = ixgbe_alloc_q_vectors(adapter); +	if (err) { +		e_dev_err("Unable to allocate memory for queue vectors\n"); +		goto err_alloc_q_vectors; +	} + +	err = ixgbe_alloc_queues(adapter); +	if (err) { +		e_dev_err("Unable to allocate memory for queues\n"); +		goto err_alloc_queues; +	} + +	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", +		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", +		   adapter->num_rx_queues, adapter->num_tx_queues); + +	set_bit(__IXGBE_DOWN, &adapter->state); + +	return 0; + +err_alloc_queues: +	ixgbe_free_q_vectors(adapter); +err_alloc_q_vectors: +	ixgbe_reset_interrupt_capability(adapter); +err_set_interrupt: +	return err; +} + +/** + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) +{ +	int i; + +	for (i = 0; i < adapter->num_tx_queues; i++) { +		kfree(adapter->tx_ring[i]); +		adapter->tx_ring[i] = NULL; +	} +	for (i = 0; i < adapter->num_rx_queues; i++) { +		struct ixgbe_ring *ring = adapter->rx_ring[i]; + +		/* ixgbe_get_stats64() might access this ring, we must wait +		 * a grace period before freeing it. +		 */ +		kfree_rcu(ring, rcu); +		adapter->rx_ring[i] = NULL; +	} + +	adapter->num_tx_queues = 0; +	adapter->num_rx_queues = 0; + +	ixgbe_free_q_vectors(adapter); +	ixgbe_reset_interrupt_capability(adapter); +} + +/** + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) + * @adapter: board private structure to initialize + * + * ixgbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ **/ +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	struct pci_dev *pdev = adapter->pdev; +	struct net_device *dev = adapter->netdev; +	unsigned int rss; +#ifdef CONFIG_IXGBE_DCB +	int j; +	struct tc_configuration *tc; +#endif +	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + +	/* PCI config space info */ + +	hw->vendor_id = pdev->vendor; +	hw->device_id = pdev->device; +	hw->revision_id = pdev->revision; +	hw->subsystem_vendor_id = pdev->subsystem_vendor; +	hw->subsystem_device_id = pdev->subsystem_device; + +	/* Set capability flags */ +	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); +	adapter->ring_feature[RING_F_RSS].indices = rss; +	adapter->flags |= IXGBE_FLAG_RSS_ENABLED; +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		if (hw->device_id == IXGBE_DEV_ID_82598AT) +			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; +		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; +		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; +		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; +		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) +			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; +		/* Flow Director hash filters enabled */ +		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; +		adapter->atr_sample_rate = 20; +		adapter->ring_feature[RING_F_FDIR].indices = +							 IXGBE_MAX_FDIR_INDICES; +		adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; +#ifdef IXGBE_FCOE +		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; +		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +		adapter->ring_feature[RING_F_FCOE].indices = 0; +#ifdef CONFIG_IXGBE_DCB +		/* Default traffic class to use for FCoE */ +		adapter->fcoe.up = IXGBE_FCOE_DEFTC; +#endif +#endif /* IXGBE_FCOE */ +		break; +	default: +		break; +	} + +	/* n-tuple support exists, always init our spinlock */ +	spin_lock_init(&adapter->fdir_perfect_lock); + +#ifdef CONFIG_IXGBE_DCB +	/* Configure DCB traffic classes */ +	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { +		tc = &adapter->dcb_cfg.tc_config[j]; +		tc->path[DCB_TX_CONFIG].bwg_id = 0; +		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); +		tc->path[DCB_RX_CONFIG].bwg_id = 0; +		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); +		tc->dcb_pfc = pfc_disabled; +	} +	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; +	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; +	adapter->dcb_cfg.pfc_mode_enable = false; +	adapter->dcb_set_bitmap = 0x00; +	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; +	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, +			   MAX_TRAFFIC_CLASS); + +#endif + +	/* default flow control settings */ +	hw->fc.requested_mode = ixgbe_fc_full; +	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */ +#ifdef CONFIG_DCB +	adapter->last_lfc_mode = hw->fc.current_mode; +#endif +	hw->fc.high_water = FC_HIGH_WATER(max_frame); +	hw->fc.low_water = FC_LOW_WATER(max_frame); +	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; +	hw->fc.send_xon = true; +	hw->fc.disable_fc_autoneg = false; + +	/* enable itr by default in dynamic mode */ +	adapter->rx_itr_setting = 1; +	adapter->rx_eitr_param = 20000; +	adapter->tx_itr_setting = 1; +	adapter->tx_eitr_param = 10000; + +	/* set defaults for eitr in MegaBytes */ +	adapter->eitr_low = 10; +	adapter->eitr_high = 20; + +	/* set default ring sizes */ +	adapter->tx_ring_count = IXGBE_DEFAULT_TXD; +	adapter->rx_ring_count = 
IXGBE_DEFAULT_RXD; + +	/* set default work limits */ +	adapter->tx_work_limit = adapter->tx_ring_count; + +	/* initialize eeprom parameters */ +	if (ixgbe_init_eeprom_params_generic(hw)) { +		e_dev_err("EEPROM initialization failed\n"); +		return -EIO; +	} + +	/* enable rx csum by default */ +	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + +	/* get assigned NUMA node */ +	adapter->node = dev_to_node(&pdev->dev); + +	set_bit(__IXGBE_DOWN, &adapter->state); + +	return 0; +} + +/** + * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring:    tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) +{ +	struct device *dev = tx_ring->dev; +	int size; + +	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; +	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); +	if (!tx_ring->tx_buffer_info) +		tx_ring->tx_buffer_info = vzalloc(size); +	if (!tx_ring->tx_buffer_info) +		goto err; + +	/* round up to nearest 4K */ +	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); +	tx_ring->size = ALIGN(tx_ring->size, 4096); + +	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, +					   &tx_ring->dma, GFP_KERNEL); +	if (!tx_ring->desc) +		goto err; + +	tx_ring->next_to_use = 0; +	tx_ring->next_to_clean = 0; +	return 0; + +err: +	vfree(tx_ring->tx_buffer_info); +	tx_ring->tx_buffer_info = NULL; +	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); +	return -ENOMEM; +} + +/** + * ixgbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not).  It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) +{ +	int i, err = 0; + +	for (i = 0; i < adapter->num_tx_queues; i++) { +		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); +		if (!err) +			continue; +		e_err(probe, "Allocation for Tx Queue %u failed\n", i); +		break; +	} + +	return err; +} + +/** + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring:    rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) +{ +	struct device *dev = rx_ring->dev; +	int size; + +	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; +	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); +	if (!rx_ring->rx_buffer_info) +		rx_ring->rx_buffer_info = vzalloc(size); +	if (!rx_ring->rx_buffer_info) +		goto err; + +	/* Round up to nearest 4K */ +	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); +	rx_ring->size = ALIGN(rx_ring->size, 4096); + +	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, +					   &rx_ring->dma, GFP_KERNEL); + +	if (!rx_ring->desc) +		goto err; + +	rx_ring->next_to_clean = 0; +	rx_ring->next_to_use = 0; + +	return 0; +err: +	vfree(rx_ring->rx_buffer_info); +	rx_ring->rx_buffer_info = NULL; +	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); +	return -ENOMEM; +} + +/** + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not).  It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) +{ +	int i, err = 0; + +	for (i = 0; i < adapter->num_rx_queues; i++) { +		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); +		if (!err) +			continue; +		e_err(probe, "Allocation for Rx Queue %u failed\n", i); +		break; +	} + +	return err; +} + +/** + * ixgbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) +{ +	ixgbe_clean_tx_ring(tx_ring); + +	vfree(tx_ring->tx_buffer_info); +	tx_ring->tx_buffer_info = NULL; + +	/* if not set, then don't free */ +	if (!tx_ring->desc) +		return; + +	dma_free_coherent(tx_ring->dev, tx_ring->size, +			  tx_ring->desc, tx_ring->dma); + +	tx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) +{ +	int i; + +	for (i = 0; i < adapter->num_tx_queues; i++) +		if (adapter->tx_ring[i]->desc) +			ixgbe_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * ixgbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) +{ +	ixgbe_clean_rx_ring(rx_ring); + +	vfree(rx_ring->rx_buffer_info); +	rx_ring->rx_buffer_info = NULL; + +	/* if not set, then don't free */ +	if (!rx_ring->desc) +		return; + +	dma_free_coherent(rx_ring->dev, rx_ring->size, +			  rx_ring->desc, rx_ring->dma); + +	rx_ring->desc = NULL; +} + +/** + * 
ixgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) +{ +	int i; + +	for (i = 0; i < adapter->num_rx_queues; i++) +		if (adapter->rx_ring[i]->desc) +			ixgbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * ixgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + +	/* MTU < 68 is an error and causes problems on some kernels */ +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED && +	    hw->mac.type != ixgbe_mac_X540) { +		if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) +			return -EINVAL; +	} else { +		if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) +			return -EINVAL; +	} + +	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); +	/* must set new MTU before calling down or up */ +	netdev->mtu = new_mtu; + +	hw->fc.high_water = FC_HIGH_WATER(max_frame); +	hw->fc.low_water = FC_LOW_WATER(max_frame); + +	if (netif_running(netdev)) +		ixgbe_reinit_locked(adapter); + +	return 0; +} + +/** + * ixgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP).  At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int ixgbe_open(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	int err; + +	/* disallow open during test */ +	if (test_bit(__IXGBE_TESTING, &adapter->state)) +		return -EBUSY; + +	netif_carrier_off(netdev); + +	/* allocate transmit descriptors */ +	err = ixgbe_setup_all_tx_resources(adapter); +	if (err) +		goto err_setup_tx; + +	/* allocate receive descriptors */ +	err = ixgbe_setup_all_rx_resources(adapter); +	if (err) +		goto err_setup_rx; + +	ixgbe_configure(adapter); + +	err = ixgbe_request_irq(adapter); +	if (err) +		goto err_req_irq; + +	err = ixgbe_up_complete(adapter); +	if (err) +		goto err_up; + +	netif_tx_start_all_queues(netdev); + +	return 0; + +err_up: +	ixgbe_release_hw_control(adapter); +	ixgbe_free_irq(adapter); +err_req_irq: +err_setup_rx: +	ixgbe_free_all_rx_resources(adapter); +err_setup_tx: +	ixgbe_free_all_tx_resources(adapter); +	ixgbe_reset(adapter); + +	return err; +} + +/** + * ixgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS.  The hardware is still under the drivers control, but + * needs to be disabled.  A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
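+ * Flow Director filters installed while the interface was up are also
+ * released here via ixgbe_fdir_filter_exit().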
+ **/ +static int ixgbe_close(struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	ixgbe_down(adapter); +	ixgbe_free_irq(adapter); + +	ixgbe_fdir_filter_exit(adapter); + +	ixgbe_free_all_tx_resources(adapter); +	ixgbe_free_all_rx_resources(adapter); + +	ixgbe_release_hw_control(adapter); + +	return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_resume(struct pci_dev *pdev) +{ +	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +	struct net_device *netdev = adapter->netdev; +	u32 err; + +	pci_set_power_state(pdev, PCI_D0); +	pci_restore_state(pdev); +	/* +	 * pci_restore_state clears dev->state_saved so call +	 * pci_save_state to restore it. +	 */ +	pci_save_state(pdev); + +	err = pci_enable_device_mem(pdev); +	if (err) { +		e_dev_err("Cannot enable PCI device from suspend\n"); +		return err; +	} +	pci_set_master(pdev); + +	pci_wake_from_d3(pdev, false); + +	err = ixgbe_init_interrupt_scheme(adapter); +	if (err) { +		e_dev_err("Cannot initialize interrupts for device\n"); +		return err; +	} + +	ixgbe_reset(adapter); + +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + +	if (netif_running(netdev)) { +		err = ixgbe_open(netdev); +		if (err) +			return err; +	} + +	netif_device_attach(netdev); + +	return 0; +} +#endif /* CONFIG_PM */ + +static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ +	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +	struct net_device *netdev = adapter->netdev; +	struct ixgbe_hw *hw = &adapter->hw; +	u32 ctrl, fctrl; +	u32 wufc = adapter->wol; +#ifdef CONFIG_PM +	int retval = 0; +#endif + +	netif_device_detach(netdev); + +	if (netif_running(netdev)) { +		ixgbe_down(adapter); +		ixgbe_free_irq(adapter); +		ixgbe_free_all_tx_resources(adapter); +		ixgbe_free_all_rx_resources(adapter); +	} + +	ixgbe_clear_interrupt_scheme(adapter); +#ifdef CONFIG_DCB +	kfree(adapter->ixgbe_ieee_pfc); +	kfree(adapter->ixgbe_ieee_ets); +#endif + +#ifdef CONFIG_PM +	retval = pci_save_state(pdev); +	if (retval) +		return retval; + +#endif +	if (wufc) { +		ixgbe_set_rx_mode(netdev); + +		/* turn on all-multi mode if wake on multicast is enabled */ +		if (wufc & IXGBE_WUFC_MC) { +			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); +			fctrl |= IXGBE_FCTRL_MPE; +			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +		} + +		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); +		ctrl |= IXGBE_CTRL_GIO_DIS; +		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + +		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); +	} else { +		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); +		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); +	} + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		pci_wake_from_d3(pdev, false); +		break; +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		pci_wake_from_d3(pdev, !!wufc); +		break; +	default: +		break; +	} + +	*enable_wake = !!wufc; + +	ixgbe_release_hw_control(adapter); + +	pci_disable_device(pdev); + +	return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ +	int retval; +	bool wake; + +	retval = __ixgbe_shutdown(pdev, &wake); +	if (retval) +		return retval; + +	if (wake) { +		pci_prepare_to_sleep(pdev); +	} else { +		pci_wake_from_d3(pdev, false); +		pci_set_power_state(pdev, PCI_D3hot); +	} + +	return 0; +} +#endif /* CONFIG_PM */ + +static void ixgbe_shutdown(struct pci_dev *pdev) +{ +	bool wake; + +	__ixgbe_shutdown(pdev, &wake); + +	if (system_state == SYSTEM_POWER_OFF) { +		pci_wake_from_d3(pdev, wake); +		pci_set_power_state(pdev, PCI_D3hot); +	} +} + +/** + * ixgbe_update_stats - Update the board statistics counters. 
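+ * Aggregates the per-ring software counters and the hardware statistics
+ * registers into the adapter and netdev statistics structures.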
+ * @adapter: board private structure + **/ +void ixgbe_update_stats(struct ixgbe_adapter *adapter) +{ +	struct net_device *netdev = adapter->netdev; +	struct ixgbe_hw *hw = &adapter->hw; +	struct ixgbe_hw_stats *hwstats = &adapter->stats; +	u64 total_mpc = 0; +	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; +	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; +	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; +	u64 bytes = 0, packets = 0; + +	if (test_bit(__IXGBE_DOWN, &adapter->state) || +	    test_bit(__IXGBE_RESETTING, &adapter->state)) +		return; + +	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { +		u64 rsc_count = 0; +		u64 rsc_flush = 0; +		for (i = 0; i < 16; i++) +			adapter->hw_rx_no_dma_resources += +				IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); +		for (i = 0; i < adapter->num_rx_queues; i++) { +			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; +			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; +		} +		adapter->rsc_total_count = rsc_count; +		adapter->rsc_total_flush = rsc_flush; +	} + +	for (i = 0; i < adapter->num_rx_queues; i++) { +		struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; +		non_eop_descs += rx_ring->rx_stats.non_eop_descs; +		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; +		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; +		bytes += rx_ring->stats.bytes; +		packets += rx_ring->stats.packets; +	} +	adapter->non_eop_descs = non_eop_descs; +	adapter->alloc_rx_page_failed = alloc_rx_page_failed; +	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; +	netdev->stats.rx_bytes = bytes; +	netdev->stats.rx_packets = packets; + +	bytes = 0; +	packets = 0; +	/* gather some stats to the adapter struct that are per queue */ +	for (i = 0; i < adapter->num_tx_queues; i++) { +		struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; +		restart_queue += tx_ring->tx_stats.restart_queue; +		tx_busy += tx_ring->tx_stats.tx_busy; +		bytes += tx_ring->stats.bytes; +		packets += tx_ring->stats.packets; +	} +	adapter->restart_queue = restart_queue; +	adapter->tx_busy = tx_busy; +	netdev->stats.tx_bytes = bytes; +	netdev->stats.tx_packets = packets; + +	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); +	for (i = 0; i < 8; i++) { +		/* for packet buffers not used, the register should read 0 */ +		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); +		missed_rx += mpc; +		hwstats->mpc[i] += mpc; +		total_mpc += hwstats->mpc[i]; +		if (hw->mac.type == ixgbe_mac_82598EB) +			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); +		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); +		hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); +		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); +		hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); +		switch (hw->mac.type) { +		case ixgbe_mac_82598EB: +			hwstats->pxonrxc[i] += +				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); +			break; +		case ixgbe_mac_82599EB: +		case ixgbe_mac_X540: +			hwstats->pxonrxc[i] += +				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); +			break; +		default: +			break; +		} +		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); +		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); +	} +	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); +	/* work around hardware counting issue */ +	hwstats->gprc -= missed_rx; + +	ixgbe_update_xoff_received(adapter); + +	/* 82598 hardware only has a 32 bit counter in the high register */ +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: +		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); +		hwstats->gorc 
+= IXGBE_READ_REG(hw, IXGBE_GORCH); +		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); +		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); +		break; +	case ixgbe_mac_X540: +		/* OS2BMC stats are X540 only*/ +		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); +		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); +		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); +		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); +	case ixgbe_mac_82599EB: +		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); +		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ +		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); +		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ +		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); +		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ +		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); +		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); +		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); +#ifdef IXGBE_FCOE +		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); +		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); +		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); +		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); +		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); +		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); +#endif /* IXGBE_FCOE */ +		break; +	default: +		break; +	} +	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); +	hwstats->bprc += bprc; +	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); +	if (hw->mac.type == ixgbe_mac_82598EB) +		hwstats->mprc -= bprc; +	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); +	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); +	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); +	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); +	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); +	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); +	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); +	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); +	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); +	hwstats->lxontxc += lxon; +	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); +	hwstats->lxofftxc += lxoff; +	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); +	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); +	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); +	/* +	 * 82598 errata - tx of flow control packets is included in tx counters +	 */ +	xon_off_tot = lxon + lxoff; +	hwstats->gptc -= xon_off_tot; +	hwstats->mptc -= xon_off_tot; +	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); +	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); +	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); +	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); +	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); +	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); +	hwstats->ptc64 -= xon_off_tot; +	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); +	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); +	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); +	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); +	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); +	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + +	/* Fill out the OS statistics structure */ +	netdev->stats.multicast = hwstats->mprc; + +	/* Rx Errors */ +	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; +	netdev->stats.rx_dropped = 0; +	netdev->stats.rx_length_errors = hwstats->rlec; +	netdev->stats.rx_crc_errors = hwstats->crcerrs; +	netdev->stats.rx_missed_errors = total_mpc; +} + +/** + * ixgbe_fdir_reinit_subtask - worker thread 
to reinit FDIR filter table
+ * @adapter - pointer to the device adapter structure
+ **/
+static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+
+	if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+		return;
+
+	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+
+	/* if interface is down do nothing */
+	if (test_bit(__IXGBE_DOWN, &adapter->state))
+		return;
+
+	/* do nothing if we are not using signature filters */
+	if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
+		return;
+
+	adapter->fdir_overflow++;
+
+	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+			        &(adapter->tx_ring[i]->state));
+		/* re-enable flow director interrupts */
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
+	} else {
+		e_err(probe, "failed to finish FDIR re-initialization, "
+		      "ignored adding FDIR ATR filters\n");
+	}
+}
+
+/**
+ * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter - pointer to the device adapter structure
+ *
+ * This function serves two purposes.  First it strobes the interrupt lines
+ * in order to make certain interrupts are occurring.  Secondly it sets the
+ * bits needed to check for TX hangs.  As a result we should immediately
+ * determine if a hang has occurred.
+ */
+static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 eics = 0;
+	int i;
+
+	/* If we're down or resetting, just bail */
+	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+	    test_bit(__IXGBE_RESETTING, &adapter->state))
+		return;
+
+	/* Force detection of hung controller */
+	if (netif_carrier_ok(adapter->netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			set_check_for_tx_hang(adapter->tx_ring[i]);
+	}
+
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+		/*
+		 * for legacy and MSI interrupts don't set any bits
+		 * that are enabled for EIAM, because this operation
+		 * would set *both* EIMS and EICS for any bit in EIAM
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_EICS,
+			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+	} else {
+		/* get one bit for every active tx/rx interrupt vector */
+		for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+			struct ixgbe_q_vector *qv = adapter->q_vector[i];
+			if (qv->rx.count || qv->tx.count)
+				eics |= ((u64)1 << i);
+		}
+	}
+
+	/* Cause software interrupt to ensure rings are cleaned */
+	ixgbe_irq_rearm_queues(adapter, eics);
+
+}
+
+/**
+ * ixgbe_watchdog_update_link - update the link status
+ * @adapter - pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 link_speed = adapter->link_speed;
+	bool link_up = adapter->link_up;
+	int i;
+
+	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
+		return;
+
+	if (hw->mac.ops.check_link) {
+		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+	} else {
+		/* always assume link is up, if no check link function */
+		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+		link_up = true;
+	}
+	if (link_up) {
+		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+			for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+				hw->mac.ops.fc_enable(hw, i);
+		} else {
+			hw->mac.ops.fc_enable(hw, 0);
+		}
+	}
+
+	if (link_up ||
+	    time_after(jiffies, (adapter->link_check_timeout +
+				 IXGBE_TRY_LINK_TIMEOUT))) { +		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; +		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); +		IXGBE_WRITE_FLUSH(hw); +	} + +	adapter->link_up = link_up; +	adapter->link_speed = link_speed; +} + +/** + * ixgbe_watchdog_link_is_up - update netif_carrier status and + *                             print link up message + * @adapter - pointer to the device adapter structure + **/ +static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) +{ +	struct net_device *netdev = adapter->netdev; +	struct ixgbe_hw *hw = &adapter->hw; +	u32 link_speed = adapter->link_speed; +	bool flow_rx, flow_tx; + +	/* only continue if link was previously down */ +	if (netif_carrier_ok(netdev)) +		return; + +	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; + +	switch (hw->mac.type) { +	case ixgbe_mac_82598EB: { +		u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); +		u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); +		flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); +		flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); +	} +		break; +	case ixgbe_mac_X540: +	case ixgbe_mac_82599EB: { +		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); +		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); +		flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); +		flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); +	} +		break; +	default: +		flow_tx = false; +		flow_rx = false; +		break; +	} +	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", +	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? +	       "10 Gbps" : +	       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? +	       "1 Gbps" : +	       (link_speed == IXGBE_LINK_SPEED_100_FULL ? +	       "100 Mbps" : +	       "unknown speed"))), +	       ((flow_rx && flow_tx) ? "RX/TX" : +	       (flow_rx ? "RX" : +	       (flow_tx ? "TX" : "None")))); + +	netif_carrier_on(netdev); +	ixgbe_check_vf_rate_limit(adapter); +} + +/** + * ixgbe_watchdog_link_is_down - update netif_carrier status and + *                               print link down message + * @adapter - pointer to the adapter structure + **/ +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter) +{ +	struct net_device *netdev = adapter->netdev; +	struct ixgbe_hw *hw = &adapter->hw; + +	adapter->link_up = false; +	adapter->link_speed = 0; + +	/* only continue if link was up previously */ +	if (!netif_carrier_ok(netdev)) +		return; + +	/* poll for SFP+ cable when link is down */ +	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) +		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + +	e_info(drv, "NIC Link is Down\n"); +	netif_carrier_off(netdev); +} + +/** + * ixgbe_watchdog_flush_tx - flush queues on link down + * @adapter - pointer to the device adapter structure + **/ +static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) +{ +	int i; +	int some_tx_pending = 0; + +	if (!netif_carrier_ok(adapter->netdev)) { +		for (i = 0; i < adapter->num_tx_queues; i++) { +			struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; +			if (tx_ring->next_to_use != tx_ring->next_to_clean) { +				some_tx_pending = 1; +				break; +			} +		} + +		if (some_tx_pending) { +			/* We've lost link, so the controller stops DMA, +			 * but we've got queued Tx work that's never going +			 * to get done, so reset controller to flush Tx. +			 * (Do the reset outside of interrupt context). 
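+			 * Setting IXGBE_FLAG2_RESET_REQUESTED here defers the
+			 * actual reinit to ixgbe_reset_subtask() on the next
+			 * service task run.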
+			 */ +			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; +		} +	} +} + +static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) +{ +	u32 ssvpc; + +	/* Do not perform spoof check for 82598 */ +	if (adapter->hw.mac.type == ixgbe_mac_82598EB) +		return; + +	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); + +	/* +	 * ssvpc register is cleared on read, if zero then no +	 * spoofed packets in the last interval. +	 */ +	if (!ssvpc) +		return; + +	e_warn(drv, "%d Spoofed packets detected\n", ssvpc); +} + +/** + * ixgbe_watchdog_subtask - check and bring link up + * @adapter - pointer to the device adapter structure + **/ +static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) +{ +	/* if interface is down do nothing */ +	if (test_bit(__IXGBE_DOWN, &adapter->state)) +		return; + +	ixgbe_watchdog_update_link(adapter); + +	if (adapter->link_up) +		ixgbe_watchdog_link_is_up(adapter); +	else +		ixgbe_watchdog_link_is_down(adapter); + +	ixgbe_spoof_check(adapter); +	ixgbe_update_stats(adapter); + +	ixgbe_watchdog_flush_tx(adapter); +} + +/** + * ixgbe_sfp_detection_subtask - poll for SFP+ cable + * @adapter - the ixgbe adapter structure + **/ +static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	s32 err; + +	/* not searching for SFP so there is nothing to do here */ +	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && +	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) +		return; + +	/* someone else is in init, wait until next service event */ +	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) +		return; + +	err = hw->phy.ops.identify_sfp(hw); +	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) +		goto sfp_out; + +	if (err == IXGBE_ERR_SFP_NOT_PRESENT) { +		/* If no cable is present, then we need to reset +		 * the next time we find a good cable. */ +		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; +	} + +	/* exit on error */ +	if (err) +		goto sfp_out; + +	/* exit if reset not needed */ +	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) +		goto sfp_out; + +	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; + +	/* +	 * A module may be identified correctly, but the EEPROM may not have +	 * support for that module.  setup_sfp() will fail in that case, so +	 * we should not allow that module to load. 
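+	 * When that happens the error is reported below and, if the netdev
+	 * has already been registered, the interface is unregistered so the
+	 * driver can be reloaded once a supported module is installed.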
+	 */ +	if (hw->mac.type == ixgbe_mac_82598EB) +		err = hw->phy.ops.reset(hw); +	else +		err = hw->mac.ops.setup_sfp(hw); + +	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) +		goto sfp_out; + +	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; +	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); + +sfp_out: +	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + +	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && +	    (adapter->netdev->reg_state == NETREG_REGISTERED)) { +		e_dev_err("failed to initialize because an unsupported " +			  "SFP+ module type was detected.\n"); +		e_dev_err("Reload the driver after installing a " +			  "supported module.\n"); +		unregister_netdev(adapter->netdev); +	} +} + +/** + * ixgbe_sfp_link_config_subtask - set up link SFP after module install + * @adapter - the ixgbe adapter structure + **/ +static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 autoneg; +	bool negotiation; + +	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) +		return; + +	/* someone else is in init, wait until next service event */ +	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) +		return; + +	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + +	autoneg = hw->phy.autoneg_advertised; +	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) +		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); +	hw->mac.autotry_restart = false; +	if (hw->mac.ops.setup_link) +		hw->mac.ops.setup_link(hw, autoneg, negotiation, true); + +	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; +	adapter->link_check_timeout = jiffies; +	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); +} + +/** + * ixgbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbe_service_timer(unsigned long data) +{ +	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; +	unsigned long next_event_offset; + +	/* poll faster when waiting for link */ +	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) +		next_event_offset = HZ / 10; +	else +		next_event_offset = HZ * 2; + +	/* Reset the timer */ +	mod_timer(&adapter->service_timer, next_event_offset + jiffies); + +	ixgbe_service_event_schedule(adapter); +} + +static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) +{ +	if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) +		return; + +	adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; + +	/* If we're already down or resetting, just bail */ +	if (test_bit(__IXGBE_DOWN, &adapter->state) || +	    test_bit(__IXGBE_RESETTING, &adapter->state)) +		return; + +	ixgbe_dump(adapter); +	netdev_err(adapter->netdev, "Reset adapter\n"); +	adapter->tx_timeout_count++; + +	ixgbe_reinit_locked(adapter); +} + +/** + * ixgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ixgbe_service_task(struct work_struct *work) +{ +	struct ixgbe_adapter *adapter = container_of(work, +						     struct ixgbe_adapter, +						     service_task); + +	ixgbe_reset_subtask(adapter); +	ixgbe_sfp_detection_subtask(adapter); +	ixgbe_sfp_link_config_subtask(adapter); +	ixgbe_check_overtemp_subtask(adapter); +	ixgbe_watchdog_subtask(adapter); +	ixgbe_fdir_reinit_subtask(adapter); +	ixgbe_check_hang_subtask(adapter); + +	ixgbe_service_event_complete(adapter); +} + +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, +		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ +	struct ixgbe_adv_tx_context_desc *context_desc; +	u16 i = 
tx_ring->next_to_use; + +	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i); + +	i++; +	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + +	/* set bits to identify this as an advanced context descriptor */ +	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + +	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens); +	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof); +	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd); +	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx); +} + +static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, +		     u32 tx_flags, __be16 protocol, u8 *hdr_len) +{ +	int err; +	u32 vlan_macip_lens, type_tucmd; +	u32 mss_l4len_idx, l4len; + +	if (!skb_is_gso(skb)) +		return 0; + +	if (skb_header_cloned(skb)) { +		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +		if (err) +			return err; +	} + +	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ +	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + +	if (protocol == __constant_htons(ETH_P_IP)) { +		struct iphdr *iph = ip_hdr(skb); +		iph->tot_len = 0; +		iph->check = 0; +		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, +							 iph->daddr, 0, +							 IPPROTO_TCP, +							 0); +		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; +	} else if (skb_is_gso_v6(skb)) { +		ipv6_hdr(skb)->payload_len = 0; +		tcp_hdr(skb)->check = +		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, +				     &ipv6_hdr(skb)->daddr, +				     0, IPPROTO_TCP, 0); +	} + +	l4len = tcp_hdrlen(skb); +	*hdr_len = skb_transport_offset(skb) + l4len; + +	/* mss_l4len_id: use 1 as index for TSO */ +	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; +	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; +	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + +	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ +	vlan_macip_lens = skb_network_header_len(skb); +	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; +	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + +	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, +	                  mss_l4len_idx); + +	return 1; +} + +static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, +			  struct sk_buff *skb, u32 tx_flags, +			  __be16 protocol) +{ +	u32 vlan_macip_lens = 0; +	u32 mss_l4len_idx = 0; +	u32 type_tucmd = 0; + +	if (skb->ip_summed != CHECKSUM_PARTIAL) { +	    if (!(tx_flags & IXGBE_TX_FLAGS_VLAN)) +			return false; +	} else { +		u8 l4_hdr = 0; +		switch (protocol) { +		case __constant_htons(ETH_P_IP): +			vlan_macip_lens |= skb_network_header_len(skb); +			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; +			l4_hdr = ip_hdr(skb)->protocol; +			break; +		case __constant_htons(ETH_P_IPV6): +			vlan_macip_lens |= skb_network_header_len(skb); +			l4_hdr = ipv6_hdr(skb)->nexthdr; +			break; +		default: +			if (unlikely(net_ratelimit())) { +				dev_warn(tx_ring->dev, +				 "partial checksum but proto=%x!\n", +				 skb->protocol); +			} +			break; +		} + +		switch (l4_hdr) { +		case IPPROTO_TCP: +			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; +			mss_l4len_idx = tcp_hdrlen(skb) << +					IXGBE_ADVTXD_L4LEN_SHIFT; +			break; +		case IPPROTO_SCTP: +			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; +			mss_l4len_idx = sizeof(struct sctphdr) << +					IXGBE_ADVTXD_L4LEN_SHIFT; +			break; +		case IPPROTO_UDP: +			mss_l4len_idx = sizeof(struct udphdr) << +					IXGBE_ADVTXD_L4LEN_SHIFT; +			break; +		default: +			if (unlikely(net_ratelimit())) { +				dev_warn(tx_ring->dev, +				 "partial checksum but l4 proto=%x!\n", +				 skb->protocol); +			} +			break; +		} +	} + +	
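+	/* Write the context descriptor even in the VLAN-only case so the
+	 * MACLEN and VLAN fields still reach the hardware; the return value
+	 * below reports whether a checksum offload was actually requested.
+	 */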
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; +	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + +	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, +			  type_tucmd, mss_l4len_idx); + +	return (skb->ip_summed == CHECKSUM_PARTIAL); +} + +static int ixgbe_tx_map(struct ixgbe_adapter *adapter, +			struct ixgbe_ring *tx_ring, +			struct sk_buff *skb, u32 tx_flags, +			unsigned int first, const u8 hdr_len) +{ +	struct device *dev = tx_ring->dev; +	struct ixgbe_tx_buffer *tx_buffer_info; +	unsigned int len; +	unsigned int total = skb->len; +	unsigned int offset = 0, size, count = 0; +	unsigned int nr_frags = skb_shinfo(skb)->nr_frags; +	unsigned int f; +	unsigned int bytecount = skb->len; +	u16 gso_segs = 1; +	u16 i; + +	i = tx_ring->next_to_use; + +	if (tx_flags & IXGBE_TX_FLAGS_FCOE) +		/* excluding fcoe_crc_eof for FCoE */ +		total -= sizeof(struct fcoe_crc_eof); + +	len = min(skb_headlen(skb), total); +	while (len) { +		tx_buffer_info = &tx_ring->tx_buffer_info[i]; +		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + +		tx_buffer_info->length = size; +		tx_buffer_info->mapped_as_page = false; +		tx_buffer_info->dma = dma_map_single(dev, +						     skb->data + offset, +						     size, DMA_TO_DEVICE); +		if (dma_mapping_error(dev, tx_buffer_info->dma)) +			goto dma_error; +		tx_buffer_info->time_stamp = jiffies; +		tx_buffer_info->next_to_watch = i; + +		len -= size; +		total -= size; +		offset += size; +		count++; + +		if (len) { +			i++; +			if (i == tx_ring->count) +				i = 0; +		} +	} + +	for (f = 0; f < nr_frags; f++) { +		struct skb_frag_struct *frag; + +		frag = &skb_shinfo(skb)->frags[f]; +		len = min((unsigned int)frag->size, total); +		offset = frag->page_offset; + +		while (len) { +			i++; +			if (i == tx_ring->count) +				i = 0; + +			tx_buffer_info = &tx_ring->tx_buffer_info[i]; +			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + +			tx_buffer_info->length = size; +			tx_buffer_info->dma = dma_map_page(dev, +							   frag->page, +							   offset, size, +							   DMA_TO_DEVICE); +			tx_buffer_info->mapped_as_page = true; +			if (dma_mapping_error(dev, tx_buffer_info->dma)) +				goto dma_error; +			tx_buffer_info->time_stamp = jiffies; +			tx_buffer_info->next_to_watch = i; + +			len -= size; +			total -= size; +			offset += size; +			count++; +		} +		if (total == 0) +			break; +	} + +	if (tx_flags & IXGBE_TX_FLAGS_TSO) +		gso_segs = skb_shinfo(skb)->gso_segs; +#ifdef IXGBE_FCOE +	/* adjust for FCoE Sequence Offload */ +	else if (tx_flags & IXGBE_TX_FLAGS_FSO) +		gso_segs = DIV_ROUND_UP(skb->len - hdr_len, +					skb_shinfo(skb)->gso_size); +#endif /* IXGBE_FCOE */ +	bytecount += (gso_segs - 1) * hdr_len; + +	/* multiply data chunks by size of headers */ +	tx_ring->tx_buffer_info[i].bytecount = bytecount; +	tx_ring->tx_buffer_info[i].gso_segs = gso_segs; +	tx_ring->tx_buffer_info[i].skb = skb; +	tx_ring->tx_buffer_info[first].next_to_watch = i; + +	return count; + +dma_error: +	e_dev_err("TX DMA map failed\n"); + +	/* clear timestamp and dma mappings for failed tx_buffer_info map */ +	tx_buffer_info->dma = 0; +	tx_buffer_info->time_stamp = 0; +	tx_buffer_info->next_to_watch = 0; +	if (count) +		count--; + +	/* clear timestamp and dma mappings for remaining portion of packet */ +	while (count--) { +		if (i == 0) +			i += tx_ring->count; +		i--; +		tx_buffer_info = &tx_ring->tx_buffer_info[i]; +		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); +	} + +	return 0; +} + +static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, +			   int 
tx_flags, int count, u32 paylen, u8 hdr_len) +{ +	union ixgbe_adv_tx_desc *tx_desc = NULL; +	struct ixgbe_tx_buffer *tx_buffer_info; +	u32 olinfo_status = 0, cmd_type_len = 0; +	unsigned int i; +	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; + +	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; + +	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; + +	if (tx_flags & IXGBE_TX_FLAGS_VLAN) +		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; + +	if (tx_flags & IXGBE_TX_FLAGS_TSO) { +		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + +		olinfo_status |= IXGBE_TXD_POPTS_TXSM << +				 IXGBE_ADVTXD_POPTS_SHIFT; + +		/* use index 1 context for tso */ +		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); +		if (tx_flags & IXGBE_TX_FLAGS_IPV4) +			olinfo_status |= IXGBE_TXD_POPTS_IXSM << +					 IXGBE_ADVTXD_POPTS_SHIFT; + +	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM) +		olinfo_status |= IXGBE_TXD_POPTS_TXSM << +				 IXGBE_ADVTXD_POPTS_SHIFT; + +	if (tx_flags & IXGBE_TX_FLAGS_FCOE) { +		olinfo_status |= IXGBE_ADVTXD_CC; +		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); +		if (tx_flags & IXGBE_TX_FLAGS_FSO) +			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; +	} + +	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); + +	i = tx_ring->next_to_use; +	while (count--) { +		tx_buffer_info = &tx_ring->tx_buffer_info[i]; +		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); +		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); +		tx_desc->read.cmd_type_len = +			cpu_to_le32(cmd_type_len | tx_buffer_info->length); +		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +		i++; +		if (i == tx_ring->count) +			i = 0; +	} + +	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); + +	/* +	 * Force memory writes to complete before letting h/w +	 * know there are new descriptors to fetch.  (Only +	 * applicable for weak-ordered memory model archs, +	 * such as IA-64). +	 */ +	wmb(); + +	tx_ring->next_to_use = i; +	writel(i, tx_ring->tail); +} + +static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, +		      u32 tx_flags, __be16 protocol) +{ +	struct ixgbe_q_vector *q_vector = ring->q_vector; +	union ixgbe_atr_hash_dword input = { .dword = 0 }; +	union ixgbe_atr_hash_dword common = { .dword = 0 }; +	union { +		unsigned char *network; +		struct iphdr *ipv4; +		struct ipv6hdr *ipv6; +	} hdr; +	struct tcphdr *th; +	__be16 vlan_id; + +	/* if ring doesn't have a interrupt vector, cannot perform ATR */ +	if (!q_vector) +		return; + +	/* do nothing if sampling is disabled */ +	if (!ring->atr_sample_rate) +		return; + +	ring->atr_count++; + +	/* snag network header to get L4 type and address */ +	hdr.network = skb_network_header(skb); + +	/* Currently only IPv4/IPv6 with TCP is supported */ +	if ((protocol != __constant_htons(ETH_P_IPV6) || +	     hdr.ipv6->nexthdr != IPPROTO_TCP) && +	    (protocol != __constant_htons(ETH_P_IP) || +	     hdr.ipv4->protocol != IPPROTO_TCP)) +		return; + +	th = tcp_hdr(skb); + +	/* skip this packet since the socket is closing */ +	if (th->fin) +		return; + +	/* sample on all syn packets or once every atr sample count */ +	if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) +		return; + +	/* reset sample count */ +	ring->atr_count = 0; + +	vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); + +	/* +	 * src and dst are inverted, think how the receiver sees them +	 * +	 * The input is broken into two sections, a non-compressed section +	 * containing vm_pool, vlan_id, and flow_type.  
The rest of the data +	 * is XORed together and stored in the compressed dword. +	 */ +	input.formatted.vlan_id = vlan_id; + +	/* +	 * since src port and flex bytes occupy the same word XOR them together +	 * and write the value to source port portion of compressed dword +	 */ +	if (vlan_id) +		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); +	else +		common.port.src ^= th->dest ^ protocol; +	common.port.dst ^= th->source; + +	if (protocol == __constant_htons(ETH_P_IP)) { +		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; +		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; +	} else { +		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; +		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ +			     hdr.ipv6->saddr.s6_addr32[1] ^ +			     hdr.ipv6->saddr.s6_addr32[2] ^ +			     hdr.ipv6->saddr.s6_addr32[3] ^ +			     hdr.ipv6->daddr.s6_addr32[0] ^ +			     hdr.ipv6->daddr.s6_addr32[1] ^ +			     hdr.ipv6->daddr.s6_addr32[2] ^ +			     hdr.ipv6->daddr.s6_addr32[3]; +	} + +	/* This assumes the Rx queue and Tx queue are bound to the same CPU */ +	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, +					      input, common, ring->queue_index); +} + +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ +	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); +	/* Herbert's original patch had: +	 *  smp_mb__after_netif_stop_queue(); +	 * but since that doesn't exist yet, just open code it. */ +	smp_mb(); + +	/* We need to check again in a case another CPU has just +	 * made room available. */ +	if (likely(ixgbe_desc_unused(tx_ring) < size)) +		return -EBUSY; + +	/* A reprieve! - use start_queue because it doesn't call schedule */ +	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); +	++tx_ring->tx_stats.restart_queue; +	return 0; +} + +static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ +	if (likely(ixgbe_desc_unused(tx_ring) >= size)) +		return 0; +	return __ixgbe_maybe_stop_tx(tx_ring, size); +} + +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) +{ +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	int txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : +					       smp_processor_id(); +#ifdef IXGBE_FCOE +	__be16 protocol = vlan_get_protocol(skb); + +	if (((protocol == htons(ETH_P_FCOE)) || +	    (protocol == htons(ETH_P_FIP))) && +	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { +		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); +		txq += adapter->ring_feature[RING_F_FCOE].mask; +		return txq; +	} +#endif + +	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { +		while (unlikely(txq >= dev->real_num_tx_queues)) +			txq -= dev->real_num_tx_queues; +		return txq; +	} + +	return skb_tx_hash(dev, skb); +} + +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, +			  struct ixgbe_adapter *adapter, +			  struct ixgbe_ring *tx_ring) +{ +	int tso; +	u32  tx_flags = 0; +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD +	unsigned short f; +#endif +	u16 first; +	u16 count = TXD_USE_COUNT(skb_headlen(skb)); +	__be16 protocol; +	u8 hdr_len = 0; + +	/* +	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, +	 *       + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD, +	 *       + 2 desc gap to keep tail from touching head, +	 *       + 1 desc for context descriptor, +	 * otherwise try next time +	 */ +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD +	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) +		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else +	count += skb_shinfo(skb)->nr_frags; +#endif +	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { +		tx_ring->tx_stats.tx_busy++; +		return NETDEV_TX_BUSY; +	} + +	protocol = vlan_get_protocol(skb); + +	if (vlan_tx_tag_present(skb)) { +		tx_flags |= vlan_tx_tag_get(skb); +		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { +			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; +			tx_flags |= tx_ring->dcb_tc << 13; +		} +		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; +		tx_flags |= IXGBE_TX_FLAGS_VLAN; +	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && +		   skb->priority != TC_PRIO_CONTROL) { +		tx_flags |= tx_ring->dcb_tc << 13; +		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; +		tx_flags |= IXGBE_TX_FLAGS_VLAN; +	} + +#ifdef IXGBE_FCOE +	/* for FCoE with DCB, we force the priority to what +	 * was specified by the switch */ +	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && +	    (protocol == htons(ETH_P_FCOE))) +		tx_flags |= IXGBE_TX_FLAGS_FCOE; + +#endif +	/* record the location of the first descriptor for this packet */ +	first = tx_ring->next_to_use; + +	if (tx_flags & IXGBE_TX_FLAGS_FCOE) { +#ifdef IXGBE_FCOE +		/* setup tx offload for FCoE */ +		tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len); +		if (tso < 0) +			goto out_drop; +		else if (tso) +			tx_flags |= IXGBE_TX_FLAGS_FSO; +#endif /* IXGBE_FCOE */ +	} else { +		if (protocol == htons(ETH_P_IP)) +			tx_flags |= IXGBE_TX_FLAGS_IPV4; +		tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len); +		if (tso < 0) +			goto out_drop; +		else if (tso) +			tx_flags |= IXGBE_TX_FLAGS_TSO; +		else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol)) +			tx_flags |= IXGBE_TX_FLAGS_CSUM; +	} + +	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); +	if (count) { +		/* add the ATR filter if ATR is on */ +		if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) +			ixgbe_atr(tx_ring, skb, tx_flags, protocol); +		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); +		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + +	} else { +		tx_ring->tx_buffer_info[first].time_stamp = 0; +		tx_ring->next_to_use = first; +		goto out_drop; +	} + +	return NETDEV_TX_OK; + +out_drop: +	dev_kfree_skb_any(skb); +	return 
NETDEV_TX_OK; +} + +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_ring *tx_ring; + +	tx_ring = adapter->tx_ring[skb->queue_mapping]; +	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +/** + * ixgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_set_mac(struct net_device *netdev, void *p) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	struct sockaddr *addr = p; + +	if (!is_valid_ether_addr(addr->sa_data)) +		return -EADDRNOTAVAIL; + +	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); +	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + +	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, +			    IXGBE_RAH_AV); + +	return 0; +} + +static int +ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	u16 value; +	int rc; + +	if (prtad != hw->phy.mdio.prtad) +		return -EINVAL; +	rc = hw->phy.ops.read_reg(hw, addr, devad, &value); +	if (!rc) +		rc = value; +	return rc; +} + +static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, +			    u16 addr, u16 value) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; + +	if (prtad != hw->phy.mdio.prtad) +		return -EINVAL; +	return hw->phy.ops.write_reg(hw, addr, devad, value); +} + +static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); + +	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); +} + +/** + * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding + * netdev->dev_addrs + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_add_sanmac_netdev(struct net_device *dev) +{ +	int err = 0; +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	struct ixgbe_mac_info *mac = &adapter->hw.mac; + +	if (is_valid_ether_addr(mac->san_addr)) { +		rtnl_lock(); +		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); +		rtnl_unlock(); +	} +	return err; +} + +/** + * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding + * netdev->dev_addrs + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_del_sanmac_netdev(struct net_device *dev) +{ +	int err = 0; +	struct ixgbe_adapter *adapter = netdev_priv(dev); +	struct ixgbe_mac_info *mac = &adapter->hw.mac; + +	if (is_valid_ether_addr(mac->san_addr)) { +		rtnl_lock(); +		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); +		rtnl_unlock(); +	} +	return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
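+ * The MSI-X clean routines are invoked directly for each queue vector
+ * (or the legacy interrupt handler when MSI-X is not enabled) with
+ * IXGBE_FLAG_IN_NETPOLL set for the duration of the call.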
+ */
+static void ixgbe_netpoll(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	/* if interface is down do nothing */
+	if (test_bit(__IXGBE_DOWN, &adapter->state))
+		return;
+
+	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+		for (i = 0; i < num_q_vectors; i++) {
+			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+			ixgbe_msix_clean_many(0, q_vector);
+		}
+	} else {
+		ixgbe_intr(adapter->pdev->irq, netdev);
+	}
+	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+}
+#endif
+
+static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
+						   struct rtnl_link_stats64 *stats)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes   = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			stats->rx_packets += packets;
+			stats->rx_bytes   += bytes;
+		}
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes   = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes   += bytes;
+		}
+	}
+	rcu_read_unlock();
+	/* following stats updated by ixgbe_update_stats() */
+	stats->multicast	= netdev->stats.multicast;
+	stats->rx_errors	= netdev->stats.rx_errors;
+	stats->rx_length_errors	= netdev->stats.rx_length_errors;
+	stats->rx_crc_errors	= netdev->stats.rx_crc_errors;
+	stats->rx_missed_errors	= netdev->stats.rx_missed_errors;
+	return stats;
+}
+
+/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
+ * @tc: number of traffic classes currently enabled
+ *
+ * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
+ * each 802.1Q priority maps to a packet buffer that exists.
+ */
+static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 reg, rsave;
+	int i;
+
+	/* 82598 devices have a static priority to TC mapping that cannot
+	 * be changed, so no validation is needed.
+	 */
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+	rsave = reg;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
+
+		/* If up2tc is out of bounds default to zero */
+		if (up2tc > tc)
+			reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
+	}
+
+	if (reg != rsave)
+		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+	return;
+}
+
+
+/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
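+ * Changing the traffic class count requires the interrupt scheme to be
+ * rebuilt, so a running interface is closed and reopened around the
+ * update.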
+ *
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	/* If DCB is enabled do not remove traffic classes, multiple
+	 * traffic classes are required to implement DCB
+	 */
+	if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+		return 0;
+
+	/* Hardware supports up to 8 traffic classes */
+	if (tc > MAX_TRAFFIC_CLASS ||
+	    (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+		return -EINVAL;
+
+	/* Hardware has to reinitialize queues and interrupts to
+	 * match packet buffer alignment. Unfortunately, the
+	 * hardware is not flexible enough to do this dynamically.
+	 */
+	if (netif_running(dev))
+		ixgbe_close(dev);
+	ixgbe_clear_interrupt_scheme(adapter);
+
+	if (tc)
+		netdev_set_num_tc(dev, tc);
+	else
+		netdev_reset_tc(dev);
+
+	ixgbe_init_interrupt_scheme(adapter);
+	ixgbe_validate_rtr(adapter, tc);
+	if (netif_running(dev))
+		ixgbe_open(dev);
+
+	return 0;
+}
+
+void ixgbe_do_reset(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		ixgbe_reinit_locked(adapter);
+	else
+		ixgbe_reset(adapter);
+}
+
+static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+#ifdef CONFIG_DCB
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+		data &= ~NETIF_F_HW_VLAN_RX;
+#endif
+
+	/* turn off RXHASH when RSS is not supported */
+	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+		data &= ~NETIF_F_RXHASH;
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(data & NETIF_F_RXCSUM))
+		data &= ~NETIF_F_LRO;
+
+	/* Turn off LRO if not RSC capable or invalid ITR settings */
+	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+		data &= ~NETIF_F_LRO;
+	} else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+		   (adapter->rx_itr_setting != 1 &&
+		    adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
+		data &= ~NETIF_F_LRO;
+		e_info(probe, "rx-usecs set too low, not enabling RSC\n");
+	}
+
+	return data;
+}
+
+static int ixgbe_set_features(struct net_device *netdev, u32 data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	bool need_reset = false;
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(data & NETIF_F_RXCSUM))
+		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+	else
+		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+	/* Make sure RSC matches LRO, reset if change */
+	if (!!(data & NETIF_F_LRO) !=
+	     !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+		adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+		switch (adapter->hw.mac.type) {
+		case ixgbe_mac_X540:
+		case ixgbe_mac_82599EB:
+			need_reset = true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/*
+	 * Check if Flow Director n-tuple support was enabled or disabled.  If
+	 * the state changed, we need to reset.
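+	 * The driver treats ATR (hashed) filtering and perfect (n-tuple)
+	 * filtering as mutually exclusive, so the two capability flags are
+	 * flipped as a pair below.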
+	 */ +	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { +		/* turn off ATR, enable perfect filters and reset */ +		if (data & NETIF_F_NTUPLE) { +			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; +			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; +			need_reset = true; +		} +	} else if (!(data & NETIF_F_NTUPLE)) { +		/* turn off Flow Director, set ATR and reset */ +		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; +		if ((adapter->flags &  IXGBE_FLAG_RSS_ENABLED) && +		    !(adapter->flags &  IXGBE_FLAG_DCB_ENABLED)) +			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; +		need_reset = true; +	} + +	if (need_reset) +		ixgbe_do_reset(netdev); + +	return 0; + +} + +static const struct net_device_ops ixgbe_netdev_ops = { +	.ndo_open		= ixgbe_open, +	.ndo_stop		= ixgbe_close, +	.ndo_start_xmit		= ixgbe_xmit_frame, +	.ndo_select_queue	= ixgbe_select_queue, +	.ndo_set_rx_mode        = ixgbe_set_rx_mode, +	.ndo_set_multicast_list	= ixgbe_set_rx_mode, +	.ndo_validate_addr	= eth_validate_addr, +	.ndo_set_mac_address	= ixgbe_set_mac, +	.ndo_change_mtu		= ixgbe_change_mtu, +	.ndo_tx_timeout		= ixgbe_tx_timeout, +	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid, +	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid, +	.ndo_do_ioctl		= ixgbe_ioctl, +	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac, +	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan, +	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw, +	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config, +	.ndo_get_stats64	= ixgbe_get_stats64, +	.ndo_setup_tc		= ixgbe_setup_tc, +#ifdef CONFIG_NET_POLL_CONTROLLER +	.ndo_poll_controller	= ixgbe_netpoll, +#endif +#ifdef IXGBE_FCOE +	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, +	.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, +	.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, +	.ndo_fcoe_enable = ixgbe_fcoe_enable, +	.ndo_fcoe_disable = ixgbe_fcoe_disable, +	.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, +#endif /* IXGBE_FCOE */ +	.ndo_set_features = ixgbe_set_features, +	.ndo_fix_features = ixgbe_fix_features, +}; + +static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, +			   const struct ixgbe_info *ii) +{ +#ifdef CONFIG_PCI_IOV +	struct ixgbe_hw *hw = &adapter->hw; +	int err; +	int num_vf_macvlans, i; +	struct vf_macvlans *mv_list; + +	if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs) +		return; + +	/* The 82599 supports up to 64 VFs per physical function +	 * but this implementation limits allocation to 63 so that +	 * basic networking resources are still available to the +	 * physical function +	 */ +	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; +	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; +	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); +	if (err) { +		e_err(probe, "Failed to enable PCI sriov: %d\n", err); +		goto err_novfs; +	} + +	num_vf_macvlans = hw->mac.num_rar_entries - +		(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + +	adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, +					     sizeof(struct vf_macvlans), +					     GFP_KERNEL); +	if (mv_list) { +		/* Initialize list of VF macvlans */ +		INIT_LIST_HEAD(&adapter->vf_mvs.l); +		for (i = 0; i < num_vf_macvlans; i++) { +			mv_list->vf = -1; +			mv_list->free = true; +			mv_list->rar_entry = hw->mac.num_rar_entries - +				(i + adapter->num_vfs + 1); +			list_add(&mv_list->l, &adapter->vf_mvs.l); +			mv_list++; +		} +	} + +	/* If call to enable VFs succeeded then allocate memory +	 * for per VF control structures. 
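+	 * If that allocation fails, SR-IOV is disabled again below rather
+	 * than running with VFs that have no control structures.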
+	 */ +	adapter->vfinfo = +		kcalloc(adapter->num_vfs, +			sizeof(struct vf_data_storage), GFP_KERNEL); +	if (adapter->vfinfo) { +		/* Now that we're sure SR-IOV is enabled +		 * and memory allocated set up the mailbox parameters +		 */ +		ixgbe_init_mbx_params_pf(hw); +		memcpy(&hw->mbx.ops, ii->mbx_ops, +		       sizeof(hw->mbx.ops)); + +		/* Disable RSC when in SR-IOV mode */ +		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | +				     IXGBE_FLAG2_RSC_ENABLED); +		return; +	} + +	/* Oh oh */ +	e_err(probe, "Unable to allocate memory for VF Data Storage - " +	      "SRIOV disabled\n"); +	pci_disable_sriov(adapter->pdev); + +err_novfs: +	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; +	adapter->num_vfs = 0; +#endif /* CONFIG_PCI_IOV */ +} + +/** + * ixgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int __devinit ixgbe_probe(struct pci_dev *pdev, +				 const struct pci_device_id *ent) +{ +	struct net_device *netdev; +	struct ixgbe_adapter *adapter = NULL; +	struct ixgbe_hw *hw; +	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; +	static int cards_found; +	int i, err, pci_using_dac; +	u8 part_str[IXGBE_PBANUM_LENGTH]; +	unsigned int indices = num_possible_cpus(); +#ifdef IXGBE_FCOE +	u16 device_caps; +#endif +	u32 eec; + +	/* Catch broken hardware that put the wrong VF device ID in +	 * the PCIe SR-IOV capability. +	 */ +	if (pdev->is_virtfn) { +		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", +		     pci_name(pdev), pdev->vendor, pdev->device); +		return -EINVAL; +	} + +	err = pci_enable_device_mem(pdev); +	if (err) +		return err; + +	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && +	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { +		pci_using_dac = 1; +	} else { +		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); +		if (err) { +			err = dma_set_coherent_mask(&pdev->dev, +						    DMA_BIT_MASK(32)); +			if (err) { +				dev_err(&pdev->dev, +					"No usable DMA configuration, aborting\n"); +				goto err_dma; +			} +		} +		pci_using_dac = 0; +	} + +	err = pci_request_selected_regions(pdev, pci_select_bars(pdev, +					   IORESOURCE_MEM), ixgbe_driver_name); +	if (err) { +		dev_err(&pdev->dev, +			"pci_request_selected_regions failed 0x%x\n", err); +		goto err_pci_reg; +	} + +	pci_enable_pcie_error_reporting(pdev); + +	pci_set_master(pdev); +	pci_save_state(pdev); + +#ifdef CONFIG_IXGBE_DCB +	indices *= MAX_TRAFFIC_CLASS; +#endif + +	if (ii->mac == ixgbe_mac_82598EB) +		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); +	else +		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); + +#ifdef IXGBE_FCOE +	indices += min_t(unsigned int, num_possible_cpus(), +			 IXGBE_MAX_FCOE_INDICES); +#endif +	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); +	if (!netdev) { +		err = -ENOMEM; +		goto err_alloc_etherdev; +	} + +	SET_NETDEV_DEV(netdev, &pdev->dev); + +	adapter = netdev_priv(netdev); +	pci_set_drvdata(pdev, adapter); + +	adapter->netdev = netdev; +	adapter->pdev = pdev; +	hw = &adapter->hw; +	hw->back = adapter; +	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + +	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), +			      pci_resource_len(pdev, 0)); +	if (!hw->hw_addr) { +		err = -EIO; +		goto 
err_ioremap; +	} + +	for (i = 1; i <= 5; i++) { +		if (pci_resource_len(pdev, i) == 0) +			continue; +	} + +	netdev->netdev_ops = &ixgbe_netdev_ops; +	ixgbe_set_ethtool_ops(netdev); +	netdev->watchdog_timeo = 5 * HZ; +	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + +	adapter->bd_number = cards_found; + +	/* Setup hw api */ +	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); +	hw->mac.type  = ii->mac; + +	/* EEPROM */ +	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); +	eec = IXGBE_READ_REG(hw, IXGBE_EEC); +	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ +	if (!(eec & (1 << 8))) +		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; + +	/* PHY */ +	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); +	hw->phy.sfp_type = ixgbe_sfp_type_unknown; +	/* ixgbe_identify_phy_generic will set prtad and mmds properly */ +	hw->phy.mdio.prtad = MDIO_PRTAD_NONE; +	hw->phy.mdio.mmds = 0; +	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; +	hw->phy.mdio.dev = netdev; +	hw->phy.mdio.mdio_read = ixgbe_mdio_read; +	hw->phy.mdio.mdio_write = ixgbe_mdio_write; + +	ii->get_invariants(hw); + +	/* setup the private structure */ +	err = ixgbe_sw_init(adapter); +	if (err) +		goto err_sw_init; + +	/* Make it possible the adapter to be woken up via WOL */ +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); +		break; +	default: +		break; +	} + +	/* +	 * If there is a fan on this device and it has failed log the +	 * failure. +	 */ +	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { +		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); +		if (esdp & IXGBE_ESDP_SDP1) +			e_crit(probe, "Fan has stopped, replace the adapter\n"); +	} + +	/* reset_hw fills in the perm_addr as well */ +	hw->phy.reset_if_overtemp = true; +	err = hw->mac.ops.reset_hw(hw); +	hw->phy.reset_if_overtemp = false; +	if (err == IXGBE_ERR_SFP_NOT_PRESENT && +	    hw->mac.type == ixgbe_mac_82598EB) { +		err = 0; +	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { +		e_dev_err("failed to load because an unsupported SFP+ " +			  "module type was detected.\n"); +		e_dev_err("Reload the driver after installing a supported " +			  "module.\n"); +		goto err_sw_init; +	} else if (err) { +		e_dev_err("HW Init failed: %d\n", err); +		goto err_sw_init; +	} + +	ixgbe_probe_vf(adapter, ii); + +	netdev->features = NETIF_F_SG | +			   NETIF_F_IP_CSUM | +			   NETIF_F_IPV6_CSUM | +			   NETIF_F_HW_VLAN_TX | +			   NETIF_F_HW_VLAN_RX | +			   NETIF_F_HW_VLAN_FILTER | +			   NETIF_F_TSO | +			   NETIF_F_TSO6 | +			   NETIF_F_GRO | +			   NETIF_F_RXHASH | +			   NETIF_F_RXCSUM; + +	netdev->hw_features = netdev->features; + +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		netdev->features |= NETIF_F_SCTP_CSUM; +		netdev->hw_features |= NETIF_F_SCTP_CSUM | +				       NETIF_F_NTUPLE; +		break; +	default: +		break; +	} + +	netdev->vlan_features |= NETIF_F_TSO; +	netdev->vlan_features |= NETIF_F_TSO6; +	netdev->vlan_features |= NETIF_F_IP_CSUM; +	netdev->vlan_features |= NETIF_F_IPV6_CSUM; +	netdev->vlan_features |= NETIF_F_SG; + +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) +		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | +				    IXGBE_FLAG_DCB_ENABLED); + +#ifdef CONFIG_IXGBE_DCB +	netdev->dcbnl_ops = &dcbnl_ops; +#endif + +#ifdef IXGBE_FCOE +	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { +		if (hw->mac.ops.get_device_caps) { +			hw->mac.ops.get_device_caps(hw, 
&device_caps); +			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) +				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; +		} +	} +	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { +		netdev->vlan_features |= NETIF_F_FCOE_CRC; +		netdev->vlan_features |= NETIF_F_FSO; +		netdev->vlan_features |= NETIF_F_FCOE_MTU; +	} +#endif /* IXGBE_FCOE */ +	if (pci_using_dac) { +		netdev->features |= NETIF_F_HIGHDMA; +		netdev->vlan_features |= NETIF_F_HIGHDMA; +	} + +	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) +		netdev->hw_features |= NETIF_F_LRO; +	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) +		netdev->features |= NETIF_F_LRO; + +	/* make sure the EEPROM is good */ +	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { +		e_dev_err("The EEPROM Checksum Is Not Valid\n"); +		err = -EIO; +		goto err_eeprom; +	} + +	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); +	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); + +	if (ixgbe_validate_mac_addr(netdev->perm_addr)) { +		e_dev_err("invalid MAC address\n"); +		err = -EIO; +		goto err_eeprom; +	} + +	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */ +	if (hw->mac.ops.disable_tx_laser && +	    ((hw->phy.multispeed_fiber) || +	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && +	      (hw->mac.type == ixgbe_mac_82599EB)))) +		hw->mac.ops.disable_tx_laser(hw); + +	setup_timer(&adapter->service_timer, &ixgbe_service_timer, +	            (unsigned long) adapter); + +	INIT_WORK(&adapter->service_task, ixgbe_service_task); +	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); + +	err = ixgbe_init_interrupt_scheme(adapter); +	if (err) +		goto err_sw_init; + +	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { +		netdev->hw_features &= ~NETIF_F_RXHASH; +		netdev->features &= ~NETIF_F_RXHASH; +	} + +	switch (pdev->device) { +	case IXGBE_DEV_ID_82599_SFP: +		/* Only this subdevice supports WOL */ +		if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) +			adapter->wol = IXGBE_WUFC_MAG; +		break; +	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: +		/* All except this subdevice support WOL */ +		if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) +			adapter->wol = IXGBE_WUFC_MAG; +		break; +	case IXGBE_DEV_ID_82599_KX4: +		adapter->wol = IXGBE_WUFC_MAG; +		break; +	default: +		adapter->wol = 0; +		break; +	} +	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + +	/* pick up the PCI bus settings for reporting later */ +	hw->mac.ops.get_bus_info(hw); + +	/* print bus type/speed/width info */ +	e_dev_info("(PCI Express:%s:%s) %pM\n", +		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" : +		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : +		    "Unknown"), +		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : +		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : +		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? 
"Width x1" : +		    "Unknown"), +		   netdev->dev_addr); + +	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); +	if (err) +		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); +	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) +		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", +			   hw->mac.type, hw->phy.type, hw->phy.sfp_type, +		           part_str); +	else +		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", +			   hw->mac.type, hw->phy.type, part_str); + +	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { +		e_dev_warn("PCI-Express bandwidth available for this card is " +			   "not sufficient for optimal performance.\n"); +		e_dev_warn("For optimal performance a x8 PCI-Express slot " +			   "is required.\n"); +	} + +	/* save off EEPROM version number */ +	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version); + +	/* reset the hardware with the new settings */ +	err = hw->mac.ops.start_hw(hw); + +	if (err == IXGBE_ERR_EEPROM_VERSION) { +		/* We are running on a pre-production device, log a warning */ +		e_dev_warn("This device is a pre-production adapter/LOM. " +			   "Please be aware there may be issues associated " +			   "with your hardware.  If you are experiencing " +			   "problems please contact your Intel or hardware " +			   "representative who provided you with this " +			   "hardware.\n"); +	} +	strcpy(netdev->name, "eth%d"); +	err = register_netdev(netdev); +	if (err) +		goto err_register; + +	/* carrier off reporting is important to ethtool even BEFORE open */ +	netif_carrier_off(netdev); + +#ifdef CONFIG_IXGBE_DCA +	if (dca_add_requester(&pdev->dev) == 0) { +		adapter->flags |= IXGBE_FLAG_DCA_ENABLED; +		ixgbe_setup_dca(adapter); +	} +#endif +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); +		for (i = 0; i < adapter->num_vfs; i++) +			ixgbe_vf_configuration(pdev, (i | 0x10000000)); +	} + +	/* Inform firmware of driver version */ +	if (hw->mac.ops.set_fw_drv_ver) +		hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD, +					   FW_CEM_UNUSED_VER); + +	/* add san mac addr to netdev */ +	ixgbe_add_sanmac_netdev(netdev); + +	e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); +	cards_found++; +	return 0; + +err_register: +	ixgbe_release_hw_control(adapter); +	ixgbe_clear_interrupt_scheme(adapter); +err_sw_init: +err_eeprom: +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) +		ixgbe_disable_sriov(adapter); +	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; +	iounmap(hw->hw_addr); +err_ioremap: +	free_netdev(netdev); +err_alloc_etherdev: +	pci_release_selected_regions(pdev, +				     pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: +	pci_disable_device(pdev); +	return err; +} + +/** + * ixgbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device.  The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
+ **/ +static void __devexit ixgbe_remove(struct pci_dev *pdev) +{ +	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +	struct net_device *netdev = adapter->netdev; + +	set_bit(__IXGBE_DOWN, &adapter->state); +	cancel_work_sync(&adapter->service_task); + +#ifdef CONFIG_IXGBE_DCA +	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { +		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; +		dca_remove_requester(&pdev->dev); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); +	} + +#endif +#ifdef IXGBE_FCOE +	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) +		ixgbe_cleanup_fcoe(adapter); + +#endif /* IXGBE_FCOE */ + +	/* remove the added san mac */ +	ixgbe_del_sanmac_netdev(netdev); + +	if (netdev->reg_state == NETREG_REGISTERED) +		unregister_netdev(netdev); + +	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) +		ixgbe_disable_sriov(adapter); + +	ixgbe_clear_interrupt_scheme(adapter); + +	ixgbe_release_hw_control(adapter); + +	iounmap(adapter->hw.hw_addr); +	pci_release_selected_regions(pdev, pci_select_bars(pdev, +				     IORESOURCE_MEM)); + +	e_dev_info("complete\n"); + +	free_netdev(netdev); + +	pci_disable_pcie_error_reporting(pdev); + +	pci_disable_device(pdev); +} + +/** + * ixgbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, +						pci_channel_state_t state) +{ +	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +	struct net_device *netdev = adapter->netdev; + +	netif_device_detach(netdev); + +	if (state == pci_channel_io_perm_failure) +		return PCI_ERS_RESULT_DISCONNECT; + +	if (netif_running(netdev)) +		ixgbe_down(adapter); +	pci_disable_device(pdev); + +	/* Request a slot reset. */ +	return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) +{ +	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +	pci_ers_result_t result; +	int err; + +	if (pci_enable_device_mem(pdev)) { +		e_err(probe, "Cannot re-enable PCI device after reset.\n"); +		result = PCI_ERS_RESULT_DISCONNECT; +	} else { +		pci_set_master(pdev); +		pci_restore_state(pdev); +		pci_save_state(pdev); + +		pci_wake_from_d3(pdev, false); + +		ixgbe_reset(adapter); +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); +		result = PCI_ERS_RESULT_RECOVERED; +	} + +	err = pci_cleanup_aer_uncorrect_error_status(pdev); +	if (err) { +		e_dev_err("pci_cleanup_aer_uncorrect_error_status " +			  "failed 0x%0x\n", err); +		/* non-fatal, continue */ +	} + +	return result; +} + +/** + * ixgbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. 
+ */ +static void ixgbe_io_resume(struct pci_dev *pdev) +{ +	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +	struct net_device *netdev = adapter->netdev; + +	if (netif_running(netdev)) { +		if (ixgbe_up(adapter)) { +			e_info(probe, "ixgbe_up failed after reset\n"); +			return; +		} +	} + +	netif_device_attach(netdev); +} + +static struct pci_error_handlers ixgbe_err_handler = { +	.error_detected = ixgbe_io_error_detected, +	.slot_reset = ixgbe_io_slot_reset, +	.resume = ixgbe_io_resume, +}; + +static struct pci_driver ixgbe_driver = { +	.name     = ixgbe_driver_name, +	.id_table = ixgbe_pci_tbl, +	.probe    = ixgbe_probe, +	.remove   = __devexit_p(ixgbe_remove), +#ifdef CONFIG_PM +	.suspend  = ixgbe_suspend, +	.resume   = ixgbe_resume, +#endif +	.shutdown = ixgbe_shutdown, +	.err_handler = &ixgbe_err_handler +}; + +/** + * ixgbe_init_module - Driver Registration Routine + * + * ixgbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbe_init_module(void) +{ +	int ret; +	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); +	pr_info("%s\n", ixgbe_copyright); + +#ifdef CONFIG_IXGBE_DCA +	dca_register_notify(&dca_notifier); +#endif + +	ret = pci_register_driver(&ixgbe_driver); +	return ret; +} + +module_init(ixgbe_init_module); + +/** + * ixgbe_exit_module - Driver Exit Cleanup Routine + * + * ixgbe_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ixgbe_exit_module(void) +{ +#ifdef CONFIG_IXGBE_DCA +	dca_unregister_notify(&dca_notifier); +#endif +	pci_unregister_driver(&ixgbe_driver); +	rcu_barrier(); /* Wait for completion of call_rcu()'s */ +} + +#ifdef CONFIG_IXGBE_DCA +static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, +			    void *p) +{ +	int ret_val; + +	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, +					 __ixgbe_notify_dca); + +	return ret_val ? NOTIFY_BAD : NOTIFY_DONE; +} + +#endif /* CONFIG_IXGBE_DCA */ + +module_exit(ixgbe_exit_module); + +/* ixgbe_main.c */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c new file mode 100644 index 00000000000..1ff0eefcfd0 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -0,0 +1,471 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_mbx.h" + +/** + *  ixgbe_read_mbx - Reads a message from the mailbox + *  @hw: pointer to the HW structure + *  @msg: The message buffer + *  @size: Length of buffer + *  @mbx_id: id of mailbox to read + * + *  returns SUCCESS if it successfuly read message from buffer + **/ +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	s32 ret_val = IXGBE_ERR_MBX; + +	/* limit read to size of mailbox */ +	if (size > mbx->size) +		size = mbx->size; + +	if (mbx->ops.read) +		ret_val = mbx->ops.read(hw, msg, size, mbx_id); + +	return ret_val; +} + +/** + *  ixgbe_write_mbx - Write a message to the mailbox + *  @hw: pointer to the HW structure + *  @msg: The message buffer + *  @size: Length of buffer + *  @mbx_id: id of mailbox to write + * + *  returns SUCCESS if it successfully copied message into the buffer + **/ +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	s32 ret_val = 0; + +	if (size > mbx->size) +		ret_val = IXGBE_ERR_MBX; + +	else if (mbx->ops.write) +		ret_val = mbx->ops.write(hw, msg, size, mbx_id); + +	return ret_val; +} + +/** + *  ixgbe_check_for_msg - checks to see if someone sent us mail + *  @hw: pointer to the HW structure + *  @mbx_id: id of mailbox to check + * + *  returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	s32 ret_val = IXGBE_ERR_MBX; + +	if (mbx->ops.check_for_msg) +		ret_val = mbx->ops.check_for_msg(hw, mbx_id); + +	return ret_val; +} + +/** + *  ixgbe_check_for_ack - checks to see if someone sent us ACK + *  @hw: pointer to the HW structure + *  @mbx_id: id of mailbox to check + * + *  returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	s32 ret_val = IXGBE_ERR_MBX; + +	if (mbx->ops.check_for_ack) +		ret_val = mbx->ops.check_for_ack(hw, mbx_id); + +	return ret_val; +} + +/** + *  ixgbe_check_for_rst - checks to see if other side has reset + *  @hw: pointer to the HW structure + *  @mbx_id: id of mailbox to check + * + *  returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	s32 ret_val = IXGBE_ERR_MBX; + +	if (mbx->ops.check_for_rst) +		ret_val = mbx->ops.check_for_rst(hw, mbx_id); + +	return ret_val; +} + +/** + *  ixgbe_poll_for_msg - Wait for message notification + *  @hw: pointer to the HW structure + *  @mbx_id: id of mailbox to write + * + *  returns SUCCESS if it successfully received a message notification + **/ +static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	int countdown = mbx->timeout; + +	if (!countdown || !mbx->ops.check_for_msg) +		goto out; + +	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { +		countdown--; +		if (!countdown) +			break; +		udelay(mbx->usec_delay); +	} + +out: +	return countdown ? 
0 : IXGBE_ERR_MBX; +} + +/** + *  ixgbe_poll_for_ack - Wait for message acknowledgement + *  @hw: pointer to the HW structure + *  @mbx_id: id of mailbox to write + * + *  returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	int countdown = mbx->timeout; + +	if (!countdown || !mbx->ops.check_for_ack) +		goto out; + +	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { +		countdown--; +		if (!countdown) +			break; +		udelay(mbx->usec_delay); +	} + +out: +	return countdown ? 0 : IXGBE_ERR_MBX; +} + +/** + *  ixgbe_read_posted_mbx - Wait for message notification and receive message + *  @hw: pointer to the HW structure + *  @msg: The message buffer + *  @size: Length of buffer + *  @mbx_id: id of mailbox to write + * + *  returns SUCCESS if it successfully received a message notification and + *  copied it into the receive buffer. + **/ +static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, +				 u16 mbx_id) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	s32 ret_val = IXGBE_ERR_MBX; + +	if (!mbx->ops.read) +		goto out; + +	ret_val = ixgbe_poll_for_msg(hw, mbx_id); + +	/* if ack received read message, otherwise we timed out */ +	if (!ret_val) +		ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: +	return ret_val; +} + +/** + *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack + *  @hw: pointer to the HW structure + *  @msg: The message buffer + *  @size: Length of buffer + *  @mbx_id: id of mailbox to write + * + *  returns SUCCESS if it successfully copied message into the buffer and + *  received an ack to that message within delay * timeout period + **/ +static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, +                           u16 mbx_id) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	s32 ret_val = IXGBE_ERR_MBX; + +	/* exit if either we can't write or there isn't a defined timeout */ +	if (!mbx->ops.write || !mbx->timeout) +		goto out; + +	/* send msg */ +	ret_val = mbx->ops.write(hw, msg, size, mbx_id); + +	/* if msg sent wait until we receive an ack */ +	if (!ret_val) +		ret_val = ixgbe_poll_for_ack(hw, mbx_id); +out: +	return ret_val; +} + +static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +{ +	u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); +	s32 ret_val = IXGBE_ERR_MBX; + +	if (mbvficr & mask) { +		ret_val = 0; +		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); +	} + +	return ret_val; +} + +/** + *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail + *  @hw: pointer to the HW structure + *  @vf_number: the VF index + * + *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +{ +	s32 ret_val = IXGBE_ERR_MBX; +	s32 index = IXGBE_MBVFICR_INDEX(vf_number); +	u32 vf_bit = vf_number % 16; + +	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, +	                            index)) { +		ret_val = 0; +		hw->mbx.stats.reqs++; +	} + +	return ret_val; +} + +/** + *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed + *  @hw: pointer to the HW structure + *  @vf_number: the VF index + * + *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +{ +	s32 ret_val = IXGBE_ERR_MBX; +	s32 index = IXGBE_MBVFICR_INDEX(vf_number); +	u32 vf_bit = 
vf_number % 16; + +	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, +	                            index)) { +		ret_val = 0; +		hw->mbx.stats.acks++; +	} + +	return ret_val; +} + +/** + *  ixgbe_check_for_rst_pf - checks to see if the VF has reset + *  @hw: pointer to the HW structure + *  @vf_number: the VF index + * + *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +{ +	u32 reg_offset = (vf_number < 32) ? 0 : 1; +	u32 vf_shift = vf_number % 32; +	u32 vflre = 0; +	s32 ret_val = IXGBE_ERR_MBX; + +	switch (hw->mac.type) { +	case ixgbe_mac_82599EB: +		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); +		break; +	case ixgbe_mac_X540: +		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); +		break; +	default: +		break; +	} + +	if (vflre & (1 << vf_shift)) { +		ret_val = 0; +		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); +		hw->mbx.stats.rsts++; +	} + +	return ret_val; +} + +/** + *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock + *  @hw: pointer to the HW structure + *  @vf_number: the VF index + * + *  return SUCCESS if we obtained the mailbox lock + **/ +static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +{ +	s32 ret_val = IXGBE_ERR_MBX; +	u32 p2v_mailbox; + +	/* Take ownership of the buffer */ +	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); + +	/* reserve mailbox for vf use */ +	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); +	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) +		ret_val = 0; + +	return ret_val; +} + +/** + *  ixgbe_write_mbx_pf - Places a message in the mailbox + *  @hw: pointer to the HW structure + *  @msg: The message buffer + *  @size: Length of buffer + *  @vf_number: the VF index + * + *  returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, +                              u16 vf_number) +{ +	s32 ret_val; +	u16 i; + +	/* lock the mailbox to prevent pf/vf race condition */ +	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); +	if (ret_val) +		goto out_no_write; + +	/* flush msg and acks as we are overwriting the message buffer */ +	ixgbe_check_for_msg_pf(hw, vf_number); +	ixgbe_check_for_ack_pf(hw, vf_number); + +	/* copy the caller specified message to the mailbox memory buffer */ +	for (i = 0; i < size; i++) +		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); + +	/* Interrupt VF to tell it a message has been sent and release buffer*/ +	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); + +	/* update stats */ +	hw->mbx.stats.msgs_tx++; + +out_no_write: +	return ret_val; + +} + +/** + *  ixgbe_read_mbx_pf - Read a message from the mailbox + *  @hw: pointer to the HW structure + *  @msg: The message buffer + *  @size: Length of buffer + *  @vf_number: the VF index + * + *  This function copies a message from the mailbox buffer to the caller's + *  memory buffer.  The presumption is that the caller knows that there was + *  a message due to a VF request so no polling for message is needed. 
+ **/ +static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, +                             u16 vf_number) +{ +	s32 ret_val; +	u16 i; + +	/* lock the mailbox to prevent pf/vf race condition */ +	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); +	if (ret_val) +		goto out_no_read; + +	/* copy the message to the mailbox memory buffer */ +	for (i = 0; i < size; i++) +		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); + +	/* Acknowledge the message and release buffer */ +	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); + +	/* update stats */ +	hw->mbx.stats.msgs_rx++; + +out_no_read: +	return ret_val; +} + +#ifdef CONFIG_PCI_IOV +/** + *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox + *  @hw: pointer to the HW structure + * + *  Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; + +	if (hw->mac.type != ixgbe_mac_82599EB && +	    hw->mac.type != ixgbe_mac_X540) +		return; + +	mbx->timeout = 0; +	mbx->usec_delay = 0; + +	mbx->stats.msgs_tx = 0; +	mbx->stats.msgs_rx = 0; +	mbx->stats.reqs = 0; +	mbx->stats.acks = 0; +	mbx->stats.rsts = 0; + +	mbx->size = IXGBE_VFMAILBOX_SIZE; +} +#endif /* CONFIG_PCI_IOV */ + +struct ixgbe_mbx_operations mbx_ops_generic = { +	.read                   = ixgbe_read_mbx_pf, +	.write                  = ixgbe_write_mbx_pf, +	.read_posted            = ixgbe_read_posted_mbx, +	.write_posted           = ixgbe_write_posted_mbx, +	.check_for_msg          = ixgbe_check_for_msg_pf, +	.check_for_ack          = ixgbe_check_for_ack_pf, +	.check_for_rst          = ixgbe_check_for_rst_pf, +}; + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h new file mode 100644 index 00000000000..b239bdac38d --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -0,0 +1,93 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "ixgbe_type.h" + +#define IXGBE_VFMAILBOX_SIZE        16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX               -100 + +#define IXGBE_VFMAILBOX             0x002FC +#define IXGBE_VFMBMEM               0x00200 + +#define IXGBE_PFMAILBOX_STS   0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */ + + +/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF.  The reverse is true if it is IXGBE_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +#define IXGBE_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with +                                               * this are the ACK */ +#define IXGBE_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with +                                               * this are the NACK */ +#define IXGBE_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still +                                                 clear to send requests */ +#define IXGBE_VT_MSGINFO_SHIFT    16 +/* bits 23:16 are used for exra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT) + +#define IXGBE_VF_RESET            0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */ +#define IXGBE_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN      0x06 /* VF requests PF for unicast filter */ + +/* length of permanent address message returned from PF */ +#define IXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define IXGBE_VF_MC_TYPE_WORD     3 + +#define IXGBE_PF_CONTROL_MSG      0x0100 /* PF control message */ + +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY   500  /* microseconds between retries */ + +s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); +#ifdef CONFIG_PCI_IOV +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); +#endif /* CONFIG_PCI_IOV */ + +extern struct ixgbe_mbx_operations mbx_ops_generic; + +#endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c new file mode 100644 index 00000000000..f7ca3511b9f --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -0,0 +1,1725 @@ +/******************************************************************************* + +  Intel 10 
Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static void ixgbe_i2c_start(struct ixgbe_hw *hw); +static void ixgbe_i2c_stop(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +static bool ixgbe_get_i2c_data(u32 *i2cctl); +static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); + +/** + *  ixgbe_identify_phy_generic - Get physical layer module + *  @hw: pointer to hardware structure + * + *  Determines the physical layer module found on the current adapter. 
+ **/ +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) +{ +	s32 status = IXGBE_ERR_PHY_ADDR_INVALID; +	u32 phy_addr; +	u16 ext_ability = 0; + +	if (hw->phy.type == ixgbe_phy_unknown) { +		for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { +			hw->phy.mdio.prtad = phy_addr; +			if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { +				ixgbe_get_phy_id(hw); +				hw->phy.type = +				        ixgbe_get_phy_type_from_id(hw->phy.id); + +				if (hw->phy.type == ixgbe_phy_unknown) { +					hw->phy.ops.read_reg(hw, +							     MDIO_PMA_EXTABLE, +							     MDIO_MMD_PMAPMD, +							     &ext_ability); +					if (ext_ability & +					    (MDIO_PMA_EXTABLE_10GBT | +					     MDIO_PMA_EXTABLE_1000BT)) +						hw->phy.type = +							 ixgbe_phy_cu_unknown; +					else +						hw->phy.type = +							 ixgbe_phy_generic; +				} + +				status = 0; +				break; +			} +		} +		/* clear value if nothing found */ +		if (status != 0) +			hw->phy.mdio.prtad = 0; +	} else { +		status = 0; +	} + +	return status; +} + +/** + *  ixgbe_get_phy_id - Get the phy type + *  @hw: pointer to hardware structure + * + **/ +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +{ +	u32 status; +	u16 phy_id_high = 0; +	u16 phy_id_low = 0; + +	status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, +	                              &phy_id_high); + +	if (status == 0) { +		hw->phy.id = (u32)(phy_id_high << 16); +		status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, +		                              &phy_id_low); +		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); +		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); +	} +	return status; +} + +/** + *  ixgbe_get_phy_type_from_id - Get the phy type + *  @hw: pointer to hardware structure + * + **/ +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +{ +	enum ixgbe_phy_type phy_type; + +	switch (phy_id) { +	case TN1010_PHY_ID: +		phy_type = ixgbe_phy_tn; +		break; +	case X540_PHY_ID: +		phy_type = ixgbe_phy_aq; +		break; +	case QT2022_PHY_ID: +		phy_type = ixgbe_phy_qt; +		break; +	case ATH_PHY_ID: +		phy_type = ixgbe_phy_nl; +		break; +	default: +		phy_type = ixgbe_phy_unknown; +		break; +	} + +	return phy_type; +} + +/** + *  ixgbe_reset_phy_generic - Performs a PHY reset + *  @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) +{ +	u32 i; +	u16 ctrl = 0; +	s32 status = 0; + +	if (hw->phy.type == ixgbe_phy_unknown) +		status = ixgbe_identify_phy_generic(hw); + +	if (status != 0 || hw->phy.type == ixgbe_phy_none) +		goto out; + +	/* Don't reset PHY if it's shut down due to overtemp. */ +	if (!hw->phy.reset_if_overtemp && +	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) +		goto out; + +	/* +	 * Perform soft PHY reset to the PHY_XS. +	 * This will cause a soft reset to the PHY +	 */ +	hw->phy.ops.write_reg(hw, MDIO_CTRL1, +			      MDIO_MMD_PHYXS, +			      MDIO_CTRL1_RESET); + +	/* +	 * Poll for reset bit to self-clear indicating reset is complete. +	 * Some PHYs could take up to 3 seconds to complete and need about +	 * 1.7 usec delay after the reset is complete. 
+	 */ +	for (i = 0; i < 30; i++) { +		msleep(100); +		hw->phy.ops.read_reg(hw, MDIO_CTRL1, +				     MDIO_MMD_PHYXS, &ctrl); +		if (!(ctrl & MDIO_CTRL1_RESET)) { +			udelay(2); +			break; +		} +	} + +	if (ctrl & MDIO_CTRL1_RESET) { +		status = IXGBE_ERR_RESET_FAILED; +		hw_dbg(hw, "PHY reset polling failed to complete.\n"); +	} + +out: +	return status; +} + +/** + *  ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register + *  @hw: pointer to hardware structure + *  @reg_addr: 32 bit address of PHY register to read + *  @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, +                               u32 device_type, u16 *phy_data) +{ +	u32 command; +	u32 i; +	u32 data; +	s32 status = 0; +	u16 gssr; + +	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) +		gssr = IXGBE_GSSR_PHY1_SM; +	else +		gssr = IXGBE_GSSR_PHY0_SM; + +	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) +		status = IXGBE_ERR_SWFW_SYNC; + +	if (status == 0) { +		/* Setup and write the address cycle command */ +		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  | +		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | +		           (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | +		           (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + +		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + +		/* +		 * Check every 10 usec to see if the address cycle completed. +		 * The MDI Command bit will clear when the operation is +		 * complete +		 */ +		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { +			udelay(10); + +			command = IXGBE_READ_REG(hw, IXGBE_MSCA); + +			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) +				break; +		} + +		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { +			hw_dbg(hw, "PHY address command did not complete.\n"); +			status = IXGBE_ERR_PHY; +		} + +		if (status == 0) { +			/* +			 * Address cycle complete, setup and write the read +			 * command +			 */ +			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  | +			           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | +			           (hw->phy.mdio.prtad << +				    IXGBE_MSCA_PHY_ADDR_SHIFT) | +			           (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + +			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + +			/* +			 * Check every 10 usec to see if the address cycle +			 * completed. The MDI Command bit will clear when the +			 * operation is complete +			 */ +			for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { +				udelay(10); + +				command = IXGBE_READ_REG(hw, IXGBE_MSCA); + +				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) +					break; +			} + +			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { +				hw_dbg(hw, "PHY read command didn't complete\n"); +				status = IXGBE_ERR_PHY; +			} else { +				/* +				 * Read operation is complete.  
Get the data +				 * from MSRWD +				 */ +				data = IXGBE_READ_REG(hw, IXGBE_MSRWD); +				data >>= IXGBE_MSRWD_READ_DATA_SHIFT; +				*phy_data = (u16)(data); +			} +		} + +		hw->mac.ops.release_swfw_sync(hw, gssr); +	} + +	return status; +} + +/** + *  ixgbe_write_phy_reg_generic - Writes a value to specified PHY register + *  @hw: pointer to hardware structure + *  @reg_addr: 32 bit PHY register to write + *  @device_type: 5 bit device type + *  @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, +                                u32 device_type, u16 phy_data) +{ +	u32 command; +	u32 i; +	s32 status = 0; +	u16 gssr; + +	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) +		gssr = IXGBE_GSSR_PHY1_SM; +	else +		gssr = IXGBE_GSSR_PHY0_SM; + +	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) +		status = IXGBE_ERR_SWFW_SYNC; + +	if (status == 0) { +		/* Put the data in the MDI single read and write data register*/ +		IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + +		/* Setup and write the address cycle command */ +		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  | +		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | +		           (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | +		           (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + +		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + +		/* +		 * Check every 10 usec to see if the address cycle completed. +		 * The MDI Command bit will clear when the operation is +		 * complete +		 */ +		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { +			udelay(10); + +			command = IXGBE_READ_REG(hw, IXGBE_MSCA); + +			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) +				break; +		} + +		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { +			hw_dbg(hw, "PHY address cmd didn't complete\n"); +			status = IXGBE_ERR_PHY; +		} + +		if (status == 0) { +			/* +			 * Address cycle complete, setup and write the write +			 * command +			 */ +			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  | +			           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | +			           (hw->phy.mdio.prtad << +				    IXGBE_MSCA_PHY_ADDR_SHIFT) | +			           (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + +			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + +			/* +			 * Check every 10 usec to see if the address cycle +			 * completed. The MDI Command bit will clear when the +			 * operation is complete +			 */ +			for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { +				udelay(10); + +				command = IXGBE_READ_REG(hw, IXGBE_MSCA); + +				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) +					break; +			} + +			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { +				hw_dbg(hw, "PHY address cmd didn't complete\n"); +				status = IXGBE_ERR_PHY; +			} +		} + +		hw->mac.ops.release_swfw_sync(hw, gssr); +	} + +	return status; +} + +/** + *  ixgbe_setup_phy_link_generic - Set and restart autoneg + *  @hw: pointer to hardware structure + * + *  Restart autonegotiation and PHY and waits for completion. 
+ **/ +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) +{ +	s32 status = 0; +	u32 time_out; +	u32 max_time_out = 10; +	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; +	bool autoneg = false; +	ixgbe_link_speed speed; + +	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + +	if (speed & IXGBE_LINK_SPEED_10GB_FULL) { +		/* Set or unset auto-negotiation 10G advertisement */ +		hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, +				     MDIO_MMD_AN, +				     &autoneg_reg); + +		autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +			autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + +		hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, +				      MDIO_MMD_AN, +				      autoneg_reg); +	} + +	if (speed & IXGBE_LINK_SPEED_1GB_FULL) { +		/* Set or unset auto-negotiation 1G advertisement */ +		hw->phy.ops.read_reg(hw, +				     IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +				     MDIO_MMD_AN, +				     &autoneg_reg); + +		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) +			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + +		hw->phy.ops.write_reg(hw, +				      IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +				      MDIO_MMD_AN, +				      autoneg_reg); +	} + +	if (speed & IXGBE_LINK_SPEED_100_FULL) { +		/* Set or unset auto-negotiation 100M advertisement */ +		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, +				     MDIO_MMD_AN, +				     &autoneg_reg); + +		autoneg_reg &= ~(ADVERTISE_100FULL | +				 ADVERTISE_100HALF); +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) +			autoneg_reg |= ADVERTISE_100FULL; + +		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, +				      MDIO_MMD_AN, +				      autoneg_reg); +	} + +	/* Restart PHY autonegotiation and wait for completion */ +	hw->phy.ops.read_reg(hw, MDIO_CTRL1, +			     MDIO_MMD_AN, &autoneg_reg); + +	autoneg_reg |= MDIO_AN_CTRL1_RESTART; + +	hw->phy.ops.write_reg(hw, MDIO_CTRL1, +			      MDIO_MMD_AN, autoneg_reg); + +	/* Wait for autonegotiation to finish */ +	for (time_out = 0; time_out < max_time_out; time_out++) { +		udelay(10); +		/* Restart PHY autonegotiation and wait for completion */ +		status = hw->phy.ops.read_reg(hw, MDIO_STAT1, +					      MDIO_MMD_AN, +					      &autoneg_reg); + +		autoneg_reg &= MDIO_AN_STAT1_COMPLETE; +		if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { +			break; +		} +	} + +	if (time_out == max_time_out) { +		status = IXGBE_ERR_LINK_SETUP; +		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); +	} + +	return status; +} + +/** + *  ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities + *  @hw: pointer to hardware structure + *  @speed: new link speed + *  @autoneg: true if autonegotiation enabled + **/ +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, +                                       ixgbe_link_speed speed, +                                       bool autoneg, +                                       bool autoneg_wait_to_complete) +{ + +	/* +	 * Clear autoneg_advertised and set new values based on input link +	 * speed. 
+	 */ +	hw->phy.autoneg_advertised = 0; + +	if (speed & IXGBE_LINK_SPEED_10GB_FULL) +		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + +	if (speed & IXGBE_LINK_SPEED_1GB_FULL) +		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + +	if (speed & IXGBE_LINK_SPEED_100_FULL) +		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + +	/* Setup link based on the new speed settings */ +	hw->phy.ops.setup_link(hw); + +	return 0; +} + +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. + */ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, +                                               ixgbe_link_speed *speed, +                                               bool *autoneg) +{ +	s32 status = IXGBE_ERR_LINK_SETUP; +	u16 speed_ability; + +	*speed = 0; +	*autoneg = true; + +	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, +	                              &speed_ability); + +	if (status == 0) { +		if (speed_ability & MDIO_SPEED_10G) +			*speed |= IXGBE_LINK_SPEED_10GB_FULL; +		if (speed_ability & MDIO_PMA_SPEED_1000) +			*speed |= IXGBE_LINK_SPEED_1GB_FULL; +		if (speed_ability & MDIO_PMA_SPEED_100) +			*speed |= IXGBE_LINK_SPEED_100_FULL; +	} + +	return status; +} + +/** + *  ixgbe_check_phy_link_tnx - Determine link and speed status + *  @hw: pointer to hardware structure + * + *  Reads the VS1 register to determine if link is up and the current speed for + *  the PHY. + **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, +			     bool *link_up) +{ +	s32 status = 0; +	u32 time_out; +	u32 max_time_out = 10; +	u16 phy_link = 0; +	u16 phy_speed = 0; +	u16 phy_data = 0; + +	/* Initialize speed and link to default case */ +	*link_up = false; +	*speed = IXGBE_LINK_SPEED_10GB_FULL; + +	/* +	 * Check current speed and link status of the PHY register. +	 * This is a vendor specific register and may have to +	 * be changed for other copper PHYs. +	 */ +	for (time_out = 0; time_out < max_time_out; time_out++) { +		udelay(10); +		status = hw->phy.ops.read_reg(hw, +					      MDIO_STAT1, +					      MDIO_MMD_VEND1, +					      &phy_data); +		phy_link = phy_data & +			    IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; +		phy_speed = phy_data & +			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; +		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { +			*link_up = true; +			if (phy_speed == +			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) +				*speed = IXGBE_LINK_SPEED_1GB_FULL; +			break; +		} +	} + +	return status; +} + +/** + *	ixgbe_setup_phy_link_tnx - Set and restart autoneg + *	@hw: pointer to hardware structure + * + *	Restart autonegotiation and PHY and waits for completion. 
+ **/ +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +{ +	s32 status = 0; +	u32 time_out; +	u32 max_time_out = 10; +	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; +	bool autoneg = false; +	ixgbe_link_speed speed; + +	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + +	if (speed & IXGBE_LINK_SPEED_10GB_FULL) { +		/* Set or unset auto-negotiation 10G advertisement */ +		hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, +				     MDIO_MMD_AN, +				     &autoneg_reg); + +		autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +			autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + +		hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, +				      MDIO_MMD_AN, +				      autoneg_reg); +	} + +	if (speed & IXGBE_LINK_SPEED_1GB_FULL) { +		/* Set or unset auto-negotiation 1G advertisement */ +		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, +				     MDIO_MMD_AN, +				     &autoneg_reg); + +		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) +			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + +		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, +				      MDIO_MMD_AN, +				      autoneg_reg); +	} + +	if (speed & IXGBE_LINK_SPEED_100_FULL) { +		/* Set or unset auto-negotiation 100M advertisement */ +		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, +				     MDIO_MMD_AN, +				     &autoneg_reg); + +		autoneg_reg &= ~(ADVERTISE_100FULL | +				 ADVERTISE_100HALF); +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) +			autoneg_reg |= ADVERTISE_100FULL; + +		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, +				      MDIO_MMD_AN, +				      autoneg_reg); +	} + +	/* Restart PHY autonegotiation and wait for completion */ +	hw->phy.ops.read_reg(hw, MDIO_CTRL1, +			     MDIO_MMD_AN, &autoneg_reg); + +	autoneg_reg |= MDIO_AN_CTRL1_RESTART; + +	hw->phy.ops.write_reg(hw, MDIO_CTRL1, +			      MDIO_MMD_AN, autoneg_reg); + +	/* Wait for autonegotiation to finish */ +	for (time_out = 0; time_out < max_time_out; time_out++) { +		udelay(10); +		/* Restart PHY autonegotiation and wait for completion */ +		status = hw->phy.ops.read_reg(hw, MDIO_STAT1, +					      MDIO_MMD_AN, +					      &autoneg_reg); + +		autoneg_reg &= MDIO_AN_STAT1_COMPLETE; +		if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) +			break; +	} + +	if (time_out == max_time_out) { +		status = IXGBE_ERR_LINK_SETUP; +		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); +	} + +	return status; +} + +/** + *  ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version + *  @hw: pointer to hardware structure + *  @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, +				       u16 *firmware_version) +{ +	s32 status = 0; + +	status = hw->phy.ops.read_reg(hw, TNX_FW_REV, +				      MDIO_MMD_VEND1, +				      firmware_version); + +	return status; +} + +/** + *  ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + *  @hw: pointer to hardware structure + *  @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, +					   u16 *firmware_version) +{ +	s32 status = 0; + +	status = hw->phy.ops.read_reg(hw, AQ_FW_REV, +				      MDIO_MMD_VEND1, +				      firmware_version); + +	return status; +} + +/** + *  ixgbe_reset_phy_nl - Performs a PHY reset + *  @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) +{ +	u16 phy_offset, control, eword, 
edata, block_crc; +	bool end_data = false; +	u16 list_offset, data_offset; +	u16 phy_data = 0; +	s32 ret_val = 0; +	u32 i; + +	hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); + +	/* reset the PHY and poll for completion */ +	hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, +	                      (phy_data | MDIO_CTRL1_RESET)); + +	for (i = 0; i < 100; i++) { +		hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, +		                     &phy_data); +		if ((phy_data & MDIO_CTRL1_RESET) == 0) +			break; +		usleep_range(10000, 20000); +	} + +	if ((phy_data & MDIO_CTRL1_RESET) != 0) { +		hw_dbg(hw, "PHY reset did not complete.\n"); +		ret_val = IXGBE_ERR_PHY; +		goto out; +	} + +	/* Get init offsets */ +	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, +	                                              &data_offset); +	if (ret_val != 0) +		goto out; + +	ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); +	data_offset++; +	while (!end_data) { +		/* +		 * Read control word from PHY init contents offset +		 */ +		ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); +		control = (eword & IXGBE_CONTROL_MASK_NL) >> +		           IXGBE_CONTROL_SHIFT_NL; +		edata = eword & IXGBE_DATA_MASK_NL; +		switch (control) { +		case IXGBE_DELAY_NL: +			data_offset++; +			hw_dbg(hw, "DELAY: %d MS\n", edata); +			usleep_range(edata * 1000, edata * 2000); +			break; +		case IXGBE_DATA_NL: +			hw_dbg(hw, "DATA:\n"); +			data_offset++; +			hw->eeprom.ops.read(hw, data_offset++, +			                    &phy_offset); +			for (i = 0; i < edata; i++) { +				hw->eeprom.ops.read(hw, data_offset, &eword); +				hw->phy.ops.write_reg(hw, phy_offset, +				                      MDIO_MMD_PMAPMD, eword); +				hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, +				       phy_offset); +				data_offset++; +				phy_offset++; +			} +			break; +		case IXGBE_CONTROL_NL: +			data_offset++; +			hw_dbg(hw, "CONTROL:\n"); +			if (edata == IXGBE_CONTROL_EOL_NL) { +				hw_dbg(hw, "EOL\n"); +				end_data = true; +			} else if (edata == IXGBE_CONTROL_SOL_NL) { +				hw_dbg(hw, "SOL\n"); +			} else { +				hw_dbg(hw, "Bad control value\n"); +				ret_val = IXGBE_ERR_PHY; +				goto out; +			} +			break; +		default: +			hw_dbg(hw, "Bad control type\n"); +			ret_val = IXGBE_ERR_PHY; +			goto out; +		} +	} + +out: +	return ret_val; +} + +/** + *  ixgbe_identify_sfp_module_generic - Identifies SFP modules + *  @hw: pointer to hardware structure + * + *  Searches for and identifies the SFP module and assigns appropriate PHY type. 
+ **/ +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) +{ +	s32 status = IXGBE_ERR_PHY_ADDR_INVALID; +	u32 vendor_oui = 0; +	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; +	u8 identifier = 0; +	u8 comp_codes_1g = 0; +	u8 comp_codes_10g = 0; +	u8 oui_bytes[3] = {0, 0, 0}; +	u8 cable_tech = 0; +	u8 cable_spec = 0; +	u16 enforce_sfp = 0; + +	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { +		hw->phy.sfp_type = ixgbe_sfp_type_not_present; +		status = IXGBE_ERR_SFP_NOT_PRESENT; +		goto out; +	} + +	status = hw->phy.ops.read_i2c_eeprom(hw, +					     IXGBE_SFF_IDENTIFIER, +	                                     &identifier); + +	if (status == IXGBE_ERR_SWFW_SYNC || +	    status == IXGBE_ERR_I2C || +	    status == IXGBE_ERR_SFP_NOT_PRESENT) +		goto err_read_i2c_eeprom; + +	/* LAN ID is needed for sfp_type determination */ +	hw->mac.ops.set_lan_id(hw); + +	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { +		hw->phy.type = ixgbe_phy_sfp_unsupported; +		status = IXGBE_ERR_SFP_NOT_SUPPORTED; +	} else { +		status = hw->phy.ops.read_i2c_eeprom(hw, +						     IXGBE_SFF_1GBE_COMP_CODES, +						     &comp_codes_1g); + +		if (status == IXGBE_ERR_SWFW_SYNC || +		    status == IXGBE_ERR_I2C || +		    status == IXGBE_ERR_SFP_NOT_PRESENT) +			goto err_read_i2c_eeprom; + +		status = hw->phy.ops.read_i2c_eeprom(hw, +						     IXGBE_SFF_10GBE_COMP_CODES, +						     &comp_codes_10g); + +		if (status == IXGBE_ERR_SWFW_SYNC || +		    status == IXGBE_ERR_I2C || +		    status == IXGBE_ERR_SFP_NOT_PRESENT) +			goto err_read_i2c_eeprom; +		status = hw->phy.ops.read_i2c_eeprom(hw, +						     IXGBE_SFF_CABLE_TECHNOLOGY, +						     &cable_tech); + +		if (status == IXGBE_ERR_SWFW_SYNC || +		    status == IXGBE_ERR_I2C || +		    status == IXGBE_ERR_SFP_NOT_PRESENT) +			goto err_read_i2c_eeprom; + +		 /* ID Module +		  * ========= +		  * 0   SFP_DA_CU +		  * 1   SFP_SR +		  * 2   SFP_LR +		  * 3   SFP_DA_CORE0 - 82599-specific +		  * 4   SFP_DA_CORE1 - 82599-specific +		  * 5   SFP_SR/LR_CORE0 - 82599-specific +		  * 6   SFP_SR/LR_CORE1 - 82599-specific +		  * 7   SFP_act_lmt_DA_CORE0 - 82599-specific +		  * 8   SFP_act_lmt_DA_CORE1 - 82599-specific +		  * 9   SFP_1g_cu_CORE0 - 82599-specific +		  * 10  SFP_1g_cu_CORE1 - 82599-specific +		  */ +		if (hw->mac.type == ixgbe_mac_82598EB) { +			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) +				hw->phy.sfp_type = ixgbe_sfp_type_da_cu; +			else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) +				hw->phy.sfp_type = ixgbe_sfp_type_sr; +			else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) +				hw->phy.sfp_type = ixgbe_sfp_type_lr; +			else +				hw->phy.sfp_type = ixgbe_sfp_type_unknown; +		} else if (hw->mac.type == ixgbe_mac_82599EB) { +			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { +				if (hw->bus.lan_id == 0) +					hw->phy.sfp_type = +					             ixgbe_sfp_type_da_cu_core0; +				else +					hw->phy.sfp_type = +					             ixgbe_sfp_type_da_cu_core1; +			} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { +				hw->phy.ops.read_i2c_eeprom( +						hw, IXGBE_SFF_CABLE_SPEC_COMP, +						&cable_spec); +				if (cable_spec & +				    IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { +					if (hw->bus.lan_id == 0) +						hw->phy.sfp_type = +						ixgbe_sfp_type_da_act_lmt_core0; +					else +						hw->phy.sfp_type = +						ixgbe_sfp_type_da_act_lmt_core1; +				} else { +					hw->phy.sfp_type = +							ixgbe_sfp_type_unknown; +				} +			} else if (comp_codes_10g & +				   (IXGBE_SFF_10GBASESR_CAPABLE | +				    IXGBE_SFF_10GBASELR_CAPABLE)) { +	
			if (hw->bus.lan_id == 0) +					hw->phy.sfp_type = +					              ixgbe_sfp_type_srlr_core0; +				else +					hw->phy.sfp_type = +					              ixgbe_sfp_type_srlr_core1; +			} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { +				if (hw->bus.lan_id == 0) +					hw->phy.sfp_type = +						ixgbe_sfp_type_1g_cu_core0; +				else +					hw->phy.sfp_type = +						ixgbe_sfp_type_1g_cu_core1; +			} else { +				hw->phy.sfp_type = ixgbe_sfp_type_unknown; +			} +		} + +		if (hw->phy.sfp_type != stored_sfp_type) +			hw->phy.sfp_setup_needed = true; + +		/* Determine if the SFP+ PHY is dual speed or not. */ +		hw->phy.multispeed_fiber = false; +		if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && +		   (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || +		   ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && +		   (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) +			hw->phy.multispeed_fiber = true; + +		/* Determine PHY vendor */ +		if (hw->phy.type != ixgbe_phy_nl) { +			hw->phy.id = identifier; +			status = hw->phy.ops.read_i2c_eeprom(hw, +			                            IXGBE_SFF_VENDOR_OUI_BYTE0, +			                            &oui_bytes[0]); + +			if (status == IXGBE_ERR_SWFW_SYNC || +			    status == IXGBE_ERR_I2C || +			    status == IXGBE_ERR_SFP_NOT_PRESENT) +				goto err_read_i2c_eeprom; + +			status = hw->phy.ops.read_i2c_eeprom(hw, +			                            IXGBE_SFF_VENDOR_OUI_BYTE1, +			                            &oui_bytes[1]); + +			if (status == IXGBE_ERR_SWFW_SYNC || +			    status == IXGBE_ERR_I2C || +			    status == IXGBE_ERR_SFP_NOT_PRESENT) +				goto err_read_i2c_eeprom; + +			status = hw->phy.ops.read_i2c_eeprom(hw, +			                            IXGBE_SFF_VENDOR_OUI_BYTE2, +			                            &oui_bytes[2]); + +			if (status == IXGBE_ERR_SWFW_SYNC || +			    status == IXGBE_ERR_I2C || +			    status == IXGBE_ERR_SFP_NOT_PRESENT) +				goto err_read_i2c_eeprom; + +			vendor_oui = +			  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | +			   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | +			   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + +			switch (vendor_oui) { +			case IXGBE_SFF_VENDOR_OUI_TYCO: +				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) +					hw->phy.type = +						    ixgbe_phy_sfp_passive_tyco; +				break; +			case IXGBE_SFF_VENDOR_OUI_FTL: +				if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) +					hw->phy.type = ixgbe_phy_sfp_ftl_active; +				else +					hw->phy.type = ixgbe_phy_sfp_ftl; +				break; +			case IXGBE_SFF_VENDOR_OUI_AVAGO: +				hw->phy.type = ixgbe_phy_sfp_avago; +				break; +			case IXGBE_SFF_VENDOR_OUI_INTEL: +				hw->phy.type = ixgbe_phy_sfp_intel; +				break; +			default: +				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) +					hw->phy.type = +						 ixgbe_phy_sfp_passive_unknown; +				else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) +					hw->phy.type = +						ixgbe_phy_sfp_active_unknown; +				else +					hw->phy.type = ixgbe_phy_sfp_unknown; +				break; +			} +		} + +		/* Allow any DA cable vendor */ +		if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | +		    IXGBE_SFF_DA_ACTIVE_CABLE)) { +			status = 0; +			goto out; +		} + +		/* Verify supported 1G SFP modules */ +		if (comp_codes_10g == 0 && +		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || +		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { +			hw->phy.type = ixgbe_phy_sfp_unsupported; +			status = IXGBE_ERR_SFP_NOT_SUPPORTED; +			goto out; +		} + +		/* Anything else 82598-based is supported */ +		if (hw->mac.type == 
ixgbe_mac_82598EB) { +			status = 0; +			goto out; +		} + +		hw->mac.ops.get_device_caps(hw, &enforce_sfp); +		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && +		    !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || +		      (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { +			/* Make sure we're a supported PHY type */ +			if (hw->phy.type == ixgbe_phy_sfp_intel) { +				status = 0; +			} else { +				hw_dbg(hw, "SFP+ module not supported\n"); +				hw->phy.type = ixgbe_phy_sfp_unsupported; +				status = IXGBE_ERR_SFP_NOT_SUPPORTED; +			} +		} else { +			status = 0; +		} +	} + +out: +	return status; + +err_read_i2c_eeprom: +	hw->phy.sfp_type = ixgbe_sfp_type_not_present; +	if (hw->phy.type != ixgbe_phy_nl) { +		hw->phy.id = 0; +		hw->phy.type = ixgbe_phy_unknown; +	} +	return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + *  ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence + *  @hw: pointer to hardware structure + *  @list_offset: offset to the SFP ID list + *  @data_offset: offset to the SFP data block + * + *  Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if + *  so it returns the offsets to the phy init sequence block. + **/ +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, +                                        u16 *list_offset, +                                        u16 *data_offset) +{ +	u16 sfp_id; +	u16 sfp_type = hw->phy.sfp_type; + +	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) +		return IXGBE_ERR_SFP_NOT_SUPPORTED; + +	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) +		return IXGBE_ERR_SFP_NOT_PRESENT; + +	if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && +	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) +		return IXGBE_ERR_SFP_NOT_SUPPORTED; + +	/* +	 * Limiting active cables and 1G Phys must be initialized as +	 * SR modules +	 */ +	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || +	    sfp_type == ixgbe_sfp_type_1g_cu_core0) +		sfp_type = ixgbe_sfp_type_srlr_core0; +	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || +	         sfp_type == ixgbe_sfp_type_1g_cu_core1) +		sfp_type = ixgbe_sfp_type_srlr_core1; + +	/* Read offset to PHY init contents */ +	hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); + +	if ((!*list_offset) || (*list_offset == 0xFFFF)) +		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + +	/* Shift offset to first ID word */ +	(*list_offset)++; + +	/* +	 * Find the matching SFP ID in the EEPROM +	 * and program the init sequence +	 */ +	hw->eeprom.ops.read(hw, *list_offset, &sfp_id); + +	while (sfp_id != IXGBE_PHY_INIT_END_NL) { +		if (sfp_id == sfp_type) { +			(*list_offset)++; +			hw->eeprom.ops.read(hw, *list_offset, data_offset); +			if ((!*data_offset) || (*data_offset == 0xFFFF)) { +				hw_dbg(hw, "SFP+ module not supported\n"); +				return IXGBE_ERR_SFP_NOT_SUPPORTED; +			} else { +				break; +			} +		} else { +			(*list_offset) += 2; +			if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) +				return IXGBE_ERR_PHY; +		} +	} + +	if (sfp_id == IXGBE_PHY_INIT_END_NL) { +		hw_dbg(hw, "No matching SFP+ module found\n"); +		return IXGBE_ERR_SFP_NOT_SUPPORTED; +	} + +	return 0; +} + +/** + *  ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + *  @hw: pointer to hardware structure + *  @byte_offset: EEPROM byte offset to read + *  @eeprom_data: value read + * + *  Performs byte read operation to SFP module's EEPROM over I2C interface. 
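As an aside, the OUI match in ixgbe_identify_sfp_module_generic above works because the three SFF vendor OUI bytes are packed into the top three bytes of a 32-bit word (shifts of 24, 16 and 8), which is exactly the 0x[byte0][byte1][byte2][00] layout of the IXGBE_SFF_VENDOR_OUI_* constants declared in ixgbe_phy.h further down. A stand-alone sketch of that packing, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Pack three SFF vendor OUI bytes the same way the driver does:
 * byte0 << 24 | byte1 << 16 | byte2 << 8, low byte left zero. */
static uint32_t pack_vendor_oui(const uint8_t oui_bytes[3])
{
	return ((uint32_t)oui_bytes[0] << 24) |
	       ((uint32_t)oui_bytes[1] << 16) |
	       ((uint32_t)oui_bytes[2] << 8);
}

int main(void)
{
	/* Example bytes taken from IXGBE_SFF_VENDOR_OUI_INTEL (0x001B2100). */
	uint8_t oui_bytes[3] = { 0x00, 0x1B, 0x21 };

	printf("vendor_oui = 0x%08X\n", (unsigned)pack_vendor_oui(oui_bytes));
	return 0;
}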
+ **/ +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, +                                  u8 *eeprom_data) +{ +	return hw->phy.ops.read_i2c_byte(hw, byte_offset, +	                                 IXGBE_I2C_EEPROM_DEV_ADDR, +	                                 eeprom_data); +} + +/** + *  ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface + *  @hw: pointer to hardware structure + *  @byte_offset: EEPROM byte offset to write + *  @eeprom_data: value to write + * + *  Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, +                                   u8 eeprom_data) +{ +	return hw->phy.ops.write_i2c_byte(hw, byte_offset, +	                                  IXGBE_I2C_EEPROM_DEV_ADDR, +	                                  eeprom_data); +} + +/** + *  ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + *  @hw: pointer to hardware structure + *  @byte_offset: byte offset to read + *  @data: value read + * + *  Performs byte read operation to SFP module's EEPROM over I2C interface at + *  a specified deivce address. + **/ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +                                u8 dev_addr, u8 *data) +{ +	s32 status = 0; +	u32 max_retry = 10; +	u32 retry = 0; +	u16 swfw_mask = 0; +	bool nack = 1; + +	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) +		swfw_mask = IXGBE_GSSR_PHY1_SM; +	else +		swfw_mask = IXGBE_GSSR_PHY0_SM; + +	do { +		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { +			status = IXGBE_ERR_SWFW_SYNC; +			goto read_byte_out; +		} + +		ixgbe_i2c_start(hw); + +		/* Device Address and write indication */ +		status = ixgbe_clock_out_i2c_byte(hw, dev_addr); +		if (status != 0) +			goto fail; + +		status = ixgbe_get_i2c_ack(hw); +		if (status != 0) +			goto fail; + +		status = ixgbe_clock_out_i2c_byte(hw, byte_offset); +		if (status != 0) +			goto fail; + +		status = ixgbe_get_i2c_ack(hw); +		if (status != 0) +			goto fail; + +		ixgbe_i2c_start(hw); + +		/* Device Address and read indication */ +		status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); +		if (status != 0) +			goto fail; + +		status = ixgbe_get_i2c_ack(hw); +		if (status != 0) +			goto fail; + +		status = ixgbe_clock_in_i2c_byte(hw, data); +		if (status != 0) +			goto fail; + +		status = ixgbe_clock_out_i2c_bit(hw, nack); +		if (status != 0) +			goto fail; + +		ixgbe_i2c_stop(hw); +		break; + +fail: +		hw->mac.ops.release_swfw_sync(hw, swfw_mask); +		msleep(100); +		ixgbe_i2c_bus_clear(hw); +		retry++; +		if (retry < max_retry) +			hw_dbg(hw, "I2C byte read error - Retrying.\n"); +		else +			hw_dbg(hw, "I2C byte read error.\n"); + +	} while (retry < max_retry); + +	hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: +	return status; +} + +/** + *  ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + *  @hw: pointer to hardware structure + *  @byte_offset: byte offset to write + *  @data: value to write + * + *  Performs byte write operation to SFP module's EEPROM over I2C interface at + *  a specified device address. 
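One note on the (dev_addr | 0x1) step in the byte read above: the low bit of an I2C address byte is the read/write flag, so the SFP EEPROM address is sent once to write the byte offset and re-sent with bit 0 set after the repeated START to switch the transfer to a read. A stand-alone illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dev_addr = 0xA0;	/* IXGBE_I2C_EEPROM_DEV_ADDR */

	printf("address byte for write: 0x%02X\n", dev_addr);        /* 0xA0 */
	printf("address byte for read:  0x%02X\n", dev_addr | 0x1);  /* 0xA1 */
	return 0;
}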
+ **/ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +                                 u8 dev_addr, u8 data) +{ +	s32 status = 0; +	u32 max_retry = 1; +	u32 retry = 0; +	u16 swfw_mask = 0; + +	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) +		swfw_mask = IXGBE_GSSR_PHY1_SM; +	else +		swfw_mask = IXGBE_GSSR_PHY0_SM; + +	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { +		status = IXGBE_ERR_SWFW_SYNC; +		goto write_byte_out; +	} + +	do { +		ixgbe_i2c_start(hw); + +		status = ixgbe_clock_out_i2c_byte(hw, dev_addr); +		if (status != 0) +			goto fail; + +		status = ixgbe_get_i2c_ack(hw); +		if (status != 0) +			goto fail; + +		status = ixgbe_clock_out_i2c_byte(hw, byte_offset); +		if (status != 0) +			goto fail; + +		status = ixgbe_get_i2c_ack(hw); +		if (status != 0) +			goto fail; + +		status = ixgbe_clock_out_i2c_byte(hw, data); +		if (status != 0) +			goto fail; + +		status = ixgbe_get_i2c_ack(hw); +		if (status != 0) +			goto fail; + +		ixgbe_i2c_stop(hw); +		break; + +fail: +		ixgbe_i2c_bus_clear(hw); +		retry++; +		if (retry < max_retry) +			hw_dbg(hw, "I2C byte write error - Retrying.\n"); +		else +			hw_dbg(hw, "I2C byte write error.\n"); +	} while (retry < max_retry); + +	hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: +	return status; +} + +/** + *  ixgbe_i2c_start - Sets I2C start condition + *  @hw: pointer to hardware structure + * + *  Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +static void ixgbe_i2c_start(struct ixgbe_hw *hw) +{ +	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + +	/* Start condition must begin with data and clock high */ +	ixgbe_set_i2c_data(hw, &i2cctl, 1); +	ixgbe_raise_i2c_clk(hw, &i2cctl); + +	/* Setup time for start condition (4.7us) */ +	udelay(IXGBE_I2C_T_SU_STA); + +	ixgbe_set_i2c_data(hw, &i2cctl, 0); + +	/* Hold time for start condition (4us) */ +	udelay(IXGBE_I2C_T_HD_STA); + +	ixgbe_lower_i2c_clk(hw, &i2cctl); + +	/* Minimum low period of clock is 4.7 us */ +	udelay(IXGBE_I2C_T_LOW); + +} + +/** + *  ixgbe_i2c_stop - Sets I2C stop condition + *  @hw: pointer to hardware structure + * + *  Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +static void ixgbe_i2c_stop(struct ixgbe_hw *hw) +{ +	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + +	/* Stop condition must begin with data low and clock high */ +	ixgbe_set_i2c_data(hw, &i2cctl, 0); +	ixgbe_raise_i2c_clk(hw, &i2cctl); + +	/* Setup time for stop condition (4us) */ +	udelay(IXGBE_I2C_T_SU_STO); + +	ixgbe_set_i2c_data(hw, &i2cctl, 1); + +	/* bus free time between stop and start (4.7us)*/ +	udelay(IXGBE_I2C_T_BUF); +} + +/** + *  ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C + *  @hw: pointer to hardware structure + *  @data: data byte to clock in + * + *  Clocks in one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +{ +	s32 status = 0; +	s32 i; +	bool bit = 0; + +	for (i = 7; i >= 0; i--) { +		status = ixgbe_clock_in_i2c_bit(hw, &bit); +		*data |= bit << i; + +		if (status != 0) +			break; +	} + +	return status; +} + +/** + *  ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C + *  @hw: pointer to hardware structure + *  @data: data byte clocked out + * + *  Clocks out one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +{ +	s32 status = 0; +	s32 i; +	u32 i2cctl; +	bool bit = 0; + +	for (i = 7; i >= 0; i--) { +		bit = (data >> i) & 0x1; +		status = 
ixgbe_clock_out_i2c_bit(hw, bit); + +		if (status != 0) +			break; +	} + +	/* Release SDA line (set high) */ +	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +	i2cctl |= IXGBE_I2C_DATA_OUT; +	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); + +	return status; +} + +/** + *  ixgbe_get_i2c_ack - Polls for I2C ACK + *  @hw: pointer to hardware structure + * + *  Clocks in/out one bit via I2C data/clock + **/ +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +{ +	s32 status; +	u32 i = 0; +	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +	u32 timeout = 10; +	bool ack = 1; + +	status = ixgbe_raise_i2c_clk(hw, &i2cctl); + +	if (status != 0) +		goto out; + +	/* Minimum high period of clock is 4us */ +	udelay(IXGBE_I2C_T_HIGH); + +	/* Poll for ACK.  Note that ACK in I2C spec is +	 * transition from 1 to 0 */ +	for (i = 0; i < timeout; i++) { +		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +		ack = ixgbe_get_i2c_data(&i2cctl); + +		udelay(1); +		if (ack == 0) +			break; +	} + +	if (ack == 1) { +		hw_dbg(hw, "I2C ack was not received.\n"); +		status = IXGBE_ERR_I2C; +	} + +	ixgbe_lower_i2c_clk(hw, &i2cctl); + +	/* Minimum low period of clock is 4.7 us */ +	udelay(IXGBE_I2C_T_LOW); + +out: +	return status; +} + +/** + *  ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + *  @hw: pointer to hardware structure + *  @data: read data value + * + *  Clocks in one bit via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +{ +	s32 status; +	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + +	status = ixgbe_raise_i2c_clk(hw, &i2cctl); + +	/* Minimum high period of clock is 4us */ +	udelay(IXGBE_I2C_T_HIGH); + +	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +	*data = ixgbe_get_i2c_data(&i2cctl); + +	ixgbe_lower_i2c_clk(hw, &i2cctl); + +	/* Minimum low period of clock is 4.7 us */ +	udelay(IXGBE_I2C_T_LOW); + +	return status; +} + +/** + *  ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + *  @hw: pointer to hardware structure + *  @data: data value to write + * + *  Clocks out one bit via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +{ +	s32 status; +	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + +	status = ixgbe_set_i2c_data(hw, &i2cctl, data); +	if (status == 0) { +		status = ixgbe_raise_i2c_clk(hw, &i2cctl); + +		/* Minimum high period of clock is 4us */ +		udelay(IXGBE_I2C_T_HIGH); + +		ixgbe_lower_i2c_clk(hw, &i2cctl); + +		/* Minimum low period of clock is 4.7 us. +		 * This also takes care of the data hold time. 
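For reference, both byte-clocking loops above run MSB first: clock-out sends (data >> i) & 0x1 with i counting down from 7, and clock-in rebuilds the byte with *data |= bit << i in the same order. A stand-alone sketch with an example byte, not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t data = 0xA5;	/* example byte */
	int i;

	/* Same bit order as ixgbe_clock_out_i2c_byte: MSB first. */
	for (i = 7; i >= 0; i--)
		printf("%d", (data >> i) & 0x1);	/* prints 10100101 */
	printf("\n");
	return 0;
}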
+		 */ +		udelay(IXGBE_I2C_T_LOW); +	} else { +		status = IXGBE_ERR_I2C; +		hw_dbg(hw, "I2C data was not set to %X\n", data); +	} + +	return status; +} +/** + *  ixgbe_raise_i2c_clk - Raises the I2C SCL clock + *  @hw: pointer to hardware structure + *  @i2cctl: Current value of I2CCTL register + * + *  Raises the I2C clock line '0'->'1' + **/ +static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ +	s32 status = 0; + +	*i2cctl |= IXGBE_I2C_CLK_OUT; + +	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); +	IXGBE_WRITE_FLUSH(hw); + +	/* SCL rise time (1000ns) */ +	udelay(IXGBE_I2C_T_RISE); + +	return status; +} + +/** + *  ixgbe_lower_i2c_clk - Lowers the I2C SCL clock + *  @hw: pointer to hardware structure + *  @i2cctl: Current value of I2CCTL register + * + *  Lowers the I2C clock line '1'->'0' + **/ +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + +	*i2cctl &= ~IXGBE_I2C_CLK_OUT; + +	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); +	IXGBE_WRITE_FLUSH(hw); + +	/* SCL fall time (300ns) */ +	udelay(IXGBE_I2C_T_FALL); +} + +/** + *  ixgbe_set_i2c_data - Sets the I2C data bit + *  @hw: pointer to hardware structure + *  @i2cctl: Current value of I2CCTL register + *  @data: I2C data value (0 or 1) to set + * + *  Sets the I2C data bit + **/ +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +{ +	s32 status = 0; + +	if (data) +		*i2cctl |= IXGBE_I2C_DATA_OUT; +	else +		*i2cctl &= ~IXGBE_I2C_DATA_OUT; + +	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); +	IXGBE_WRITE_FLUSH(hw); + +	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ +	udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + +	/* Verify data was set correctly */ +	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +	if (data != ixgbe_get_i2c_data(i2cctl)) { +		status = IXGBE_ERR_I2C; +		hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); +	} + +	return status; +} + +/** + *  ixgbe_get_i2c_data - Reads the I2C SDA data bit + *  @hw: pointer to hardware structure + *  @i2cctl: Current value of I2CCTL register + * + *  Returns the I2C data bit value + **/ +static bool ixgbe_get_i2c_data(u32 *i2cctl) +{ +	bool data; + +	if (*i2cctl & IXGBE_I2C_DATA_IN) +		data = 1; +	else +		data = 0; + +	return data; +} + +/** + *  ixgbe_i2c_bus_clear - Clears the I2C bus + *  @hw: pointer to hardware structure + * + *  Clears the I2C bus by sending nine clock pulses. + *  Used when data line is stuck low. + **/ +static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) +{ +	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +	u32 i; + +	ixgbe_i2c_start(hw); + +	ixgbe_set_i2c_data(hw, &i2cctl, 1); + +	for (i = 0; i < 9; i++) { +		ixgbe_raise_i2c_clk(hw, &i2cctl); + +		/* Min high period of clock is 4us */ +		udelay(IXGBE_I2C_T_HIGH); + +		ixgbe_lower_i2c_clk(hw, &i2cctl); + +		/* Min low period of clock is 4.7us*/ +		udelay(IXGBE_I2C_T_LOW); +	} + +	ixgbe_i2c_start(hw); + +	/* Put the i2c bus back to default state */ +	ixgbe_i2c_stop(hw); +} + +/** + *  ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 
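The bus-clear routine above is the usual I2C recovery trick: when a slave is stuck driving SDA low partway through a byte, toggling SCL up to nine times (commonly explained as eight remaining data bits plus the ACK slot) lets it finish shifting and release the line, after which a fresh START/STOP returns the bus to idle. A minimal stand-alone sketch of the same loop shape, with hypothetical stubs in place of the real SCL accessors:

#include <stdio.h>

/* Hypothetical stubs standing in for the driver's raise/lower SCL helpers. */
static void scl_high(void) { }
static void scl_low(void)  { }

int main(void)
{
	int i;

	/* Nine pulses, mirroring ixgbe_i2c_bus_clear above; the real code
	 * also holds T_HIGH after raising and T_LOW after lowering SCL. */
	for (i = 0; i < 9; i++) {
		scl_high();
		scl_low();
	}
	printf("sent %d recovery clock pulses\n", i);
	return 0;
}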
+ *  @hw: pointer to hardware structure + * + *  Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) +{ +	s32 status = 0; +	u16 phy_data = 0; + +	if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) +		goto out; + +	/* Check that the LASI temp alarm status was triggered */ +	hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, +	                     MDIO_MMD_PMAPMD, &phy_data); + +	if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) +		goto out; + +	status = IXGBE_ERR_OVERTEMP; +out: +	return status; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h new file mode 100644 index 00000000000..197bdd13106 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -0,0 +1,131 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_PHY_H_ +#define _IXGBE_PHY_H_ + +#include "ixgbe_type.h" +#define IXGBE_I2C_EEPROM_DEV_ADDR    0xA0 + +/* EEPROM byte offsets */ +#define IXGBE_SFF_IDENTIFIER         0x0 +#define IXGBE_SFF_IDENTIFIER_SFP     0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0   0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1   0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2   0x27 +#define IXGBE_SFF_1GBE_COMP_CODES    0x6 +#define IXGBE_SFF_10GBE_COMP_CODES   0x3 +#define IXGBE_SFF_CABLE_TECHNOLOGY   0x8 +#define IXGBE_SFF_CABLE_SPEC_COMP    0x3C + +/* Bitmasks */ +#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE            0x8 +#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING    0x4 +#define IXGBE_SFF_1GBASESX_CAPABLE           0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE           0x2 +#define IXGBE_SFF_1GBASET_CAPABLE            0x8 +#define IXGBE_SFF_10GBASESR_CAPABLE          0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE          0x20 +#define IXGBE_I2C_EEPROM_READ_MASK           0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK         0x3 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define IXGBE_I2C_EEPROM_STATUS_PASS         0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3 + +/* Flow control defines */ +#define IXGBE_TAF_SYM_PAUSE                  0x400 +#define IXGBE_TAF_ASM_PAUSE                  0x800 + +/* Bit-shift macros */ +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT    24 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT    16 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT    8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define IXGBE_SFF_VENDOR_OUI_TYCO     0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL      0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO    0x00176A00 +#define IXGBE_SFF_VENDOR_OUI_INTEL    0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define IXGBE_I2C_T_HD_STA  4 +#define IXGBE_I2C_T_LOW     5 +#define IXGBE_I2C_T_HIGH    4 +#define IXGBE_I2C_T_SU_STA  5 +#define IXGBE_I2C_T_HD_DATA 5 +#define IXGBE_I2C_T_SU_DATA 1 +#define IXGBE_I2C_T_RISE    1 +#define IXGBE_I2C_T_FALL    1 +#define IXGBE_I2C_T_SU_STO  4 +#define IXGBE_I2C_T_BUF     5 + +#define IXGBE_TN_LASI_STATUS_REG        0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, +                               u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, +                                u32 device_type, u16 phy_data); +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, +                                       ixgbe_link_speed speed, +                                       bool autoneg, +                                       bool autoneg_wait_to_complete); +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, +                                               ixgbe_link_speed *speed, +                                               bool *autoneg); + +/* PHY specific */ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, +                             ixgbe_link_speed *speed, +                             bool *link_up); +s32 
ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, +                                       u16 *firmware_version); +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, +                                           u16 *firmware_version); + +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, +                                        u16 *list_offset, +                                        u16 *data_offset); +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +                                u8 dev_addr, u8 *data); +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +                                 u8 dev_addr, u8 data); +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, +                                  u8 *eeprom_data); +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, +                                   u8 eeprom_data); +#endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c new file mode 100644 index 00000000000..d99d01e2132 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -0,0 +1,687 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/string.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#ifdef NETIF_F_HW_VLAN_TX +#include <linux/if_vlan.h> +#endif + +#include "ixgbe.h" + +#include "ixgbe_sriov.h" + +static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, +				   int entries, u16 *hash_list, u32 vf) +{ +	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; +	struct ixgbe_hw *hw = &adapter->hw; +	int i; +	u32 vector_bit; +	u32 vector_reg; +	u32 mta_reg; + +	/* only so many hash values supported */ +	entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); + +	/* +	 * salt away the number of multi cast addresses assigned +	 * to this VF for later use to restore when the PF multi cast +	 * list changes +	 */ +	vfinfo->num_vf_mc_hashes = entries; + +	/* +	 * VFs are limited to using the MTA hash table for their multicast +	 * addresses +	 */ +	for (i = 0; i < entries; i++) { +		vfinfo->vf_mc_hashes[i] = hash_list[i]; +	} + +	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { +		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; +		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; +		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); +		mta_reg |= (1 << vector_bit); +		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); +	} + +	return 0; +} + +static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	struct list_head *pos; +	struct vf_macvlans *entry; + +	list_for_each(pos, &adapter->vf_mvs.l) { +		entry = list_entry(pos, struct vf_macvlans, l); +		if (entry->free == false) +			hw->mac.ops.set_rar(hw, entry->rar_entry, +					    entry->vf_macvlan, +					    entry->vf, IXGBE_RAH_AV); +	} +} + +void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	struct vf_data_storage *vfinfo; +	int i, j; +	u32 vector_bit; +	u32 vector_reg; +	u32 mta_reg; + +	for (i = 0; i < adapter->num_vfs; i++) { +		vfinfo = &adapter->vfinfo[i]; +		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { +			hw->addr_ctrl.mta_in_use++; +			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; +			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; +			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); +			mta_reg |= (1 << vector_bit); +			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); +		} +	} + +	/* Restore any VF macvlans */ +	ixgbe_restore_vf_macvlans(adapter); +} + +static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, +			     u32 vf) +{ +	return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); +} + +static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int new_mtu = msgbuf[1]; +	u32 max_frs; +	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + +	/* Only X540 supports jumbo frames in IOV mode */ +	if (adapter->hw.mac.type != ixgbe_mac_X540) +		return; + +	/* MTU < 68 is an error and causes problems on some kernels */ +	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) { +		e_err(drv, "VF mtu %d out of range\n", new_mtu); +		return; +	} + +	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & +		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; +	if (max_frs < new_mtu) { +		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; 
+		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); +	} + +	e_info(hw, "VF requests change max MTU to %d\n", new_mtu); +} + +static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) +{ +	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); +	vmolr |= (IXGBE_VMOLR_ROMPE | +		  IXGBE_VMOLR_BAM); +	if (aupe) +		vmolr |= IXGBE_VMOLR_AUPE; +	else +		vmolr &= ~IXGBE_VMOLR_AUPE; +	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); +} + +static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf) +{ +	struct ixgbe_hw *hw = &adapter->hw; + +	if (vid) +		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), +				(vid | IXGBE_VMVIR_VLANA_DEFAULT)); +	else +		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); +} + +static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int rar_entry = hw->mac.num_rar_entries - (vf + 1); + +	/* reset offloads to defaults */ +	if (adapter->vfinfo[vf].pf_vlan) { +		ixgbe_set_vf_vlan(adapter, true, +				  adapter->vfinfo[vf].pf_vlan, vf); +		ixgbe_set_vmvir(adapter, +				(adapter->vfinfo[vf].pf_vlan | +				 (adapter->vfinfo[vf].pf_qos << +				  VLAN_PRIO_SHIFT)), vf); +		ixgbe_set_vmolr(hw, vf, false); +	} else { +		ixgbe_set_vmvir(adapter, 0, vf); +		ixgbe_set_vmolr(hw, vf, true); +	} + +	/* reset multicast table array for vf */ +	adapter->vfinfo[vf].num_vf_mc_hashes = 0; + +	/* Flush and reset the mta with the new values */ +	ixgbe_set_rx_mode(adapter->netdev); + +	hw->mac.ops.clear_rar(hw, rar_entry); +} + +static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, +			    int vf, unsigned char *mac_addr) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	int rar_entry = hw->mac.num_rar_entries - (vf + 1); + +	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); +	hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV); + +	return 0; +} + +static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, +				int vf, int index, unsigned char *mac_addr) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	struct list_head *pos; +	struct vf_macvlans *entry; + +	if (index <= 1) { +		list_for_each(pos, &adapter->vf_mvs.l) { +			entry = list_entry(pos, struct vf_macvlans, l); +			if (entry->vf == vf) { +				entry->vf = -1; +				entry->free = true; +				entry->is_macvlan = false; +				hw->mac.ops.clear_rar(hw, entry->rar_entry); +			} +		} +	} + +	/* +	 * If index was zero then we were asked to clear the uc list +	 * for the VF.  We're done. +	 */ +	if (!index) +		return 0; + +	entry = NULL; + +	list_for_each(pos, &adapter->vf_mvs.l) { +		entry = list_entry(pos, struct vf_macvlans, l); +		if (entry->free) +			break; +	} + +	/* +	 * If we traversed the entire list and didn't find a free entry +	 * then we're out of space on the RAR table.  Also entry may +	 * be NULL because the original memory allocation for the list +	 * failed, which is not fatal but does mean we can't support +	 * VF requests for MACVLAN because we couldn't allocate +	 * memory for the list management required. 
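A small aside on the rar_entry arithmetic used in ixgbe_vf_reset_event and ixgbe_set_vf_mac above: each VF's unicast MAC is parked in a receive-address register counted back from the end of the RAR table, rar_entry = num_rar_entries - (vf + 1), so VF 0 takes the last slot, VF 1 the one before it, and so on. A stand-alone sketch with a hypothetical table size, not part of the patch:

#include <stdio.h>

int main(void)
{
	int num_rar_entries = 128;	/* hypothetical; the real count comes from hw->mac */
	int vf;

	for (vf = 0; vf < 4; vf++)
		printf("VF %d -> RAR entry %d\n", vf, num_rar_entries - (vf + 1));
	/* VF 0 -> 127, VF 1 -> 126, VF 2 -> 125, VF 3 -> 124 */
	return 0;
}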
+	 */ +	if (!entry || !entry->free) +		return -ENOSPC; + +	entry->free = false; +	entry->is_macvlan = true; +	entry->vf = vf; +	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + +	hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV); + +	return 0; +} + +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ +	unsigned char vf_mac_addr[6]; +	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +	unsigned int vfn = (event_mask & 0x3f); + +	bool enable = ((event_mask & 0x10000000U) != 0); + +	if (enable) { +		random_ether_addr(vf_mac_addr); +		e_info(probe, "IOV: VF %d is enabled MAC %pM\n", +		       vfn, vf_mac_addr); +		/* +		 * Store away the VF "permananet" MAC address, it will ask +		 * for it later. +		 */ +		memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); +	} + +	return 0; +} + +static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 reg; +	u32 reg_offset, vf_shift; + +	vf_shift = vf % 32; +	reg_offset = vf / 32; + +	/* enable transmit and receive for vf */ +	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); +	reg |= (reg | (1 << vf_shift)); +	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); + +	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); +	reg |= (reg | (1 << vf_shift)); +	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + +	/* Enable counting of spoofed packets in the SSVPC register */ +	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); +	reg |= (1 << vf_shift); +	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); + +	ixgbe_vf_reset_event(adapter, vf); +} + +static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) +{ +	u32 mbx_size = IXGBE_VFMAILBOX_SIZE; +	u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; +	struct ixgbe_hw *hw = &adapter->hw; +	s32 retval; +	int entries; +	u16 *hash_list; +	int add, vid, index; +	u8 *new_mac; + +	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); + +	if (retval) +		pr_err("Error receiving message from VF\n"); + +	/* this is a message we already processed, do nothing */ +	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) +		return retval; + +	/* +	 * until the vf completes a virtual function reset it should not be +	 * allowed to start any configuration. 
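The dispatch below keys everything off the first mailbox word: the low 16 bits select the request (IXGBE_VF_SET_MAC_ADDR, IXGBE_VF_SET_MULTICAST, IXGBE_VF_SET_VLAN, ...), per-request details such as the multicast entry count or the VLAN add/remove flag sit behind IXGBE_VT_MSGINFO_MASK and IXGBE_VT_MSGINFO_SHIFT, and the ACK/NACK/CTS status bits travel in the same word. A stand-alone sketch of that unpacking; the mask and shift values here are assumed stand-ins, since the real definitions live in ixgbe_mbx.h, which is not part of this hunk:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for the IXGBE_VT_MSGINFO_* values in ixgbe_mbx.h. */
#define VT_MSGINFO_SHIFT	16
#define VT_MSGINFO_MASK		(0xFFu << VT_MSGINFO_SHIFT)

int main(void)
{
	uint32_t msgbuf0 = (3u << VT_MSGINFO_SHIFT) | 0x0003u;	/* example word only */

	printf("request type: 0x%04X\n", (unsigned)(msgbuf0 & 0xFFFF));
	printf("request info: %u\n",
	       (unsigned)((msgbuf0 & VT_MSGINFO_MASK) >> VT_MSGINFO_SHIFT));
	return 0;
}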
+	 */ + +	if (msgbuf[0] == IXGBE_VF_RESET) { +		unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; +		new_mac = (u8 *)(&msgbuf[1]); +		e_info(probe, "VF Reset msg received from vf %d\n", vf); +		adapter->vfinfo[vf].clear_to_send = false; +		ixgbe_vf_reset_msg(adapter, vf); +		adapter->vfinfo[vf].clear_to_send = true; + +		if (is_valid_ether_addr(new_mac) && +		    !adapter->vfinfo[vf].pf_set_mac) +			ixgbe_set_vf_mac(adapter, vf, vf_mac); +		else +			ixgbe_set_vf_mac(adapter, +				 vf, adapter->vfinfo[vf].vf_mac_addresses); + +		/* reply to reset with ack and vf mac address */ +		msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; +		memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); +		/* +		 * Piggyback the multicast filter type so VF can compute the +		 * correct vectors +		 */ +		msgbuf[3] = hw->mac.mc_filter_type; +		ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); + +		return retval; +	} + +	if (!adapter->vfinfo[vf].clear_to_send) { +		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; +		ixgbe_write_mbx(hw, msgbuf, 1, vf); +		return retval; +	} + +	switch ((msgbuf[0] & 0xFFFF)) { +	case IXGBE_VF_SET_MAC_ADDR: +		new_mac = ((u8 *)(&msgbuf[1])); +		if (is_valid_ether_addr(new_mac) && +		    !adapter->vfinfo[vf].pf_set_mac) { +			ixgbe_set_vf_mac(adapter, vf, new_mac); +		} else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses, +				  new_mac, ETH_ALEN)) { +			e_warn(drv, "VF %d attempted to override " +			       "administratively set MAC address\nReload " +			       "the VF driver to resume operations\n", vf); +			retval = -1; +		} +		break; +	case IXGBE_VF_SET_MULTICAST: +		entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) +		          >> IXGBE_VT_MSGINFO_SHIFT; +		hash_list = (u16 *)&msgbuf[1]; +		retval = ixgbe_set_vf_multicasts(adapter, entries, +		                                 hash_list, vf); +		break; +	case IXGBE_VF_SET_LPE: +		ixgbe_set_vf_lpe(adapter, msgbuf); +		break; +	case IXGBE_VF_SET_VLAN: +		add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) +		      >> IXGBE_VT_MSGINFO_SHIFT; +		vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); +		if (adapter->vfinfo[vf].pf_vlan) { +			e_warn(drv, "VF %d attempted to override " +			       "administratively set VLAN configuration\n" +			       "Reload the VF driver to resume operations\n", +			       vf); +			retval = -1; +		} else { +			retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); +		} +		break; +	case IXGBE_VF_SET_MACVLAN: +		index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> +			IXGBE_VT_MSGINFO_SHIFT; +		/* +		 * If the VF is allowed to set MAC filters then turn off +		 * anti-spoofing to avoid false positives.  An index +		 * greater than 0 will indicate the VF is setting a +		 * macvlan MAC filter. 
+		 */ +		if (index > 0 && adapter->antispoofing_enabled) { +			hw->mac.ops.set_mac_anti_spoofing(hw, false, +							  adapter->num_vfs); +			hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); +			adapter->antispoofing_enabled = false; +		} +		retval = ixgbe_set_vf_macvlan(adapter, vf, index, +					      (unsigned char *)(&msgbuf[1])); +		break; +	default: +		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); +		retval = IXGBE_ERR_MBX; +		break; +	} + +	/* notify the VF of the results of what it sent us */ +	if (retval) +		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; +	else +		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + +	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; + +	ixgbe_write_mbx(hw, msgbuf, 1, vf); + +	return retval; +} + +static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 msg = IXGBE_VT_MSGTYPE_NACK; + +	/* if device isn't clear to send it shouldn't be reading either */ +	if (!adapter->vfinfo[vf].clear_to_send) +		ixgbe_write_mbx(hw, &msg, 1, vf); +} + +void ixgbe_msg_task(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 vf; + +	for (vf = 0; vf < adapter->num_vfs; vf++) { +		/* process any reset requests */ +		if (!ixgbe_check_for_rst(hw, vf)) +			ixgbe_vf_reset_event(adapter, vf); + +		/* process any messages pending */ +		if (!ixgbe_check_for_msg(hw, vf)) +			ixgbe_rcv_msg_from_vf(adapter, vf); + +		/* process any acks */ +		if (!ixgbe_check_for_ack(hw, vf)) +			ixgbe_rcv_ack_from_vf(adapter, vf); +	} +} + +void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; + +	/* disable transmit and receive for all vfs */ +	IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); +	IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); + +	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); +	IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); +} + +void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) +{ +	struct ixgbe_hw *hw = &adapter->hw; +	u32 ping; +	int i; + +	for (i = 0 ; i < adapter->num_vfs; i++) { +		ping = IXGBE_PF_CONTROL_MSG; +		if (adapter->vfinfo[i].clear_to_send) +			ping |= IXGBE_VT_MSGTYPE_CTS; +		ixgbe_write_mbx(hw, &ping, 1, i); +	} +} + +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) +		return -EINVAL; +	adapter->vfinfo[vf].pf_set_mac = true; +	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); +	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" +				      " change effective."); +	if (test_bit(__IXGBE_DOWN, &adapter->state)) { +		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," +			 " but the PF device is not up.\n"); +		dev_warn(&adapter->pdev->dev, "Bring the PF device up before" +			 " attempting to use the VF device.\n"); +	} +	return ixgbe_set_vf_mac(adapter, vf, mac); +} + +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +{ +	int err = 0; +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; + +	if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) +		return -EINVAL; +	if (vlan || qos) { +		err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); +		if (err) +			goto out; +		ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); +		ixgbe_set_vmolr(hw, vf, false); +		if (adapter->antispoofing_enabled) +			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); +		adapter->vfinfo[vf].pf_vlan = vlan; +		adapter->vfinfo[vf].pf_qos = qos; +		
dev_info(&adapter->pdev->dev, +			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); +		if (test_bit(__IXGBE_DOWN, &adapter->state)) { +			dev_warn(&adapter->pdev->dev, +				 "The VF VLAN has been set," +				 " but the PF device is not up.\n"); +			dev_warn(&adapter->pdev->dev, +				 "Bring the PF device up before" +				 " attempting to use the VF device.\n"); +		} +	} else { +		err = ixgbe_set_vf_vlan(adapter, false, +					adapter->vfinfo[vf].pf_vlan, vf); +		ixgbe_set_vmvir(adapter, vlan, vf); +		ixgbe_set_vmolr(hw, vf, true); +		hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); +		adapter->vfinfo[vf].pf_vlan = 0; +		adapter->vfinfo[vf].pf_qos = 0; +       } +out: +       return err; +} + +static int ixgbe_link_mbps(int internal_link_speed) +{ +	switch (internal_link_speed) { +	case IXGBE_LINK_SPEED_100_FULL: +		return 100; +	case IXGBE_LINK_SPEED_1GB_FULL: +		return 1000; +	case IXGBE_LINK_SPEED_10GB_FULL: +		return 10000; +	default: +		return 0; +	} +} + +static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, +				    int link_speed) +{ +	int rf_dec, rf_int; +	u32 bcnrc_val; + +	if (tx_rate != 0) { +		/* Calculate the rate factor values to set */ +		rf_int = link_speed / tx_rate; +		rf_dec = (link_speed - (rf_int * tx_rate)); +		rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; + +		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; +		bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) & +		               IXGBE_RTTBCNRC_RF_INT_MASK); +		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); +	} else { +		bcnrc_val = 0; +	} + +	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */ +	/* +	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM +	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported +	 * and 0x004 otherwise. +	 */ +	switch (hw->mac.type) { +	case ixgbe_mac_82599EB: +		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4); +		break; +	case ixgbe_mac_X540: +		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14); +		break; +	default: +		break; +	} + +	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); +} + +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) +{ +	int actual_link_speed, i; +	bool reset_rate = false; + +	/* VF Tx rate limit was not set */ +	if (adapter->vf_rate_link_speed == 0) +		return; + +	actual_link_speed = ixgbe_link_mbps(adapter->link_speed); +	if (actual_link_speed != adapter->vf_rate_link_speed) { +		reset_rate = true; +		adapter->vf_rate_link_speed = 0; +		dev_info(&adapter->pdev->dev, +		         "Link speed has been changed. 
VF Transmit rate " +		         "is disabled\n"); +	} + +	for (i = 0; i < adapter->num_vfs; i++) { +		if (reset_rate) +			adapter->vfinfo[i].tx_rate = 0; + +		ixgbe_set_vf_rate_limit(&adapter->hw, i, +					adapter->vfinfo[i].tx_rate, +					actual_link_speed); +	} +} + +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	struct ixgbe_hw *hw = &adapter->hw; +	int actual_link_speed; + +	actual_link_speed = ixgbe_link_mbps(adapter->link_speed); +	if ((vf >= adapter->num_vfs) || (!adapter->link_up) || +	    (tx_rate > actual_link_speed) || (actual_link_speed != 10000) || +	    ((tx_rate != 0) && (tx_rate <= 10))) +	    /* rate limit cannot be set to 10Mb or less in 10Gb adapters */ +		return -EINVAL; + +	adapter->vf_rate_link_speed = actual_link_speed; +	adapter->vfinfo[vf].tx_rate = (u16)tx_rate; +	ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); + +	return 0; +} + +int ixgbe_ndo_get_vf_config(struct net_device *netdev, +			    int vf, struct ifla_vf_info *ivi) +{ +	struct ixgbe_adapter *adapter = netdev_priv(netdev); +	if (vf >= adapter->num_vfs) +		return -EINVAL; +	ivi->vf = vf; +	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); +	ivi->tx_rate = adapter->vfinfo[vf].tx_rate; +	ivi->vlan = adapter->vfinfo[vf].pf_vlan; +	ivi->qos = adapter->vfinfo[vf].pf_qos; +	return 0; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h new file mode 100644 index 00000000000..34175564bb7 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -0,0 +1,46 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_SRIOV_H_ +#define _IXGBE_SRIOV_H_ + +void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); +void ixgbe_msg_task(struct ixgbe_adapter *adapter); +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); +void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); +void ixgbe_dump_registers(struct ixgbe_adapter *adapter); +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, +			   u8 qos); +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +int ixgbe_ndo_get_vf_config(struct net_device *netdev, +			    int vf, struct ifla_vf_info *ivi); +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); + +#endif /* _IXGBE_SRIOV_H_ */ + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h new file mode 100644 index 00000000000..e0d970ebab7 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -0,0 +1,2877 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_TYPE_H_ +#define _IXGBE_TYPE_H_ + +#include <linux/types.h> +#include <linux/mdio.h> +#include <linux/netdevice.h> + +/* Vendor ID */ +#define IXGBE_INTEL_VENDOR_ID   0x8086 + +/* Device IDs */ +#define IXGBE_DEV_ID_82598               0x10B6 +#define IXGBE_DEV_ID_82598_BX            0x1508 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT   0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598EB_SFP_LOM     0x10DB +#define IXGBE_DEV_ID_82598AT             0x10C8 +#define IXGBE_DEV_ID_82598AT2            0x150B +#define IXGBE_DEV_ID_82598EB_CX4         0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT  0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM      0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR       0x10F4 +#define IXGBE_DEV_ID_82599_KX4           0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ      0x1514 +#define IXGBE_DEV_ID_82599_KR            0x1517 +#define IXGBE_DEV_ID_82599_T3_LOM        0x151C +#define IXGBE_DEV_ID_82599_CX4           0x10F9 +#define IXGBE_DEV_ID_82599_SFP           0x10FB +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152a +#define IXGBE_DEV_ID_82599_SFP_FCOE      0x1529 +#define IXGBE_SUBDEV_ID_82599_SFP        0x11A9 +#define IXGBE_DEV_ID_82599_SFP_EM        0x1507 +#define IXGBE_DEV_ID_82599_SFP_SF2       0x154D +#define IXGBE_DEV_ID_82599_XAUI_LOM      0x10FC +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ  0x000C +#define IXGBE_DEV_ID_82599_LS            0x154F +#define IXGBE_DEV_ID_X540T               0x1528 + +/* General Registers */ +#define IXGBE_CTRL      0x00000 +#define IXGBE_STATUS    0x00008 +#define IXGBE_CTRL_EXT  0x00018 +#define IXGBE_ESDP      0x00020 +#define IXGBE_EODSDP    0x00028 +#define IXGBE_I2CCTL    0x00028 +#define IXGBE_LEDCTL    0x00200 +#define IXGBE_FRTIMER   0x00048 +#define IXGBE_TCPTIMER  0x0004C +#define IXGBE_CORESPARE 0x00600 +#define IXGBE_EXVET     0x05078 + +/* NVM Registers */ +#define IXGBE_EEC       0x10010 +#define IXGBE_EERD      0x10014 +#define IXGBE_EEWR      0x10018 +#define IXGBE_FLA       0x1001C +#define IXGBE_EEMNGCTL  0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL  0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT  0x10120 +#define IXGBE_FLOP      0x1013C +#define IXGBE_GRC       0x10200 + +/* General Receive Control */ +#define IXGBE_GRC_MNG  0x00000001 /* Manageability Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ + +#define IXGBE_VPDDIAG0  0x10204 +#define IXGBE_VPDDIAG1  0x10208 + +/* I2CCTL Bit Masks */ +#define IXGBE_I2C_CLK_IN    0x00000001 +#define IXGBE_I2C_CLK_OUT   0x00000002 +#define IXGBE_I2C_DATA_IN   0x00000004 +#define IXGBE_I2C_DATA_OUT  0x00000008 + +/* Interrupt Registers */ +#define IXGBE_EICR      0x00800 +#define IXGBE_EICS      0x00808 +#define IXGBE_EIMS      0x00880 +#define IXGBE_EIMC      0x00888 +#define IXGBE_EIAC      0x00810 +#define IXGBE_EIAM      0x00890 +#define IXGBE_EICS_EX(_i)   (0x00A90 + (_i) * 4) +#define IXGBE_EIMS_EX(_i)   (0x00AA0 + (_i) * 4) +#define IXGBE_EIMC_EX(_i)   (0x00AB0 + (_i) * 4) +#define IXGBE_EIAM_EX(_i)   (0x00AD0 + (_i) * 4) +/* + * 82598 EITR is 16 bits but set the limits based on the max + * supported by all ixgbe hardware.  82599 EITR is only 12 bits, + * with the lower 3 always zero. 
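Because the 82599 interrupt throttle field keeps its low three bits at zero, usable EITR interval values are multiples of 8, which is what the IXGBE_EITR_ITR_INT_MASK, IXGBE_MAX_EITR and IXGBE_MIN_EITR values just below encode. A stand-alone sketch of clamping a requested value onto that granularity, not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t requested = 1234;		/* arbitrary example value */
	uint32_t itr_mask = 0x00000FF8;		/* mirrors IXGBE_EITR_ITR_INT_MASK */

	/* Drops the low three bits and caps the field at 0xFF8. */
	printf("programmed ITR field: 0x%03X\n", (unsigned)(requested & itr_mask));
	return 0;
}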
+ */ +#define IXGBE_MAX_INT_RATE 488281 +#define IXGBE_MIN_INT_RATE 956 +#define IXGBE_MAX_EITR     0x00000FF8 +#define IXGBE_MIN_EITR     8 +#define IXGBE_EITR(_i)  (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ +                         (0x012300 + (((_i) - 24) * 4))) +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +#define IXGBE_EITR_LLI_MOD      0x00008000 +#define IXGBE_EITR_CNT_WDIS     0x80000000 +#define IXGBE_IVAR(_i)  (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +#define IXGBE_EITRSEL   0x00894 +#define IXGBE_MSIXT     0x00000 /* MSI-X Table. 0x0000 - 0x01C */ +#define IXGBE_MSIXPBA   0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) +#define IXGBE_GPIE      0x00898 + +/* Flow Control Registers */ +#define IXGBE_FCADBUL   0x03210 +#define IXGBE_FCADBUH   0x03214 +#define IXGBE_FCAMACL   0x04328 +#define IXGBE_FCAMACH   0x0432C +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_PFCTOP    0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV     0x032A0 +#define IXGBE_FCCFG     0x03D00 +#define IXGBE_TFCS      0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ +                         (0x0D000 + ((_i - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ +                         (0x0D004 + ((_i - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ +                         (0x0D008 + ((_i - 64) * 0x40))) +#define IXGBE_RDH(_i)   (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ +                         (0x0D010 + ((_i - 64) * 0x40))) +#define IXGBE_RDT(_i)   (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ +                         (0x0D018 + ((_i - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ +                          (0x0D028 + ((_i - 64) * 0x40))) +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ +                          (0x0D02C + ((_i - 64) * 0x40))) +#define IXGBE_RSCDBU     0x03028 +#define IXGBE_RDDCC      0x02F20 +#define IXGBE_RXMEMWRAP  0x03190 +#define IXGBE_STARCTRL   0x03024 +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ +                          (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ +                          (0x0D014 + ((_i - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i)    (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ +                                 (((_i) < 64) ? 
(0x0100C + ((_i) * 0x40)) : \ +                                 (0x0D00C + ((_i - 64) * 0x40)))) +#define IXGBE_RDRXCTL           0x02F00 +#define IXGBE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4)) +                                             /* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXCTRL    0x03000 +#define IXGBE_DROPEN    0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 + +/* Receive Registers */ +#define IXGBE_RXCSUM    0x05000 +#define IXGBE_RFCTL     0x05008 +#define IXGBE_DRECCCTL  0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +/* Multicast Table Array - 128 entries */ +#define IXGBE_MTA(_i)   (0x05200 + ((_i) * 4)) +#define IXGBE_RAL(_i)   (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ +                         (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i)   (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ +                         (0x0A204 + ((_i) * 8))) +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i)    (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ +                              (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTA(_i)  (0x0A000 + ((_i) * 4)) +/*array of 4096 4-bit vlan vmdq indices */ +#define IXGBE_VFTAVIND(_j, _i)  (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +#define IXGBE_FCTRL     0x05080 +#define IXGBE_VLNCTRL   0x05088 +#define IXGBE_MCSTCTRL  0x05090 +#define IXGBE_MRQC      0x05818 +#define IXGBE_SAQF(_i)  (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +#define IXGBE_DAQF(_i)  (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ +#define IXGBE_FTQF(_i)  (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +#define IXGBE_ETQF(_i)  (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +#define IXGBE_ETQS(_i)  (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +#define IXGBE_SYNQF     0x0EC30 /* SYN Packet Queue Filter */ +#define IXGBE_RQTC      0x0EC70 +#define IXGBE_MTQC      0x08120 +#define IXGBE_VLVF(_i)  (0x0F100 + ((_i) * 4))  /* 64 of these (0-63) */ +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4))  /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4))  /* 64 of these (0-63) */ +#define IXGBE_VT_CTL         0x051B0 +#define IXGBE_PFMAILBOX(_i)  (0x04B00 + (4 * (_i))) /* 64 total */ +#define IXGBE_PFMBMEM(_i)    (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ +#define IXGBE_PFMBICR(_i)    (0x00710 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFMBIMR(_i)    (0x00720 + (4 * (_i))) /* 4 total */ +#define IXGBE_VFRE(_i)       (0x051E0 + ((_i) * 4)) +#define IXGBE_VFTE(_i)       (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i)      (0x08790 + ((_i) * 4)) +#define IXGBE_QDE            0x2F04 +#define IXGBE_VMTXSW(_i)     (0x05180 + ((_i) * 4)) /* 2 total */ +#define IXGBE_VMOLR(_i)      (0x0F000 + ((_i) * 4)) /* 64 total */ +#define IXGBE_UTA(_i)        (0x0F400 + ((_i) * 4)) +#define IXGBE_MRCTL(_i)      (0x0F600 + ((_i) * 4)) +#define IXGBE_VMRVLAN(_i)    (0x0F610 + ((_i) * 4)) +#define IXGBE_VMRVM(_i)      (0x0F630 + ((_i) * 4)) +#define IXGBE_L34T_IMIR(_i)  (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +#define IXGBE_RXFECCERR0         0x051B8 +#define IXGBE_LLITHRESH 0x0EC90 +#define IXGBE_IMIR(_i)  (0x05A80 + ((_i) * 4))  /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i)       (0x05AA0 + ((_i) * 4))  /* 8 of these (0-7) */ +#define IXGBE_IMIRVP    0x05AC0 +#define IXGBE_VMD_CTL   0x0581C +#define IXGBE_RETA(_i)  
(0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */ + +/* Flow Director registers */ +#define IXGBE_FDIRCTRL  0x0EE00 +#define IXGBE_FDIRHKEY  0x0EE68 +#define IXGBE_FDIRSKEY  0x0EE6C +#define IXGBE_FDIRDIP4M 0x0EE3C +#define IXGBE_FDIRSIP4M 0x0EE40 +#define IXGBE_FDIRTCPM  0x0EE44 +#define IXGBE_FDIRUDPM  0x0EE48 +#define IXGBE_FDIRIP6M  0x0EE74 +#define IXGBE_FDIRM     0x0EE70 + +/* Flow Director Stats registers */ +#define IXGBE_FDIRFREE  0x0EE38 +#define IXGBE_FDIRLEN   0x0EE4C +#define IXGBE_FDIRUSTAT 0x0EE50 +#define IXGBE_FDIRFSTAT 0x0EE54 +#define IXGBE_FDIRMATCH 0x0EE58 +#define IXGBE_FDIRMISS  0x0EE5C + +/* Flow Director Programming registers */ +#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ +#define IXGBE_FDIRIPSA      0x0EE18 +#define IXGBE_FDIRIPDA      0x0EE1C +#define IXGBE_FDIRPORT      0x0EE20 +#define IXGBE_FDIRVLAN      0x0EE24 +#define IXGBE_FDIRHASH      0x0EE28 +#define IXGBE_FDIRCMD       0x0EE2C + +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i)   (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i)   (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL    0x07E00 + +#define IXGBE_DMATXCTL      0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define IXGBE_PFDTXGSWC     0x08220 +#define IXGBE_DTXMXSZRQ     0x08100 +#define IXGBE_DTXTCPFLGL    0x04A88 +#define IXGBE_DTXTCPFLGH    0x04A8C +#define IXGBE_LBDRPEN       0x0CA00 +#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +#define IXGBE_DMATXCTL_TE       0x1 /* Transmit Enable */ +#define IXGBE_DMATXCTL_NS       0x2 /* No Snoop LSO hdr buffer */ +#define IXGBE_DMATXCTL_GDV      0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_VT_SHIFT 16  /* VLAN EtherType */ + +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + +/* Anti-spoofing defines */ +#define IXGBE_SPOOF_MACAS_MASK          0xFF +#define IXGBE_SPOOF_VLANAS_MASK         0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT        8 +#define IXGBE_PFVFSPOOF_REG_COUNT       8 + +#define IXGBE_DCA_TXCTRL(_i)    (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ +/* Tx DCA Control register : 128 of these (0-127) */ +#define IXGBE_DCA_TXCTRL_82599(_i)  (0x0600C + ((_i) * 0x40)) +#define IXGBE_TIPG      0x0CB00 +#define IXGBE_TXPBSIZE(_i)      (0x0CC00 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_MNGTXMAP  0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT    10 + +/* Wake up registers */ +#define IXGBE_WUC       0x05800 +#define IXGBE_WUFC      0x05808 +#define IXGBE_WUS       0x05810 +#define IXGBE_IPAV      0x05838 +#define IXGBE_IP4AT     0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT     0x05880 /* IPv6 table 0x5880-0x588F */ + +#define IXGBE_WUPL      0x05900 +#define IXGBE_WUPM      0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_FHFT(_n)     (0x09000 + (_n * 0x100)) /* Flex host filter table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host +                                                     * Filter Table */ + +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX         4 +#define 
IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX     2 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX  128 +#define IXGBE_FHFT_LENGTH_OFFSET        0xFC  /* Length byte in FHFT */ +#define IXGBE_FHFT_LENGTH_MASK          0x0FF /* Length in lower byte */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IXGBE_WUC_PME_EN     0x00000002 /* PME Enable */ +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IXGBE_WUC_WKEN       0x00000010 /* Enable PE_WAKE_N pin assertion  */ + +/* Wake Up Filter Control */ +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IXGBE_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */ +#define IXGBE_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */ +#define IXGBE_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */ +#define IXGBE_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */ +#define IXGBE_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define IXGBE_WUFC_MNG  0x00000100 /* Directed Mgmt Packet Wakeup Enable */ + +#define IXGBE_WUFC_IGNORE_TCO   0x00008000 /* Ignore WakeOn TCO packets */ +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +#define IXGBE_WUFC_FLX_FILTERS     0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. 
flex filters */ +#define IXGBE_WUFC_ALL_FILTERS     0x003F00FF /* Mask for all wakeup filters */ +#define IXGBE_WUFC_FLX_OFFSET      16 /* Offset to the Flexible Filters bits */ + +/* Wake Up Status */ +#define IXGBE_WUS_LNKC  IXGBE_WUFC_LNKC +#define IXGBE_WUS_MAG   IXGBE_WUFC_MAG +#define IXGBE_WUS_EX    IXGBE_WUFC_EX +#define IXGBE_WUS_MC    IXGBE_WUFC_MC +#define IXGBE_WUS_BC    IXGBE_WUFC_BC +#define IXGBE_WUS_ARP   IXGBE_WUFC_ARP +#define IXGBE_WUS_IPV4  IXGBE_WUFC_IPV4 +#define IXGBE_WUS_IPV6  IXGBE_WUFC_IPV6 +#define IXGBE_WUS_MNG   IXGBE_WUFC_MNG +#define IXGBE_WUS_FLX0  IXGBE_WUFC_FLX0 +#define IXGBE_WUS_FLX1  IXGBE_WUFC_FLX1 +#define IXGBE_WUS_FLX2  IXGBE_WUFC_FLX2 +#define IXGBE_WUS_FLX3  IXGBE_WUFC_FLX3 +#define IXGBE_WUS_FLX4  IXGBE_WUFC_FLX4 +#define IXGBE_WUS_FLX5  IXGBE_WUFC_FLX5 +#define IXGBE_WUS_FLX_FILTERS  IXGBE_WUFC_FLX_FILTERS + +/* Wake Up Packet Length */ +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF + +/* DCB registers */ +#define IXGBE_RMCS      0x03D00 +#define IXGBE_DPMCS     0x07F40 +#define IXGBE_PDPMCS    0x0CD00 +#define IXGBE_RUPPBMR   0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i)     (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i)     (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i)     (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCSR(_i)     (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + + +/* Security Control Registers */ +#define IXGBE_SECTXCTRL         0x08800 +#define IXGBE_SECTXSTAT         0x08804 +#define IXGBE_SECTXBUFFAF       0x08808 +#define IXGBE_SECTXMINIFG       0x08810 +#define IXGBE_SECRXCTRL         0x08D00 +#define IXGBE_SECRXSTAT         0x08D04 + +/* Security Bit Fields and Masks */ +#define IXGBE_SECTXCTRL_SECTX_DIS       0x00000001 +#define IXGBE_SECTXCTRL_TX_DIS          0x00000002 +#define IXGBE_SECTXCTRL_STORE_FORWARD   0x00000004 + +#define IXGBE_SECTXSTAT_SECTX_RDY       0x00000001 +#define IXGBE_SECTXSTAT_ECC_TXERR       0x00000002 + +#define IXGBE_SECRXCTRL_SECRX_DIS       0x00000001 +#define IXGBE_SECRXCTRL_RX_DIS          0x00000002 + +#define IXGBE_SECRXSTAT_SECRX_RDY       0x00000001 +#define IXGBE_SECRXSTAT_ECC_RXERR       0x00000002 + +/* LinkSec (MacSec) Registers */ +#define IXGBE_LSECTXCAP         0x08A00 +#define IXGBE_LSECRXCAP         0x08F00 +#define IXGBE_LSECTXCTRL        0x08A04 +#define IXGBE_LSECTXSCL         0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH         0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA          0x08A10 +#define IXGBE_LSECTXPN0         0x08A14 +#define IXGBE_LSECTXPN1         0x08A18 +#define IXGBE_LSECTXKEY0(_n)    (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n)    (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL        0x08F04 +#define IXGBE_LSECRXSCL         0x08F08 +#define IXGBE_LSECRXSCH         0x08F0C +#define IXGBE_LSECRXSA(_i)      (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXPN(_i)      (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) +#define IXGBE_LSECTXUT          0x08A3C /* OutPktsUntagged */ +#define IXGBE_LSECTXPKTE        0x08A40 /* OutPktsEncrypted */ +#define IXGBE_LSECTXPKTP        0x08A44 /* OutPktsProtected */ +#define IXGBE_LSECTXOCTE        0x08A48 /* OutOctetsEncrypted */ +#define IXGBE_LSECTXOCTP        0x08A4C 
/* OutOctetsProtected */ +#define IXGBE_LSECRXUT          0x08F40 /* InPktsUntagged/InPktsNoTag */ +#define IXGBE_LSECRXOCTD        0x08F44 /* InOctetsDecrypted */ +#define IXGBE_LSECRXOCTV        0x08F48 /* InOctetsValidated */ +#define IXGBE_LSECRXBAD         0x08F4C /* InPktsBadTag */ +#define IXGBE_LSECRXNOSCI       0x08F50 /* InPktsNoSci */ +#define IXGBE_LSECRXUNSCI       0x08F54 /* InPktsUnknownSci */ +#define IXGBE_LSECRXUNCH        0x08F58 /* InPktsUnchecked */ +#define IXGBE_LSECRXDELAY       0x08F5C /* InPktsDelayed */ +#define IXGBE_LSECRXLATE        0x08F60 /* InPktsLate */ +#define IXGBE_LSECRXOK(_n)      (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +#define IXGBE_LSECRXINV(_n)     (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +#define IXGBE_LSECRXNV(_n)      (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +#define IXGBE_LSECRXUNSA        0x08F7C /* InPktsUnusedSa */ +#define IXGBE_LSECRXNUSA        0x08F80 /* InPktsNotUsingSa */ + +/* LinkSec (MacSec) Bit Fields and Masks */ +#define IXGBE_LSECTXCAP_SUM_MASK        0x00FF0000 +#define IXGBE_LSECTXCAP_SUM_SHIFT       16 +#define IXGBE_LSECRXCAP_SUM_MASK        0x00FF0000 +#define IXGBE_LSECRXCAP_SUM_SHIFT       16 + +#define IXGBE_LSECTXCTRL_EN_MASK        0x00000003 +#define IXGBE_LSECTXCTRL_DISABLE        0x0 +#define IXGBE_LSECTXCTRL_AUTH           0x1 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT   0x2 +#define IXGBE_LSECTXCTRL_AISCI          0x00000020 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00 +#define IXGBE_LSECTXCTRL_RSV_MASK       0x000000D8 + +#define IXGBE_LSECRXCTRL_EN_MASK        0x0000000C +#define IXGBE_LSECRXCTRL_EN_SHIFT       2 +#define IXGBE_LSECRXCTRL_DISABLE        0x0 +#define IXGBE_LSECRXCTRL_CHECK          0x1 +#define IXGBE_LSECRXCTRL_STRICT         0x2 +#define IXGBE_LSECRXCTRL_DROP           0x3 +#define IXGBE_LSECRXCTRL_PLSH           0x00000040 +#define IXGBE_LSECRXCTRL_RP             0x00000080 +#define IXGBE_LSECRXCTRL_RSV_MASK       0xFFFFFF33 + +/* IpSec Registers */ +#define IXGBE_IPSTXIDX          0x08900 +#define IXGBE_IPSTXSALT         0x08904 +#define IXGBE_IPSTXKEY(_i)      (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX          0x08E00 +#define IXGBE_IPSRXIPADDR(_i)   (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI          0x08E14 +#define IXGBE_IPSRXIPIDX        0x08E18 +#define IXGBE_IPSRXKEY(_i)      (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT         0x08E2C +#define IXGBE_IPSRXMOD          0x08E30 + +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE    0x4 + +/* DCB registers */ +#define IXGBE_RTRPCS      0x02430 +#define IXGBE_RTTDCS      0x04900 +#define IXGBE_RTTDCS_ARBDIS     0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTPCS      0x0CD00 +#define IXGBE_RTRUP2TC    0x03020 +#define IXGBE_RTTUP2TC    0x0C800 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TXLLQ(_i)   (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDQSEL    0x04904 +#define IXGBE_RTTDT1C     0x04908 +#define IXGBE_RTTDT1S     0x0490C +#define IXGBE_RTTDTECC    0x04990 +#define IXGBE_RTTDTECC_NO_BCN   0x00000100 +#define 
IXGBE_RTTBCNRC    0x04984 +#define IXGBE_RTTBCNRC_RS_ENA	0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK	0x00003FFF +#define IXGBE_RTTBCNRC_RF_INT_SHIFT	14 +#define IXGBE_RTTBCNRC_RF_INT_MASK	\ +	(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) +#define IXGBE_RTTBCNRM    0x04980 + +/* FCoE DMA Context Registers */ +#define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */ +#define IXGBE_FCPTRH    0x02414 /* FC USer Desc. PTR High */ +#define IXGBE_FCBUFF    0x02418 /* FC Buffer Control */ +#define IXGBE_FCDMARW   0x02420 /* FC Receive DMA RW */ +#define IXGBE_FCINVST0  0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ +#define IXGBE_FCINVST(_i)       (IXGBE_FCINVST0 + ((_i) * 4)) +#define IXGBE_FCBUFF_VALID      (1 << 0)   /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE   (3 << 3)   /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX    (1 << 7)   /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_BUFFCNT    0x0000ff00 /* Number of User Buffers */ +#define IXGBE_FCBUFF_OFFSET     0xffff0000 /* User Buffer Offset */ +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT  3 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT   8 +#define IXGBE_FCBUFF_OFFSET_SHIFT    16 +#define IXGBE_FCDMARW_WE        (1 << 14)   /* Write enable */ +#define IXGBE_FCDMARW_RE        (1 << 15)   /* Read enable */ +#define IXGBE_FCDMARW_FCOESEL   0x000001ff  /* FC X_ID: 11 bits */ +#define IXGBE_FCDMARW_LASTSIZE  0xffff0000  /* Last User Buffer Size */ +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 + +/* FCoE SOF/EOF */ +#define IXGBE_TEOFF     0x04A94 /* Tx FC EOF */ +#define IXGBE_TSOFF     0x04A98 /* Tx FC SOF */ +#define IXGBE_REOFF     0x05158 /* Rx FC EOF */ +#define IXGBE_RSOFF     0x051F8 /* Rx FC SOF */ +/* FCoE Filter Context Registers */ +#define IXGBE_FCFLT     0x05108 /* FC FLT Context */ +#define IXGBE_FCFLTRW   0x05110 /* FC Filter RW Control */ +#define IXGBE_FCPARAM   0x051d8 /* FC Offset Parameter */ +#define IXGBE_FCFLT_VALID       (1 << 0)   /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST       (1 << 1)   /* Filter First */ +#define IXGBE_FCFLT_SEQID       0x00ff0000 /* Sequence ID */ +#define IXGBE_FCFLT_SEQCNT      0xff000000 /* Sequence Count */ +#define IXGBE_FCFLTRW_RVALDT    (1 << 13)  /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE        (1 << 14)  /* Write Enable */ +#define IXGBE_FCFLTRW_RE        (1 << 15)  /* Read Enable */ +/* FCoE Receive Control */ +#define IXGBE_FCRXCTRL  0x05100 /* FC Receive Control */ +#define IXGBE_FCRXCTRL_FCOELLI  (1 << 0)   /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD   (1 << 1)   /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH  (1 << 2)   /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3)   /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH     (1 << 4)   /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5)   /* EN 1st Seq. 
Header */ +#define IXGBE_FCRXCTRL_ICRC     (1 << 6)   /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO  (1 << 7)   /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER  0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +/* FCoE Redirection */ +#define IXGBE_FCRECTL   0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0   0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i)        (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA       0x1        /* FCoE Redir Table Enable */ +#define IXGBE_FCRETA_SIZE       8          /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ + +/* Stats registers */ +#define IXGBE_CRCERRS   0x04000 +#define IXGBE_ILLERRC   0x04004 +#define IXGBE_ERRBC     0x04008 +#define IXGBE_MSPDC     0x04010 +#define IXGBE_MPC(_i)   (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC      0x04034 +#define IXGBE_MRFC      0x04038 +#define IXGBE_RLEC      0x04040 +#define IXGBE_LXONTXC   0x03F60 +#define IXGBE_LXONRXC   0x0CF60 +#define IXGBE_LXOFFTXC  0x03F68 +#define IXGBE_LXOFFRXC  0x0CF68 +#define IXGBE_LXONRXCNT 0x041A4 +#define IXGBE_LXOFFRXCNT 0x041A8 +#define IXGBE_PXONRXCNT(_i)     (0x04140 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXOFFRXCNT(_i)    (0x04160 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXON2OFFCNT(_i)   (0x03240 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXONTXC(_i)       (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i)       (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i)      (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i)      (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64     0x0405C +#define IXGBE_PRC127    0x04060 +#define IXGBE_PRC255    0x04064 +#define IXGBE_PRC511    0x04068 +#define IXGBE_PRC1023   0x0406C +#define IXGBE_PRC1522   0x04070 +#define IXGBE_GPRC      0x04074 +#define IXGBE_BPRC      0x04078 +#define IXGBE_MPRC      0x0407C +#define IXGBE_GPTC      0x04080 +#define IXGBE_GORCL     0x04088 +#define IXGBE_GORCH     0x0408C +#define IXGBE_GOTCL     0x04090 +#define IXGBE_GOTCH     0x04094 +#define IXGBE_RNBC(_i)  (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC       0x040A4 +#define IXGBE_RFC       0x040A8 +#define IXGBE_ROC       0x040AC +#define IXGBE_RJC       0x040B0 +#define IXGBE_MNGPRC    0x040B4 +#define IXGBE_MNGPDC    0x040B8 +#define IXGBE_MNGPTC    0x0CF90 +#define IXGBE_TORL      0x040C0 +#define IXGBE_TORH      0x040C4 +#define IXGBE_TPR       0x040D0 +#define IXGBE_TPT       0x040D4 +#define IXGBE_PTC64     0x040D8 +#define IXGBE_PTC127    0x040DC +#define IXGBE_PTC255    0x040E0 +#define IXGBE_PTC511    0x040E4 +#define IXGBE_PTC1023   0x040E8 +#define IXGBE_PTC1522   0x040EC +#define IXGBE_MPTC      0x040F0 +#define IXGBE_BPTC      0x040F4 +#define IXGBE_XEC       0x04120 +#define IXGBE_SSVPC     0x08780 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ +                         (0x08600 + ((_i) * 4))) +#define IXGBE_TQSM(_i)  (0x08600 + ((_i) * 4)) + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC     0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC  0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST    0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC   0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC  0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC   0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC  0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_O2BGPTC   0x041C4 +#define IXGBE_O2BSPC    0x087B0 +#define IXGBE_B2OSPC    0x041C0 +#define IXGBE_B2OGPRC   0x02F90 +#define IXGBE_PCRC8ECL  0x0E810 +#define IXGBE_PCRC8ECH  0x0E811 +#define IXGBE_PCRC8ECH_MASK     0x1F +#define IXGBE_LDPCECL   0x0E820 +#define IXGBE_LDPCECH   0x0E821 + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC      0x05820 +#define IXGBE_MFVAL     0x05824 +#define IXGBE_MANC2H    0x05860 +#define IXGBE_MDEF(_i)  (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF     0x058B0 +#define IXGBE_MMAL(_i)  (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i)  (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT      0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i)  (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW     0x15014 + +/* ARC Subsystem registers */ +#define IXGBE_HICR      0x15F00 +#define IXGBE_FWSTS     0x15F0C +#define IXGBE_HSMC0R    0x15F04 +#define IXGBE_HSMC1R    0x15F08 +#define IXGBE_SWSR      0x15F10 +#define IXGBE_HFDR      0x15FE8 +#define IXGBE_FLEX_MNG  0x15800 /* 0x15800 - 0x15EFC */ + +#define IXGBE_HICR_EN              0x01  /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define IXGBE_HICR_C               0x02 +#define IXGBE_HICR_SV              0x04  /* Status Validity */ +#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET        0x80 + +/* PCI-E registers */ +#define IXGBE_GCR       0x11000 +#define IXGBE_GTV       0x11004 +#define IXGBE_FUNCTAG   0x11008 +#define IXGBE_GLT       0x1100C +#define IXGBE_GSCL_1    0x11010 +#define IXGBE_GSCL_2    0x11014 +#define IXGBE_GSCL_3    0x11018 +#define IXGBE_GSCL_4    0x1101C +#define IXGBE_GSCN_0    0x11020 +#define IXGBE_GSCN_1    0x11024 +#define IXGBE_GSCN_2    0x11028 +#define IXGBE_GSCN_3    0x1102C +#define IXGBE_FACTPS    0x10150 +#define IXGBE_PCIEANACTL  0x11040 +#define IXGBE_SWSM      0x10140 +#define IXGBE_FWSM      0x10148 +#define IXGBE_GSSR      0x10160 +#define IXGBE_MREVID    0x11064 +#define IXGBE_DCA_ID    0x11070 +#define 
IXGBE_DCA_CTRL  0x11074 +#define IXGBE_SWFW_SYNC IXGBE_GSSR + +/* PCIe registers 82599-specific */ +#define IXGBE_GCR_EXT           0x11050 +#define IXGBE_GSCL_5_82599      0x11030 +#define IXGBE_GSCL_6_82599      0x11034 +#define IXGBE_GSCL_7_82599      0x11038 +#define IXGBE_GSCL_8_82599      0x1103C +#define IXGBE_PHYADR_82599      0x11040 +#define IXGBE_PHYDAT_82599      0x11044 +#define IXGBE_PHYCTL_82599      0x11048 +#define IXGBE_PBACLR_82599      0x11068 +#define IXGBE_CIAA_82599        0x11088 +#define IXGBE_CIAD_82599        0x1108C +#define IXGBE_PICAUSE           0x110B0 +#define IXGBE_PIENA             0x110B8 +#define IXGBE_CDQ_MBR_82599     0x110B4 +#define IXGBE_PCIESPARE         0x110BC +#define IXGBE_MISC_REG_82599    0x110F0 +#define IXGBE_ECC_CTRL_0_82599  0x11100 +#define IXGBE_ECC_CTRL_1_82599  0x11104 +#define IXGBE_ECC_STATUS_82599  0x110E0 +#define IXGBE_BAR_CTRL_82599    0x110F4 + +/* PCI Express Control */ +#define IXGBE_GCR_CMPL_TMOUT_MASK       0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms       0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND     0x00010000 +#define IXGBE_GCR_CAP_VER2              0x00040000 + +#define IXGBE_GCR_EXT_MSIX_EN           0x80000000 +#define IXGBE_GCR_EXT_VT_MODE_16        0x00000001 +#define IXGBE_GCR_EXT_VT_MODE_32        0x00000002 +#define IXGBE_GCR_EXT_VT_MODE_64        0x00000003 +#define IXGBE_GCR_EXT_SRIOV             (IXGBE_GCR_EXT_MSIX_EN | \ +                                         IXGBE_GCR_EXT_VT_MODE_64) + +/* Time Sync Registers */ +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ +#define IXGBE_RXSTMPL    0x051E8 /* Rx timestamp Low - RO */ +#define IXGBE_RXSTMPH    0x051A4 /* Rx timestamp High - RO */ +#define IXGBE_RXSATRL    0x051A0 /* Rx timestamp attribute low - RO */ +#define IXGBE_RXSATRH    0x051A8 /* Rx timestamp attribute high - RO */ +#define IXGBE_RXMTRL     0x05120 /* RX message type register low - RW */ +#define IXGBE_TXSTMPL    0x08C04 /* Tx timestamp value Low - RO */ +#define IXGBE_TXSTMPH    0x08C08 /* Tx timestamp value High - RO */ +#define IXGBE_SYSTIML    0x08C0C /* System time register Low - RO */ +#define IXGBE_SYSTIMH    0x08C10 /* System time register High - RO */ +#define IXGBE_TIMINCA    0x08C14 /* Increment attributes register - RW */ +#define IXGBE_TIMADJL    0x08C18 /* Time Adjustment Offset register Low - RW */ +#define IXGBE_TIMADJH    0x08C1C /* Time Adjustment Offset register High - RW */ +#define IXGBE_TSAUXC     0x08C20 /* TimeSync Auxiliary Control register - RW */ +#define IXGBE_TRGTTIML0  0x08C24 /* Target Time Register 0 Low - RW */ +#define IXGBE_TRGTTIMH0  0x08C28 /* Target Time Register 0 High - RW */ +#define IXGBE_TRGTTIML1  0x08C2C /* Target Time Register 1 Low - RW */ +#define IXGBE_TRGTTIMH1  0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_FREQOUT0   0x08C34 /* Frequency Out 0 Control register - RW */ +#define IXGBE_FREQOUT1   0x08C38 /* Frequency Out 1 Control register - RW */ +#define IXGBE_AUXSTMPL0  0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +#define IXGBE_AUXSTMPH0  0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +#define IXGBE_AUXSTMPL1  0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +#define IXGBE_AUXSTMPH1  0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL   0x02C20 +#define IXGBE_RDSTAT(_i)  (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN   
   0x02F08 +#define IXGBE_RIC_DW(_i)  (0x02F10 + ((_i) * 4)) +#define IXGBE_RDPROBE     0x02F20 +#define IXGBE_RDMAM       0x02F30 +#define IXGBE_RDMAD       0x02F34 +#define IXGBE_TDSTATCTL   0x07C20 +#define IXGBE_TDSTAT(_i)  (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ +#define IXGBE_TDHMPN      0x07F08 +#define IXGBE_TDHMPN2     0x082FC +#define IXGBE_TXDESCIC    0x082CC +#define IXGBE_TIC_DW(_i)  (0x07F10 + ((_i) * 4)) +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +#define IXGBE_TDPROBE     0x07F20 +#define IXGBE_TXBUFCTRL   0x0C600 +#define IXGBE_TXBUFDATA0  0x0C610 +#define IXGBE_TXBUFDATA1  0x0C614 +#define IXGBE_TXBUFDATA2  0x0C618 +#define IXGBE_TXBUFDATA3  0x0C61C +#define IXGBE_RXBUFCTRL   0x03600 +#define IXGBE_RXBUFDATA0  0x03610 +#define IXGBE_RXBUFDATA1  0x03614 +#define IXGBE_RXBUFDATA2  0x03618 +#define IXGBE_RXBUFDATA3  0x0361C +#define IXGBE_PCIE_DIAG(_i)     (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL     0x050A4 +#define IXGBE_MDFTC1    0x042B8 +#define IXGBE_MDFTC2    0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS     0x042CC +#define IXGBE_RXDATAWRPTR(_i)   (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i)   (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i)   (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i)   (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i)   (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i)   (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i)   (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i)   (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_RXWRPTR(_i)       (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +#define IXGBE_RXUSED(_i)        (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +#define IXGBE_RXRDPTR(_i)       (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +#define IXGBE_RXRDWRPTR(_i)     (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +#define IXGBE_TXWRPTR(_i)       (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +#define IXGBE_TXUSED(_i)        (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +#define IXGBE_TXRDPTR(_i)       (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ +#define IXGBE_TXRDWRPTR(_i)     (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ +#define IXGBE_PCIEECCCTL0 0x11100 +#define IXGBE_PCIEECCCTL1 0x11104 +#define IXGBE_RXDBUECC  0x03F70 +#define IXGBE_TXDBUECC  0x0CF70 +#define IXGBE_RXDBUEST 0x03F74 +#define IXGBE_TXDBUEST 0x0CF74 +#define IXGBE_PBTXECC   0x0C300 +#define IXGBE_PBRXECC   0x03300 +#define IXGBE_GHECCR    0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA  0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0    0x04240 +#define IXGBE_HLREG1    0x04244 +#define IXGBE_PAP       0x04248 +#define IXGBE_MACA      0x0424C +#define IXGBE_APAE      0x04250 +#define IXGBE_ARD       0x04254 +#define IXGBE_AIS       0x04258 +#define IXGBE_MSCA      0x0425C +#define IXGBE_MSRWD     0x04260 +#define IXGBE_MLADD     0x04264 +#define IXGBE_MHADD     0x04268 +#define IXGBE_MAXFRS    0x04268 +#define IXGBE_TREG      0x0426C +#define IXGBE_PCSS1   
  0x04288 +#define IXGBE_PCSS2     0x0428C +#define IXGBE_XPCSS     0x04290 +#define IXGBE_MFLCN     0x04294 +#define IXGBE_SERDESC   0x04298 +#define IXGBE_MACS      0x0429C +#define IXGBE_AUTOC     0x042A0 +#define IXGBE_LINKS     0x042A4 +#define IXGBE_LINKS2    0x04324 +#define IXGBE_AUTOC2    0x042A8 +#define IXGBE_AUTOC3    0x042AC +#define IXGBE_ANLP1     0x042B0 +#define IXGBE_ANLP2     0x042B4 +#define IXGBE_MACC      0x04330 +#define IXGBE_ATLASCTL  0x04800 +#define IXGBE_MMNGC     0x042D0 +#define IXGBE_ANLPNP1   0x042D4 +#define IXGBE_ANLPNP2   0x042D8 +#define IXGBE_KRPCSFC   0x042E0 +#define IXGBE_KRPCSS    0x042E4 +#define IXGBE_FECS1     0x042E8 +#define IXGBE_FECS2     0x042EC +#define IXGBE_SMADARCTL 0x14F10 +#define IXGBE_MPVC      0x04318 +#define IXGBE_SGMIIC    0x04314 + +/* Statistics Registers */ +#define IXGBE_RXNFGPC      0x041B0 +#define IXGBE_RXNFGBCL     0x041B4 +#define IXGBE_RXNFGBCH     0x041B8 +#define IXGBE_RXDGPC       0x02F50 +#define IXGBE_RXDGBCL      0x02F54 +#define IXGBE_RXDGBCH      0x02F58 +#define IXGBE_RXDDGPC      0x02F5C +#define IXGBE_RXDDGBCL     0x02F60 +#define IXGBE_RXDDGBCH     0x02F64 +#define IXGBE_RXLPBKGPC    0x02F68 +#define IXGBE_RXLPBKGBCL   0x02F6C +#define IXGBE_RXLPBKGBCH   0x02F70 +#define IXGBE_RXDLPBKGPC   0x02F74 +#define IXGBE_RXDLPBKGBCL  0x02F78 +#define IXGBE_RXDLPBKGBCH  0x02F7C +#define IXGBE_TXDGPC       0x087A0 +#define IXGBE_TXDGBCL      0x087A4 +#define IXGBE_TXDGBCH      0x087A8 + +#define IXGBE_RXDSTATCTRL 0x02F40 + +/* Copper Pond 2 link timeout */ +#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 + +/* Omer CORECTL */ +#define IXGBE_CORECTL           0x014F00 +/* BARCTRL */ +#define IXGBE_BARCTRL               0x110F4 +#define IXGBE_BARCTRL_FLSIZE        0x0700 +#define IXGBE_BARCTRL_FLSIZE_SHIFT  8 +#define IXGBE_BARCTRL_CSRSIZE       0x2000 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN          0x01 +#define IXGBE_RSCCTL_MAXDESC_1      0x00 +#define IXGBE_RSCCTL_MAXDESC_4      0x04 +#define IXGBE_RSCCTL_MAXDESC_8      0x08 +#define IXGBE_RSCCTL_MAXDESC_16     0x0C + +/* RSCDBU Bit Masks */ +#define IXGBE_RSCDBU_RSCSMALDIS_MASK    0x0000007F +#define IXGBE_RSCDBU_RSCACKDIS          0x00000080 + +/* RDRXCTL Bit Masks */ +#define IXGBE_RDRXCTL_RDMTS_1_2     0x00000000 /* Rx Desc Min Threshold Size */ +#define IXGBE_RDRXCTL_CRCSTRIP      0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_MVMEN         0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE      0x00000008 /* DMA init cycle done */ +#define IXGBE_RDRXCTL_AGGDIS        0x00010000 /* Aggregation disable */ +#define IXGBE_RDRXCTL_RSCFRSTSIZE   0x003E0000 /* RSC First packet size */ +#define IXGBE_RDRXCTL_RSCLLIDIS     0x00800000 /* Disable RSC compl on LLI */ +#define IXGBE_RDRXCTL_RSCACKC       0x02000000 /* must set 1 when RSC enabled */ +#define IXGBE_RDRXCTL_FCOE_WRFIX    0x04000000 /* must set 1 when RSC enabled */ + +/* RQTC Bit Masks and Shifts */ +#define IXGBE_RQTC_SHIFT_TC(_i)     ((_i) * 4) +#define IXGBE_RQTC_TC0_MASK         (0x7 << 0) +#define IXGBE_RQTC_TC1_MASK         (0x7 << 4) +#define IXGBE_RQTC_TC2_MASK         (0x7 << 8) +#define IXGBE_RQTC_TC3_MASK         (0x7 << 12) +#define IXGBE_RQTC_TC4_MASK         (0x7 << 16) +#define IXGBE_RQTC_TC5_MASK         (0x7 << 20) +#define IXGBE_RQTC_TC6_MASK         (0x7 << 24) +#define IXGBE_RQTC_TC7_MASK         (0x7 << 28) + +/* PSRTYPE.RQPL Bit masks and shift */ +#define IXGBE_PSRTYPE_RQPL_MASK     0x7 +#define IXGBE_PSRTYPE_RQPL_SHIFT    29 + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS      
0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST      0x00000008 /* Link Reset. Resets everything. */ +#define IXGBE_CTRL_RST          0x04000000 /* Reset (SW) */ + +/* FACTPS */ +#define IXGBE_FACTPS_LFS        0x40000000 /* LAN Function Select */ + +/* MHADD Bit Masks */ +#define IXGBE_MHADD_MFS_MASK    0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT   16 + +/* Extended Device Control */ +#define IXGBE_CTRL_EXT_PFRSTD   0x00004000 /* Physical Function Reset Done */ +#define IXGBE_CTRL_EXT_NS_DIS   0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS   0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + +/* Direct Cache Access (DCA) definitions */ +#define IXGBE_DCA_CTRL_DCA_ENABLE  0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599  0xFF000000 /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599  0xFF000000 /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_MAX_QUEUES_82598   16 /* DCA regs only on 16 queues */ + +/* MSCA Bit Masks */ +#define IXGBE_MSCA_NP_ADDR_MASK      0x0000FFFF /* MDI Address (new protocol) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT     0 +#define IXGBE_MSCA_DEV_TYPE_MASK     0x001F0000 /* Device Type (new protocol) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT    16 /* Register Address (old protocol */ +#define IXGBE_MSCA_PHY_ADDR_MASK     0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT    21 /* PHY Address shift*/ +#define IXGBE_MSCA_OP_CODE_MASK      0x0C000000 /* OP CODE mask */ +#define IXGBE_MSCA_OP_CODE_SHIFT     26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE        0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE             0x04000000 /* OP CODE 01 (write) */ +#define IXGBE_MSCA_READ              0x0C000000 /* OP CODE 11 (read) */ +#define IXGBE_MSCA_READ_AUTOINC      0x08000000 /* OP CODE 10 (read, auto inc)*/ +#define IXGBE_MSCA_ST_CODE_MASK      0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT     28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL      0x00000000 /* ST CODE 00 (new protocol) */ +#define IXGBE_MSCA_OLD_PROTOCOL      0x10000000 /* ST CODE 01 (old protocol) */ +#define IXGBE_MSCA_MDI_COMMAND       0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN    0x80000000 /* MDI in progress enable */ + +/* MSRWD bit masks */ +#define IXGBE_MSRWD_WRITE_DATA_MASK     0x0000FFFF +#define 
IXGBE_MSRWD_WRITE_DATA_SHIFT    0 +#define IXGBE_MSRWD_READ_DATA_MASK      0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT     16 + +/* Atlas registers */ +#define IXGBE_ATLAS_PDN_LPBK    0x24 +#define IXGBE_ATLAS_PDN_10G     0xB +#define IXGBE_ATLAS_PDN_1G      0xC +#define IXGBE_ATLAS_PDN_AN      0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD        0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN       0x10 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL   0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL    0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL    0xF0 + +/* Omer bit masks */ +#define IXGBE_CORECTL_WRITE_CMD         0x00010000 + +/* MDIO definitions */ + +#define IXGBE_MDIO_COMMAND_TIMEOUT     100 /* PHY Timeout for 1 GB mode */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL      0x0    /* VS1 Control Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS       0x1    /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS  0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED    0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED     0x0010 + +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR     0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA     0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT     0xC30C /* PHY_XS SDA/SCL Status Reg */ + +/* MII clause 22/28 definitions */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG             0x17   /* 1G XNP Transmit */ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX      0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE             0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_AUTONEG_REG                    0x0 + +#define IXGBE_PHY_REVISION_MASK        0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR             32 + +/* PHY IDs*/ +#define TN1010_PHY_ID    0x00A19410 +#define TNX_FW_REV       0xB +#define X540_PHY_ID      0x01540200 +#define QT2022_PHY_ID    0x0043A400 +#define ATH_PHY_ID       0x03429050 +#define AQ_FW_REV        0x20 + +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID  0x01410CD0 + +/* Special PHY Init Routine */ +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_PHY_INIT_END_NL    0xFFFF +#define IXGBE_CONTROL_MASK_NL    0xF000 +#define IXGBE_DATA_MASK_NL       0x0FFF +#define IXGBE_CONTROL_SHIFT_NL   12 +#define IXGBE_DELAY_NL           0 +#define IXGBE_DATA_NL            1 +#define IXGBE_CONTROL_NL         0x000F +#define IXGBE_CONTROL_EOL_NL     0x0FFF +#define IXGBE_CONTROL_SOL_NL     0x0000 + +/* General purpose Interrupt Enable */ +#define IXGBE_SDP0_GPIEN         0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN         0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN         0x00000004 /* SDP2 */ +#define IXGBE_GPIE_MSIX_MODE     0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD           0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN         0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME         0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT   0x80000000 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_GPIE_VTMODE_MASK   0x0000C000 /* VT Mode Mask */ +#define IXGBE_GPIE_VTMODE_16     0x00004000 /* 16 VFs 8 queues per VF */ +#define IXGBE_GPIE_VTMODE_32     0x00008000 /* 32 VFs 4 queues per VF */ +#define IXGBE_GPIE_VTMODE_64     0x0000C000 /* 64 VFs 2 queues per VF */ + +/* Packet Buffer Initialization */ +#define 
IXGBE_TXPBSIZE_20KB     0x00005000 /* 20KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB     0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB     0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB     0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB     0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_RXPBSIZE_128KB    0x00020000 /* 128KB Packet Buffer */ +#define IXGBE_RXPBSIZE_MAX      0x00080000 /* 512KB Packet Buffer*/ +#define IXGBE_TXPBSIZE_MAX      0x00028000 /* 160KB Packet Buffer*/ + +#define IXGBE_TXPKT_SIZE_MAX    0xA        /* Max Tx Packet size  */ +#define IXGBE_MAX_PB		8 + +/* Packet buffer allocation strategies */ +enum { +	PBA_STRATEGY_EQUAL	= 0,	/* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL	PBA_STRATEGY_EQUAL +	PBA_STRATEGY_WEIGHTED	= 1,	/* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED	PBA_STRATEGY_WEIGHTED +}; + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF         0x00000001 +#define IXGBE_TFCS_TXOFF0        0x00000100 +#define IXGBE_TFCS_TXOFF1        0x00000200 +#define IXGBE_TFCS_TXOFF2        0x00000400 +#define IXGBE_TFCS_TXOFF3        0x00000800 +#define IXGBE_TFCS_TXOFF4        0x00001000 +#define IXGBE_TFCS_TXOFF5        0x00002000 +#define IXGBE_TFCS_TXOFF6        0x00004000 +#define IXGBE_TFCS_TXOFF7        0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS            0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE  0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH  0x00000400 +#define IXGBE_TCPTIMER_LOOP          0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN      0x00000001   /* bit  0 */ +#define IXGBE_HLREG0_RXCRCSTRP    0x00000002   /* bit  1 */ +#define IXGBE_HLREG0_JUMBOEN      0x00000004   /* bit  2 */ +#define IXGBE_HLREG0_TXPADEN      0x00000400   /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN    0x00001000   /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN    0x00004000   /* bit 14 */ +#define IXGBE_HLREG0_LPBK         0x00008000   /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD       0x00010000   /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC      0x00020000   /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR     0x00040000   /* bit 18 */ +#define IXGBE_HLREG0_PREPEND      0x00F00000   /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN   0x01000000   /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000   /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000   /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000   /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN     0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* VT_CTL bitmasks */ +#define IXGBE_VT_CTL_DIS_DEFPL  0x20000000 /* disable default pool */ +#define IXGBE_VT_CTL_REPLEN     0x40000000 /* replication enabled */ +#define IXGBE_VT_CTL_VT_ENABLE  0x00000001  /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK  (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + +/* VMOLR bitmasks */ +#define IXGBE_VMOLR_AUPE        0x01000000 /* accept untagged packets */ +#define IXGBE_VMOLR_ROMPE       0x02000000 /* accept packets in MTA tbl */ +#define IXGBE_VMOLR_ROPE        0x04000000 /* accept packets in UC tbl */ +#define IXGBE_VMOLR_BAM         0x08000000 /* accept broadcast packets */ +#define IXGBE_VMOLR_MPE         0x10000000 /* multicast promiscuous */ + +/* VFRE bitmask */ +#define IXGBE_VFRE_ENABLE_ALL   0xFFFFFFFF + +#define IXGBE_VF_INIT_TIMEOUT   200 /* Number of retries to 
clear RSTI */ + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR       0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ      0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR       0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ      0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +#define IXGBE_RDMAM_MEM_SEL_SHIFT   13 +#define IXGBE_RDMAM_DWORD_SHIFT     9 +#define IXGBE_RDMAM_DESC_COMP_FIFO  1 +#define IXGBE_RDMAM_DFC_CMD_FIFO    2 +#define IXGBE_RDMAM_TCN_STATUS_RAM  4 +#define IXGBE_RDMAM_WB_COLL_FIFO    5 +#define IXGBE_RDMAM_QSC_CNT_RAM     6 +#define IXGBE_RDMAM_QSC_QUEUE_CNT   8 +#define IXGBE_RDMAM_QSC_QUEUE_RAM   0xA +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE     135 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT     4 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE      48 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT      7 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE    256 +#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT    9 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE      8 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT      4 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE       64 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT       4 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE     32 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT     4 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE     128 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT     8 + +#define IXGBE_TXDESCIC_READY        0x80000000 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE     0x00001000   /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD       0x00002000   /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE        0x80000000  /* XON enable */ +#define IXGBE_FCRTH_FCEN        0x80000000  /* Packet buffer fc enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK   0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM          0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC          0x00000004 +#define IXGBE_RMCS_DFP          IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ +#define IXGBE_RMCS_TFCE_802_3X         0x00000008 /* Tx Priority FC ena */ +#define IXGBE_RMCS_TFCE_PRIORITY       0x00000010 /* Tx Priority FC ena */ +#define IXGBE_RMCS_ARBDIS       0x00000040 /* Arbitration disable bit */ + +/* FCCFG Bit Masks */ +#define IXGBE_FCCFG_TFCE_802_3X         0x00000008 /* Tx link FC enable */ +#define IXGBE_FCCFG_TFCE_PRIORITY       0x00000010 /* Tx priority FC enable */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE    0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_FLOW_DIR     0x00010000 /* FDir Exception */ +#define IXGBE_EICR_RX_MISS      0x00020000 /* Packet Buffer Overrun */ +#define IXGBE_EICR_PCI          0x00040000 /* PCI Exception */ +#define IXGBE_EICR_MAILBOX      0x00080000 /* VF to PF Mailbox Interrupt */ +#define IXGBE_EICR_LSC          0x00100000 /* Link Status Change */ +#define IXGBE_EICR_LINKSEC      0x00200000 /* PN Threshold */ +#define IXGBE_EICR_MNG          0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_GPI_SDP0     0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1     0x02000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2     0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_ECC          0x10000000 /* ECC Error */ +#define IXGBE_EICR_PBUR         0x10000000 /* Packet Buffer Handler Error */ 
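/*
 * Editorial note, not part of the commit: a minimal sketch of how the
 * IXGBE_EICR_* cause masks defined above are typically tested against a
 * snapshot of the Extended Interrupt Cause register. The eicr argument is
 * a hypothetical value already read from hardware (the driver's real
 * register-read path is not shown); only the bit masks come from this
 * header, and the function name is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static void ixgbe_sketch_decode_eicr(uint32_t eicr)
{
	if (eicr & IXGBE_EICR_LSC)
		printf("link status change\n");
	if (eicr & IXGBE_EICR_MAILBOX)
		printf("VF-to-PF mailbox interrupt\n");
	if (eicr & IXGBE_EICR_ECC)
		printf("ECC error\n");
	if (eicr & IXGBE_EICR_RTX_QUEUE)
		printf("Rx/Tx queue cause bits: 0x%04x\n",
		       (unsigned int)(eicr & IXGBE_EICR_RTX_QUEUE));
}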
+#define IXGBE_EICR_DHER         0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER    0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER        0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */ +#define IXGBE_EICS_RX_MISS      IXGBE_EICR_RX_MISS   /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI          IXGBE_EICR_PCI       /* PCI Exception */ +#define IXGBE_EICS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC          IXGBE_EICR_LSC       /* Link Status Change */ +#define IXGBE_EICS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */ +#define IXGBE_EICS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_ECC          IXGBE_EICR_ECC       /* ECC Error */ +#define IXGBE_EICS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER         IXGBE_EICR_DHER      /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */ +#define IXGBE_EIMS_RX_MISS      IXGBE_EICR_RX_MISS   /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI          IXGBE_EICR_PCI       /* PCI Exception */ +#define IXGBE_EIMS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC          IXGBE_EICR_LSC       /* Link Status Change */ +#define IXGBE_EIMS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */ +#define IXGBE_EIMS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_ECC          IXGBE_EICR_ECC       /* ECC Error */ +#define IXGBE_EIMS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER         IXGBE_EICR_DHER      /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS      IXGBE_EICR_RX_MISS   /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI          IXGBE_EICR_PCI       /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC          IXGBE_EICR_LSC       /* Link Status Change */ +#define IXGBE_EIMC_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */ +#define IXGBE_EIMC_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_ECC          IXGBE_EICR_ECC       /* ECC Error */ +#define IXGBE_EIMC_PBUR       
  IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER         IXGBE_EICR_DHER      /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK ( \ +                                IXGBE_EIMS_RTX_QUEUE       | \ +                                IXGBE_EIMS_LSC             | \ +                                IXGBE_EIMS_TCP_TIMER       | \ +                                IXGBE_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG    0x00002000  /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK    0x00004000  /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH    0x00008000  /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST    0x00010000  /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599  0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599  0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_LLI_EN_82599   0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599  0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK      0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN        0x00000008 /* VLAN priority enable */ + +#define IXGBE_MAX_FTQF_FILTERS          128 +#define IXGBE_FTQF_PROTOCOL_MASK        0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP         0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP         0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP        2 +#define IXGBE_FTQF_PRIORITY_MASK        0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT       2 +#define IXGBE_FTQF_POOL_MASK            0x0000003F +#define IXGBE_FTQF_POOL_SHIFT           8 +#define IXGBE_FTQF_5TUPLE_MASK_MASK     0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT    25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK     0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK       0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK     0x1B +#define IXGBE_FTQF_DEST_PORT_MASK       0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK   0x0F +#define IXGBE_FTQF_POOL_MASK_EN         0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE         0x80000000 + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK    0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM      25 +#define IXGBE_IVAR_REG_NUM_82599       64 +#define IXGBE_IVAR_TXRX_ENTRY   96 +#define IXGBE_IVAR_RX_ENTRY     64 +#define IXGBE_IVAR_RX_QUEUE(_i)    (0 + (_i)) +#define 
IXGBE_IVAR_TX_QUEUE(_i)    (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY     32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX       96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX    97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i)   (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL    0x80 /* Interrupt Allocation valid */ + +/* ETYPE Queue Filter/Select Bit Masks */ +#define IXGBE_MAX_ETQF_FILTERS  8 +#define IXGBE_ETQF_FCOE         0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN          0x10000000 /* bit 28 */ +#define IXGBE_ETQF_1588         0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN    0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE   (1 << 26) /* bit 26 */ + +#define IXGBE_ETQS_RX_QUEUE     0x007F0000 /* bits 22:16 */ +#define IXGBE_ETQS_RX_QUEUE_SHIFT       16 +#define IXGBE_ETQS_LLI          0x20000000 /* bit 29 */ +#define IXGBE_ETQS_QUEUE_EN     0x80000000 /* bit 31 */ + +/* + * ETQF filter list: one static filter per filter consumer. This is + *                   to avoid filter collisions later. Add new filters + *                   here!! + * + * Current filters: + *    EAPOL 802.1x (0x888e): Filter 0 + *    FCoE (0x8906):         Filter 2 + *    1588 (0x88f7):         Filter 3 + *    FIP  (0x8914):         Filter 4 + */ +#define IXGBE_ETQF_FILTER_EAPOL          0 +#define IXGBE_ETQF_FILTER_FCOE           2 +#define IXGBE_ETQF_FILTER_1588           3 +#define IXGBE_ETQF_FILTER_FIP            4 +/* VLAN Control Bit Masks */ +#define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN     0x20000000  /* bit 29 */ +#define IXGBE_VLNCTRL_VFE       0x40000000  /* bit 30 */ +#define IXGBE_VLNCTRL_VME       0x80000000  /* bit 31 */ + +/* VLAN pool filtering masks */ +#define IXGBE_VLVF_VIEN         0x80000000  /* filter is valid */ +#define IXGBE_VLVF_ENTRIES      64 +#define IXGBE_VLVF_VLANID_MASK  0x00000FFF + +/* Per VF Port VLAN insertion rules */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IXGBE_VMVIR_VLANA_NEVER   0x80000000 /* Never insert VLAN tag */ + +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.1q protocol */ + +/* STATUS Bit Masks */ +#define IXGBE_STATUS_LAN_ID         0x0000000C /* LAN ID */ +#define IXGBE_STATUS_LAN_ID_SHIFT   2          /* LAN ID Shift*/ +#define IXGBE_STATUS_GIO            0x00080000 /* GIO Master Enable Status */ + +#define IXGBE_STATUS_LAN_ID_0   0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1   0x00000004 /* LAN ID 1 */ + +/* ESDP Bit Masks */ +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ +#define IXGBE_ESDP_SDP4_DIR     0x00000004 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR     0x00002000 /* SDP5 IO direction */ + +/* LEDCTL Bit Masks */ +#define IXGBE_LED_IVRT_BASE      0x00000040 +#define IXGBE_LED_BLINK_BASE     0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_IVRT(_i)       IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i)      IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) 
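/*
 * Editorial note, not part of the commit: a hedged sketch of how the
 * IXGBE_ETQF_*/IXGBE_ETQS_* masks above combine when an EtherType filter
 * slot (here the 1588 slot from the ETQF filter list comment) steers
 * matching frames to a receive queue. The ETHERTYPE_1588 constant and the
 * two output parameters are illustrative assumptions; in the driver the
 * resulting values would be written to IXGBE_ETQF(IXGBE_ETQF_FILTER_1588)
 * and IXGBE_ETQS(IXGBE_ETQF_FILTER_1588).
 */
#include <stdint.h>

#define ETHERTYPE_1588 0x88F7	/* assumed; matches the filter list comment */

static void ixgbe_sketch_1588_filter(uint32_t *etqf, uint32_t *etqs,
				     unsigned int rx_queue)
{
	/* Match the 1588 EtherType and flag the filter as a 1588 filter. */
	*etqf = IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_1588 | ETHERTYPE_1588;

	/* Steer matching frames to rx_queue and enable queue selection. */
	*etqs = IXGBE_ETQS_QUEUE_EN |
		((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) & IXGBE_ETQS_RX_QUEUE);
}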
+#define IXGBE_LED_MODE_MASK(_i)  IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) + +/* LED modes */ +#define IXGBE_LED_LINK_UP       0x0 +#define IXGBE_LED_LINK_10G      0x1 +#define IXGBE_LED_MAC           0x2 +#define IXGBE_LED_FILTER        0x3 +#define IXGBE_LED_LINK_ACTIVE   0x4 +#define IXGBE_LED_LINK_1G       0x5 +#define IXGBE_LED_ON            0xE +#define IXGBE_LED_OFF           0xF + +/* AUTOC Bit Masks */ +#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 +#define IXGBE_AUTOC_KX4_SUPP    0x80000000 +#define IXGBE_AUTOC_KX_SUPP     0x40000000 +#define IXGBE_AUTOC_PAUSE       0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE   0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE   0x10000000 +#define IXGBE_AUTOC_RF          0x08000000 +#define IXGBE_AUTOC_PD_TMR      0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_FECA        0x00040000 +#define IXGBE_AUTOC_FECR        0x00020000 +#define IXGBE_AUTOC_KR_SUPP     0x00010000 +#define IXGBE_AUTOC_AN_RESTART  0x00001000 +#define IXGBE_AUTOC_FLU         0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT   13 +#define IXGBE_AUTOC_LMS_10G_SERIAL      (0x3 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR       (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_SGMII_1G_100M   (0x5 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_MASK            (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN   (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN  (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN           (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN          (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN    (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE     (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD_MASK    0x00000200 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT   9 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK   0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT  7 +#define IXGBE_AUTOC_10G_XAUI   (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4    (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4    (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX      (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX      (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI     (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX   (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC2_UPPER_MASK  0xFFFF0000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK  0x00030000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +#define IXGBE_AUTOC2_10G_KR  (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) + +#define IXGBE_MACC_FLU       0x00000001 +#define IXGBE_MACC_FSV_10G   0x00030000 +#define IXGBE_MACC_FS        0x00040000 +#define IXGBE_MAC_RX2TX_LPBK 0x00000002 + +/* LINKS Bit Masks */ +#define IXGBE_LINKS_KX_AN_COMP  0x80000000 +#define IXGBE_LINKS_UP          0x40000000 +#define IXGBE_LINKS_SPEED       0x20000000 +#define IXGBE_LINKS_MODE        0x18000000 +#define IXGBE_LINKS_RX_MODE     0x06000000 +#define IXGBE_LINKS_TX_MODE     
0x01800000 +#define IXGBE_LINKS_XGXS_EN     0x00400000 +#define IXGBE_LINKS_SGMII_EN    0x02000000 +#define IXGBE_LINKS_PCS_1G_EN   0x00200000 +#define IXGBE_LINKS_1G_AN_EN    0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE  0x00080000 +#define IXGBE_LINKS_1G_SYNC     0x00040000 +#define IXGBE_LINKS_10G_ALIGN   0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT    0x00001000 +#define IXGBE_LINKS_SIGNAL      0x00000F00 + +#define IXGBE_LINKS_SPEED_82599     0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599  0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINK_UP_TIME      90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME     45 /* 4.5 Seconds */ + +#define IXGBE_LINKS2_AN_SUPPORTED   0x00000040 + +/* PCS1GLSTA Bit Masks */ +#define IXGBE_PCS1GLSTA_LINK_OK         1 +#define IXGBE_PCS1GLSTA_SYNK_OK         0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE     0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX      0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT    0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS    0x100000 + +#define IXGBE_PCS1GANA_SYM_PAUSE        0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE        0x100 + +/* PCS1GLCTL Bit Masks */ +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN  0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP     1 +#define IXGBE_PCS1GLCTL_FORCE_LINK      0x20 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH  0x40 +#define IXGBE_PCS1GLCTL_AN_ENABLE       0x10000 +#define IXGBE_PCS1GLCTL_AN_RESTART      0x20000 + +/* ANLP1 Bit Masks */ +#define IXGBE_ANLP1_PAUSE               0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE           0x0400 +#define IXGBE_ANLP1_ASM_PAUSE           0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK       0x000f0000 + +/* SW Semaphore Register bitmasks */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ + +/* SW_FW_SYNC/GSSR definitions */ +#define IXGBE_GSSR_EEP_SM     0x0001 +#define IXGBE_GSSR_PHY0_SM    0x0002 +#define IXGBE_GSSR_PHY1_SM    0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM   0x0010 +#define IXGBE_GSSR_SW_MNG_SM  0x0400 + +/* FW Status register bitmask */ +#define IXGBE_FWSTS_FWRI    0x00000200 /* Firmware Reset Indication */ + +/* EEC Register */ +#define IXGBE_EEC_SK        0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS        0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI        0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO        0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK  0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS   0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN    0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ       0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT       0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES      0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD       0x00000200 /* EEPROM Auto Read Done */ +#define IXGBE_EEC_FLUP      0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL   0x02000000 /* Sector 1 Valid */ +#define IXGBE_EEC_FLUDONE   0x04000000 /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 
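The EEC REQ/GNT bits above imply a simple software arbitration for the EEPROM interface: software sets the request bit, then polls for the grant bit before driving the SK/CS/DI bit-bang pins. A minimal sketch of that handshake, as an illustration rather than part of the patch, assuming a struct ixgbe_hw *hw plus the IXGBE_EEC register offset and the IXGBE_READ_REG()/IXGBE_WRITE_REG() accessors defined elsewhere in the driver (the retry bound IXGBE_EEPROM_GRANT_ATTEMPTS appears further down in this header):

	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	int i;

	/* Request EEPROM access, then poll until hardware grants it. */
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec | IXGBE_EEC_REQ);
	for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_GNT)
			break;		/* access granted */
		udelay(5);
	}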
+#define IXGBE_EEC_SIZE      0x00007800 /* EEPROM Size */ +#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */ + +#define IXGBE_EEC_SIZE_SHIFT          11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT  6 +#define IXGBE_EEPROM_OPCODE_BITS      8 + +/* Part Number String Length */ +#define IXGBE_PBANUM_LENGTH 11 + +/* Checksum and EEPROM pointers */ +#define IXGBE_PBANUM_PTR_GUARD  0xFAFA +#define IXGBE_EEPROM_CHECKSUM   0x3F +#define IXGBE_EEPROM_SUM        0xBABA +#define IXGBE_PCIE_ANALOG_PTR   0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR           0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR    0x05 +#define IXGBE_PCIE_GENERAL_PTR  0x06 +#define IXGBE_PCIE_CONFIG0_PTR  0x07 +#define IXGBE_PCIE_CONFIG1_PTR  0x08 +#define IXGBE_CORE0_PTR         0x09 +#define IXGBE_CORE1_PTR         0x0A +#define IXGBE_MAC0_PTR          0x0B +#define IXGBE_MAC1_PTR          0x0C +#define IXGBE_CSR0_CONFIG_PTR   0x0D +#define IXGBE_CSR1_CONFIG_PTR   0x0E +#define IXGBE_FW_PTR            0x0F +#define IXGBE_PBANUM0_PTR       0x15 +#define IXGBE_PBANUM1_PTR       0x16 +#define IXGBE_FREE_SPACE_PTR    0x3E +#define IXGBE_SAN_MAC_ADDR_PTR  0x28 +#define IXGBE_DEVICE_CAPS       0x2C +#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_PCIE_MSIX_82599_CAPS  0x72 +#define IXGBE_PCIE_MSIX_82598_CAPS  0x62 + +/* MSI-X capability fields masks */ +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK     0x7FF + +/* Legacy EEPROM word offsets */ +#define IXGBE_ISCSI_BOOT_CAPS           0x0033 +#define IXGBE_ISCSI_SETUP_PORT_0        0x0030 +#define IXGBE_ISCSI_SETUP_PORT_1        0x0034 + +/* EEPROM Commands - SPI */ +#define IXGBE_EEPROM_MAX_RETRY_SPI      5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI     0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI    0x03  /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI   0x02  /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI      0x08  /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI    0x06  /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI    0x04 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI    0x05  /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI    0x01  /* EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20  /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI  0xD8  /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI  0xDB  /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define IXGBE_EEPROM_RW_REG_DATA   16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_RW_REG_DONE   2  /* Offset to READ done bit */ +#define IXGBE_EEPROM_RW_REG_START  1  /* First bit to start operation */ +#define IXGBE_EEPROM_RW_ADDR_SHIFT 2  /* Shift to the address bits */ +#define IXGBE_NVM_POLL_WRITE       1  /* Flag for polling for write complete */ +#define IXGBE_NVM_POLL_READ        0  /* Flag for polling for read complete */ + +#define IXGBE_ETH_LENGTH_OF_ADDRESS   6 + +#define IXGBE_EEPROM_PAGE_SIZE_MAX       128 +#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ +#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ + +#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif + +#ifndef IXGBE_EERD_EEWR_ATTEMPTS +/* Number of 5 microsecond intervals we wait for EERD read and + * EEWR write to complete */ +#define 
IXGBE_EERD_EEWR_ATTEMPTS 100000 +#endif + +#ifndef IXGBE_FLUDONE_ATTEMPTS +/* # attempts we wait for flush update to complete */ +#define IXGBE_FLUDONE_ATTEMPTS 20000 +#endif + +#define IXGBE_PCIE_CTRL2                 0x5   /* PCIe Control 2 Offset */ +#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE    0x8   /* Dummy Function Enable */ +#define IXGBE_PCIE_CTRL2_LAN_DISABLE     0x2   /* LAN PCI Disable */ +#define IXGBE_PCIE_CTRL2_DISABLE_SELECT  0x1   /* LAN Disable Select */ + +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET  0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET  0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP  0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS  0x2 +#define IXGBE_FW_LESM_PARAMETERS_PTR     0x2 +#define IXGBE_FW_LESM_STATE_1            0x1 +#define IXGBE_FW_LESM_STATE_ENABLED      0x8000 /* LESM Enable bit */ +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR   0x4 +#define IXGBE_FW_PATCH_VERSION_4         0x7 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR         0x33 /* iSCSI/FCOE block */ +#define IXGBE_FCOE_IBA_CAPS_FCOE            0x20 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_BLK_PTR            0x17 /* iSCSI/FCOE block */ +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET       0x0  /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE       0x1  /* FCOE flags enable bit */ +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR      0x27 /* Alt. SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET  0x0 /* Alt. SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET  0x7 /* Alt. WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET  0x8 /* Alt. WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC  0x0 /* Alt. SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN  0x1 /* Alt. WWN base exists */ + +/* PCI Bus Info */ +#define IXGBE_PCI_DEVICE_STATUS   0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020 +#define IXGBE_PCI_LINK_STATUS     0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH      0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1    0x10 +#define IXGBE_PCI_LINK_WIDTH_2    0x20 +#define IXGBE_PCI_LINK_WIDTH_4    0x40 +#define IXGBE_PCI_LINK_WIDTH_8    0x80 +#define IXGBE_PCI_LINK_SPEED      0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_HEADER_TYPE_REGISTER  0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms  0x0005 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast.  This is little-endian specific check.*/ +#define IXGBE_IS_MULTICAST(Address) \ +                (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. 
*/ +#define IXGBE_IS_BROADCAST(Address)                      \ +                ((((u8 *)(Address))[0] == ((u8)0xff)) && \ +                (((u8 *)(Address))[1] == ((u8)0xff))) + +/* RAH */ +#define IXGBE_RAH_VIND_MASK     0x003C0000 +#define IXGBE_RAH_VIND_SHIFT    18 +#define IXGBE_RAH_AV            0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL    0xFFFFFFFF + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS       0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK  0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_NFSW_DIS        0x00000040 +#define IXGBE_RFCTL_NFSR_DIS        0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK    0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT   8 +#define IXGBE_RFCTL_NFS_VER_2       0 +#define IXGBE_RFCTL_NFS_VER_3       1 +#define IXGBE_RFCTL_NFS_VER_4       2 +#define IXGBE_RFCTL_IPV6_DIS        0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS   0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS      0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS     0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE     0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH     0x04000000 /* Tx Desc. write-back flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT      16 /* shift to WTHRESH bits */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE     0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004  /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ      0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE   0x1      /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2      /* Tx seq# write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN       0x00000001  /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF  /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN   0x00008000 +#define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */ + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enable */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF        0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF         0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE       0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE        0x00000008 /* Receive FC Enable */ + +#define IXGBE_MFLCN_RPFCE_SHIFT		 4 + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN                 0x00000001  /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK                    0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN               0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN               0x00000003 /* 4 TC no RSS */ +#define IXGBE_MRQC_RTRSS8TCEN            0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN            0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN                
0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN           0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN           0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN           0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN           0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_RSS_FIELD_MASK        0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP    0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4        0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX     0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6        0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP    0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP    0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP    0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_L3L4TXSWEN            0x00008000 + +/* Queue Drop Enable */ +#define IXGBE_QDE_ENABLE     0x00000001 +#define IXGBE_QDE_IDX_MASK   0x00007F00 +#define IXGBE_QDE_IDX_SHIFT           8 + +#define IXGBE_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP    0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC     0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS     0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */ + +#define IXGBE_RXDADV_IPSEC_STATUS_SECP                  0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL       0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH         0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED            0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK               0x18000000 +/* Multiple Transmit Queue Command Register */ +#define IXGBE_MTQC_RT_ENA       0x1 /* DCB Enable */ +#define IXGBE_MTQC_VT_ENA       0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_64Q_1PB      0x0 /* 64 queues 1 pack buffer */ +#define IXGBE_MTQC_32VF         0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF         0x4 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_8TC_8TQ      0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ +#define IXGBE_MTQC_4TC_4TQ	0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */ + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD       0x01    /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP      0x02    /* End of Packet */ +#define IXGBE_RXD_STAT_FLM      0x04    /* FDir Match */ +#define IXGBE_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK   0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT  0x00000004 +#define IXGBE_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS     0x20    /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS     0x40    /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF      0x80    /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT     0x200   /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_LLINT    0x800   /* Pkt 
caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TS       0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP     0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB       0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK      0x8000  /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE        0x01    /* CRC Error */ +#define IXGBE_RXD_ERR_LE        0x02    /* Length Error */ +#define IXGBE_RXD_ERR_PE        0x08    /* Packet Error */ +#define IXGBE_RXD_ERR_OSE       0x10    /* Oversize Error */ +#define IXGBE_RXD_ERR_USE       0x20    /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE      0x40    /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE       0x80    /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK           0xfff00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT          20         /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_FCEOFE         0x80000000 /* FCoEFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR          0x00700000 /* FCERR/FDIRERR */ +#define IXGBE_RXDADV_ERR_FDIR_LEN       0x00100000 /* FDIR Length error */ +#define IXGBE_RXDADV_ERR_FDIR_DROP      0x00200000 /* FDIR Drop error */ +#define IXGBE_RXDADV_ERR_FDIR_COLL      0x00400000 /* FDIR Collision error */ +#define IXGBE_RXDADV_ERR_HBO    0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE     0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE     0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE     0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE    0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE    0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE   0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE    0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK  0x0FFF  /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK      0xE000  /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT     13 +#define IXGBE_RXD_CFI_MASK      0x1000  /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT     12 + +#define IXGBE_RXDADV_STAT_DD            IXGBE_RXD_STAT_DD  /* Done */ +#define IXGBE_RXDADV_STAT_EOP           IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM           IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP            IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK          0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS        0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT        0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP  0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP    0x00000030 /* 11: Ctxt w/ DDP */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR    0x00000010 +#define IXGBE_PSRTYPE_UDPHDR    0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR   0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR   0x00000200 +#define IXGBE_PSRTYPE_L2HDR     0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT     10     /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT        22 +#define IXGBE_SRRCTL_RDMTS_MASK         0x01C00000 +#define IXGBE_SRRCTL_DROP_EN            0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK      0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK      0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY    0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK      0x0E000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP       0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK       0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK       0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX    0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK     0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK        0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT       17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT    5 +#define IXGBE_RXDADV_SPLITHEADER_EN     0x00001000 +#define IXGBE_RXDADV_SPH                0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE       0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP   0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4       0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP   0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX    0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6       0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP   0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP   0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor. 
*/ +#define IXGBE_RXDADV_PKTTYPE_NONE       0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4       0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX    0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6       0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX    0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP        0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP        0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP       0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS        0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP  0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH   0x00002000 /* IPSec AH */ +#define IXGBE_RXDADV_PKTTYPE_LINKSEC    0x00004000 /* LinkSec Encap */ +#define IXGBE_RXDADV_PKTTYPE_ETQF       0x00008000 /* PKTTYPE is ETQF index */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK  0x00000070 /* ETQF has 8 indices */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4          /* Right-shift 4 bits */ + +/* Security Processing bit Indication */ +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP         0x00020000 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH   0x08000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR  0x10000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK      0x18000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG       0x18000000 + +/* Masks to determine if packets should be dropped due to frame errors */ +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ +                                      IXGBE_RXD_ERR_CE | \ +                                      IXGBE_RXD_ERR_LE | \ +                                      IXGBE_RXD_ERR_PE | \ +                                      IXGBE_RXD_ERR_OSE | \ +                                      IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ +                                      IXGBE_RXDADV_ERR_CE | \ +                                      IXGBE_RXDADV_ERR_LE | \ +                                      IXGBE_RXDADV_ERR_PE | \ +                                      IXGBE_RXDADV_ERR_OSE | \ +                                      IXGBE_RXDADV_ERR_USE) + +/* Multicast bit mask */ +#define IXGBE_MCSTCTRL_MFE      0x4 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE  8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE  8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY   1024 + +/* Vlan-specific macros */ +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK  0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK   0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT  0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT  IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* SR-IOV specific macros */ +#define IXGBE_MBVFICR_INDEX(vf_number)   (vf_number >> 4) +#define IXGBE_MBVFICR(_i)                (0x00710 + (_i * 4)) +#define IXGBE_VFLRE(_i)                  (((_i & 1) ? 
0x001C0 : 0x00600)) +#define IXGBE_VFLREC(_i)                 (0x00700 + (_i * 4)) + +enum ixgbe_fdir_pballoc_type { +	IXGBE_FDIR_PBALLOC_NONE = 0, +	IXGBE_FDIR_PBALLOC_64K  = 1, +	IXGBE_FDIR_PBALLOC_128K = 2, +	IXGBE_FDIR_PBALLOC_256K = 3, +}; +#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT           16 + +/* Flow Director register values */ +#define IXGBE_FDIRCTRL_PBALLOC_64K              0x00000001 +#define IXGBE_FDIRCTRL_PBALLOC_128K             0x00000002 +#define IXGBE_FDIRCTRL_PBALLOC_256K             0x00000003 +#define IXGBE_FDIRCTRL_INIT_DONE                0x00000008 +#define IXGBE_FDIRCTRL_PERFECT_MATCH            0x00000010 +#define IXGBE_FDIRCTRL_REPORT_STATUS            0x00000020 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS     0x00000080 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT             8 +#define IXGBE_FDIRCTRL_FLEX_SHIFT               16 +#define IXGBE_FDIRCTRL_SEARCHLIM                0x00800000 +#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT         24 +#define IXGBE_FDIRCTRL_FULL_THRESH_MASK         0xF0000000 +#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT        28 + +#define IXGBE_FDIRTCPM_DPORTM_SHIFT             16 +#define IXGBE_FDIRUDPM_DPORTM_SHIFT             16 +#define IXGBE_FDIRIP6M_DIPM_SHIFT               16 +#define IXGBE_FDIRM_VLANID                      0x00000001 +#define IXGBE_FDIRM_VLANP                       0x00000002 +#define IXGBE_FDIRM_POOL                        0x00000004 +#define IXGBE_FDIRM_L4P                         0x00000008 +#define IXGBE_FDIRM_FLEX                        0x00000010 +#define IXGBE_FDIRM_DIPv6                       0x00000020 + +#define IXGBE_FDIRFREE_FREE_MASK                0xFFFF +#define IXGBE_FDIRFREE_FREE_SHIFT               0 +#define IXGBE_FDIRFREE_COLL_MASK                0x7FFF0000 +#define IXGBE_FDIRFREE_COLL_SHIFT               16 +#define IXGBE_FDIRLEN_MAXLEN_MASK               0x3F +#define IXGBE_FDIRLEN_MAXLEN_SHIFT              0 +#define IXGBE_FDIRLEN_MAXHASH_MASK              0x7FFF0000 +#define IXGBE_FDIRLEN_MAXHASH_SHIFT             16 +#define IXGBE_FDIRUSTAT_ADD_MASK                0xFFFF +#define IXGBE_FDIRUSTAT_ADD_SHIFT               0 +#define IXGBE_FDIRUSTAT_REMOVE_MASK             0xFFFF0000 +#define IXGBE_FDIRUSTAT_REMOVE_SHIFT            16 +#define IXGBE_FDIRFSTAT_FADD_MASK               0x00FF +#define IXGBE_FDIRFSTAT_FADD_SHIFT              0 +#define IXGBE_FDIRFSTAT_FREMOVE_MASK            0xFF00 +#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT           8 +#define IXGBE_FDIRPORT_DESTINATION_SHIFT        16 +#define IXGBE_FDIRVLAN_FLEX_SHIFT               16 +#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT       15 +#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT       16 + +#define IXGBE_FDIRCMD_CMD_MASK                  0x00000003 +#define IXGBE_FDIRCMD_CMD_ADD_FLOW              0x00000001 +#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW           0x00000002 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT        0x00000003 +#define IXGBE_FDIRCMD_FILTER_VALID              0x00000004 +#define IXGBE_FDIRCMD_FILTER_UPDATE             0x00000008 +#define IXGBE_FDIRCMD_IPv6DMATCH                0x00000010 +#define IXGBE_FDIRCMD_L4TYPE_UDP                0x00000020 +#define IXGBE_FDIRCMD_L4TYPE_TCP                0x00000040 +#define IXGBE_FDIRCMD_L4TYPE_SCTP               0x00000060 +#define IXGBE_FDIRCMD_IPV6                      0x00000080 +#define IXGBE_FDIRCMD_CLEARHT                   0x00000100 +#define IXGBE_FDIRCMD_DROP                      0x00000200 +#define IXGBE_FDIRCMD_INT                       0x00000400 +#define IXGBE_FDIRCMD_LAST      
                0x00000800 +#define IXGBE_FDIRCMD_COLLISION                 0x00001000 +#define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000 +#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT           5 +#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16 +#define IXGBE_FDIRCMD_VT_POOL_SHIFT             24 +#define IXGBE_FDIR_INIT_DONE_POLL               10 +#define IXGBE_FDIRCMD_CMD_POLL                  10 + +#define IXGBE_FDIR_DROP_QUEUE                   127 + +/* Manageablility Host Interface defines */ +#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */ +#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */ +#define IXGBE_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN                0x4 +#define FW_CEM_CMD_DRIVER_INFO        0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN    0x5 +#define FW_CEM_CMD_RESERVED           0x0 +#define FW_CEM_UNUSED_VER             0x0 +#define FW_CEM_MAX_RETRIES            3 +#define FW_CEM_RESP_STATUS_SUCCESS    0x1 + +/* Host Interface Command Structures */ +struct ixgbe_hic_hdr { +	u8 cmd; +	u8 buf_len; +	union { +		u8 cmd_resv; +		u8 ret_status; +	} cmd_or_resp; +	u8 checksum; +}; + +struct ixgbe_hic_drv_info { +	struct ixgbe_hic_hdr hdr; +	u8 port_num; +	u8 ver_sub; +	u8 ver_build; +	u8 ver_min; +	u8 ver_maj; +	u8 pad; /* end spacing to ensure length is mult. of dword */ +	u16 pad2; /* end spacing to ensure length is mult. of dword2 */ +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { +	struct { +		__le64 buffer_addr;      /* Address of descriptor's data buf */ +		__le32 cmd_type_len; +		__le32 olinfo_status; +	} read; +	struct { +		__le64 rsvd;       /* Reserved */ +		__le32 nxtseq_seed; +		__le32 status; +	} wb; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { +	struct { +		__le64 pkt_addr; /* Packet buffer address */ +		__le64 hdr_addr; /* Header buffer address */ +	} read; +	struct { +		struct { +			union { +				__le32 data; +				struct { +					__le16 pkt_info; /* RSS, Pkt type */ +					__le16 hdr_info; /* Splithdr, hdrlen */ +				} hs_rss; +			} lo_dword; +			union { +				__le32 rss; /* RSS Hash */ +				struct { +					__le16 ip_id; /* IP id */ +					__le16 csum; /* Packet Checksum */ +				} csum_ip; +			} hi_dword; +		} lower; +		struct { +			__le32 status_error; /* ext status/error */ +			__le16 length; /* Packet length */ +			__le16 vlan; /* VLAN tag */ +		} upper; +	} wb;  /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { +	__le32 vlan_macip_lens; +	__le32 seqnum_seed; +	__le32 type_tucmd_mlhl; +	__le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK      0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC      0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK   0x000003FF /* IPSec SA index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK    0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK  0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT  0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA  0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP   IXGBE_TXD_CMD_EOP  /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS  IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS    IXGBE_TXD_CMD_RS   /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000    /* DDP hdr type or iSCSI */ +#define 
IXGBE_ADVTXD_DCMD_DEXT  IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE   IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE   0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD    IXGBE_TXD_STAT_DD  /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC      0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV   0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT  4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC         0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT      8  /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ +                                 IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ +                                 IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_RSV       0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT      16  /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4      0x00000400  /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6      0x00000000  /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP   0x00000000  /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP   0x00000800  /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP  0x00001000  /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ    0x00002000 /*Req requires Markers and CRC*/ +#define IXGBE_ADVTXD_POPTS_IPSEC      0x00000400 /* IPSec offload request */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +#define IXGBE_ADVTXT_TUCMD_FCOE      0x00008000       /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK  (0x3 << 10)      /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF       ((1 << 2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC    ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE      ((1 << 4) << 10) /* Orientation: End */ +#define IXGBE_ADVTXD_FCOEF_ORIS      ((1 << 5) << 10) /* Orientation: Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N     (0x0 << 10)      /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T     (0x1 << 10)      /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI    (0x2 << 10)      /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A     (0x3 << 10)      /* 11: EOFa */ +#define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */ + +/* Autonegotiation advertised speeds */ +typedef u32 ixgbe_autoneg_advertised; +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_UNKNOWN   0 +#define IXGBE_LINK_SPEED_100_FULL  0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL  0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ +                                        IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ +   
                                     IXGBE_LINK_SPEED_1GB_FULL | \ +                                        IXGBE_LINK_SPEED_10GB_FULL) + + +/* Physical layer type */ +typedef u32 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN      0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T    0x0001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T   0x0002 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX   0x0004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU  0x0008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR   0x0010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM  0x0020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR   0x0040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4  0x0080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4  0x0100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX  0x0200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX  0x0400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR   0x0800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 + +/* Flow Control Macros */ +#define PAUSE_RTT	8 +#define PAUSE_MTU(MTU)	((MTU + 1024 - 1) / 1024) + +#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\ +				PAUSE_MTU(MTU)) +#define FC_LOW_WATER(MTU)  (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) + +/* Software ATR hash keys */ +#define IXGBE_ATR_BUCKET_HASH_KEY    0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define IXGBE_ATR_HASH_MASK     0x7fff +#define IXGBE_ATR_L4TYPE_MASK      0x3 +#define IXGBE_ATR_L4TYPE_UDP       0x1 +#define IXGBE_ATR_L4TYPE_TCP       0x2 +#define IXGBE_ATR_L4TYPE_SCTP      0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +enum ixgbe_atr_flow_type { +	IXGBE_ATR_FLOW_TYPE_IPV4   = 0x0, +	IXGBE_ATR_FLOW_TYPE_UDPV4  = 0x1, +	IXGBE_ATR_FLOW_TYPE_TCPV4  = 0x2, +	IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, +	IXGBE_ATR_FLOW_TYPE_IPV6   = 0x4, +	IXGBE_ATR_FLOW_TYPE_UDPV6  = 0x5, +	IXGBE_ATR_FLOW_TYPE_TCPV6  = 0x6, +	IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, +}; + +/* Flow Director ATR input struct. 
*/ +union ixgbe_atr_input { +	/* +	 * Byte layout in order, all values with MSB first: +	 * +	 * vm_pool    - 1 byte +	 * flow_type  - 1 byte +	 * vlan_id    - 2 bytes +	 * src_ip     - 16 bytes +	 * dst_ip     - 16 bytes +	 * src_port   - 2 bytes +	 * dst_port   - 2 bytes +	 * flex_bytes - 2 bytes +	 * bkt_hash   - 2 bytes +	 */ +	struct { +		u8     vm_pool; +		u8     flow_type; +		__be16 vlan_id; +		__be32 dst_ip[4]; +		__be32 src_ip[4]; +		__be16 src_port; +		__be16 dst_port; +		__be16 flex_bytes; +		__be16 bkt_hash; +	} formatted; +	__be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ixgbe_atr_hash_dword { +	struct { +		u8 vm_pool; +		u8 flow_type; +		__be16 vlan_id; +	} formatted; +	__be32 ip; +	struct { +		__be16 src; +		__be16 dst; +	} port; +	__be16 flex_bytes; +	__be32 dword; +}; + +enum ixgbe_eeprom_type { +	ixgbe_eeprom_uninitialized = 0, +	ixgbe_eeprom_spi, +	ixgbe_flash, +	ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { +	ixgbe_mac_unknown = 0, +	ixgbe_mac_82598EB, +	ixgbe_mac_82599EB, +	ixgbe_mac_X540, +	ixgbe_num_macs +}; + +enum ixgbe_phy_type { +	ixgbe_phy_unknown = 0, +	ixgbe_phy_none, +	ixgbe_phy_tn, +	ixgbe_phy_aq, +	ixgbe_phy_cu_unknown, +	ixgbe_phy_qt, +	ixgbe_phy_xaui, +	ixgbe_phy_nl, +	ixgbe_phy_sfp_passive_tyco, +	ixgbe_phy_sfp_passive_unknown, +	ixgbe_phy_sfp_active_unknown, +	ixgbe_phy_sfp_avago, +	ixgbe_phy_sfp_ftl, +	ixgbe_phy_sfp_ftl_active, +	ixgbe_phy_sfp_unknown, +	ixgbe_phy_sfp_intel, +	ixgbe_phy_sfp_unsupported, +	ixgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID   Module Type + * ============= + * 0    SFP_DA_CU + * 1    SFP_SR + * 2    SFP_LR + * 3    SFP_DA_CU_CORE0 - 82599-specific + * 4    SFP_DA_CU_CORE1 - 82599-specific + * 5    SFP_SR/LR_CORE0 - 82599-specific + * 6    SFP_SR/LR_CORE1 - 82599-specific + */ +enum ixgbe_sfp_type { +	ixgbe_sfp_type_da_cu = 0, +	ixgbe_sfp_type_sr = 1, +	ixgbe_sfp_type_lr = 2, +	ixgbe_sfp_type_da_cu_core0 = 3, +	ixgbe_sfp_type_da_cu_core1 = 4, +	ixgbe_sfp_type_srlr_core0 = 5, +	ixgbe_sfp_type_srlr_core1 = 6, +	ixgbe_sfp_type_da_act_lmt_core0 = 7, +	ixgbe_sfp_type_da_act_lmt_core1 = 8, +	ixgbe_sfp_type_1g_cu_core0 = 9, +	ixgbe_sfp_type_1g_cu_core1 = 10, +	ixgbe_sfp_type_not_present = 0xFFFE, +	ixgbe_sfp_type_unknown = 0xFFFF +}; + +enum ixgbe_media_type { +	ixgbe_media_type_unknown = 0, +	ixgbe_media_type_fiber, +	ixgbe_media_type_fiber_lco, +	ixgbe_media_type_copper, +	ixgbe_media_type_backplane, +	ixgbe_media_type_cx4, +	ixgbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ixgbe_fc_mode { +	ixgbe_fc_none = 0, +	ixgbe_fc_rx_pause, +	ixgbe_fc_tx_pause, +	ixgbe_fc_full, +#ifdef CONFIG_DCB +	ixgbe_fc_pfc, +#endif +	ixgbe_fc_default +}; + +/* Smart Speed Settings */ +#define IXGBE_SMARTSPEED_MAX_RETRIES	3 +enum ixgbe_smart_speed { +	ixgbe_smart_speed_auto = 0, +	ixgbe_smart_speed_on, +	ixgbe_smart_speed_off +}; + +/* PCI bus types */ +enum ixgbe_bus_type { +	ixgbe_bus_type_unknown = 0, +	ixgbe_bus_type_pci, +	ixgbe_bus_type_pcix, +	ixgbe_bus_type_pci_express, +	ixgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ixgbe_bus_speed { +	ixgbe_bus_speed_unknown = 0, +	ixgbe_bus_speed_33      = 33, +	ixgbe_bus_speed_66      = 66, +	ixgbe_bus_speed_100     = 100, +	ixgbe_bus_speed_120     = 120, +	ixgbe_bus_speed_133     = 133, +	ixgbe_bus_speed_2500    = 2500, +	ixgbe_bus_speed_5000    = 5000, +	ixgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ixgbe_bus_width { +	ixgbe_bus_width_unknown = 0, +	ixgbe_bus_width_pcie_x1 = 1, +	
ixgbe_bus_width_pcie_x2 = 2, +	ixgbe_bus_width_pcie_x4 = 4, +	ixgbe_bus_width_pcie_x8 = 8, +	ixgbe_bus_width_32      = 32, +	ixgbe_bus_width_64      = 64, +	ixgbe_bus_width_reserved +}; + +struct ixgbe_addr_filter_info { +	u32 num_mc_addrs; +	u32 rar_used_count; +	u32 mta_in_use; +	u32 overflow_promisc; +	bool uc_set_promisc; +	bool user_set_promisc; +}; + +/* Bus parameters */ +struct ixgbe_bus_info { +	enum ixgbe_bus_speed speed; +	enum ixgbe_bus_width width; +	enum ixgbe_bus_type type; + +	u16 func; +	u16 lan_id; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { +	u32 high_water; /* Flow Control High-water */ +	u32 low_water; /* Flow Control Low-water */ +	u16 pause_time; /* Flow Control Pause timer */ +	bool send_xon; /* Flow control send XON */ +	bool strict_ieee; /* Strict IEEE mode */ +	bool disable_fc_autoneg; /* Do not autonegotiate FC */ +	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ +	enum ixgbe_fc_mode current_mode; /* FC mode in effect */ +	enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { +	u64 crcerrs; +	u64 illerrc; +	u64 errbc; +	u64 mspdc; +	u64 mpctotal; +	u64 mpc[8]; +	u64 mlfc; +	u64 mrfc; +	u64 rlec; +	u64 lxontxc; +	u64 lxonrxc; +	u64 lxofftxc; +	u64 lxoffrxc; +	u64 pxontxc[8]; +	u64 pxonrxc[8]; +	u64 pxofftxc[8]; +	u64 pxoffrxc[8]; +	u64 prc64; +	u64 prc127; +	u64 prc255; +	u64 prc511; +	u64 prc1023; +	u64 prc1522; +	u64 gprc; +	u64 bprc; +	u64 mprc; +	u64 gptc; +	u64 gorc; +	u64 gotc; +	u64 rnbc[8]; +	u64 ruc; +	u64 rfc; +	u64 roc; +	u64 rjc; +	u64 mngprc; +	u64 mngpdc; +	u64 mngptc; +	u64 tor; +	u64 tpr; +	u64 tpt; +	u64 ptc64; +	u64 ptc127; +	u64 ptc255; +	u64 ptc511; +	u64 ptc1023; +	u64 ptc1522; +	u64 mptc; +	u64 bptc; +	u64 xec; +	u64 rqsmr[16]; +	u64 tqsmr[8]; +	u64 qprc[16]; +	u64 qptc[16]; +	u64 qbrc[16]; +	u64 qbtc[16]; +	u64 qprdc[16]; +	u64 pxon2offc[8]; +	u64 fdirustat_add; +	u64 fdirustat_remove; +	u64 fdirfstat_fadd; +	u64 fdirfstat_fremove; +	u64 fdirmatch; +	u64 fdirmiss; +	u64 fccrc; +	u64 fcoerpdc; +	u64 fcoeprc; +	u64 fcoeptc; +	u64 fcoedwrc; +	u64 fcoedwtc; +	u64 b2ospc; +	u64 b2ogprc; +	u64 o2bgptc; +	u64 o2bspc; +}; + +/* forward declaration */ +struct ixgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, +                                  u32 *vmdq); + +/* Function pointer table */ +struct ixgbe_eeprom_operations { +	s32 (*init_params)(struct ixgbe_hw *); +	s32 (*read)(struct ixgbe_hw *, u16, u16 *); +	s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); +	s32 (*write)(struct ixgbe_hw *, u16, u16); +	s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); +	s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); +	s32 (*update_checksum)(struct ixgbe_hw *); +	u16 (*calc_checksum)(struct ixgbe_hw *); +}; + +struct ixgbe_mac_operations { +	s32 (*init_hw)(struct ixgbe_hw *); +	s32 (*reset_hw)(struct ixgbe_hw *); +	s32 (*start_hw)(struct ixgbe_hw *); +	s32 (*clear_hw_cntrs)(struct ixgbe_hw *); +	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); +	u32 (*get_supported_physical_layer)(struct ixgbe_hw *); +	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); +	s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); +	s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); +	s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); +	s32 (*stop_adapter)(struct ixgbe_hw *); +	s32 (*get_bus_info)(struct ixgbe_hw *); +	void 
(*set_lan_id)(struct ixgbe_hw *); +	s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); +	s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); +	s32 (*setup_sfp)(struct ixgbe_hw *); +	s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); +	s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); +	void (*release_swfw_sync)(struct ixgbe_hw *, u16); + +	/* Link */ +	void (*disable_tx_laser)(struct ixgbe_hw *); +	void (*enable_tx_laser)(struct ixgbe_hw *); +	void (*flap_tx_laser)(struct ixgbe_hw *); +	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); +	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); +	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, +	                             bool *); + +	/* Packet Buffer Manipulation */ +	void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); + +	/* LED */ +	s32 (*led_on)(struct ixgbe_hw *, u32); +	s32 (*led_off)(struct ixgbe_hw *, u32); +	s32 (*blink_led_start)(struct ixgbe_hw *, u32); +	s32 (*blink_led_stop)(struct ixgbe_hw *, u32); + +	/* RAR, Multicast, VLAN */ +	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); +	s32 (*clear_rar)(struct ixgbe_hw *, u32); +	s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); +	s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); +	s32 (*init_rx_addrs)(struct ixgbe_hw *); +	s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); +	s32 (*enable_mc)(struct ixgbe_hw *); +	s32 (*disable_mc)(struct ixgbe_hw *); +	s32 (*clear_vfta)(struct ixgbe_hw *); +	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); +	s32 (*init_uta_tables)(struct ixgbe_hw *); +	void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); +	void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); + +	/* Flow Control */ +	s32 (*fc_enable)(struct ixgbe_hw *, s32); + +	/* Manageability interface */ +	s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); +}; + +struct ixgbe_phy_operations { +	s32 (*identify)(struct ixgbe_hw *); +	s32 (*identify_sfp)(struct ixgbe_hw *); +	s32 (*init)(struct ixgbe_hw *); +	s32 (*reset)(struct ixgbe_hw *); +	s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); +	s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); +	s32 (*setup_link)(struct ixgbe_hw *); +	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, +	                        bool); +	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); +	s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); +	s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); +	s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); +	s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); +	s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); +	s32 (*check_overtemp)(struct ixgbe_hw *); +}; + +struct ixgbe_eeprom_info { +	struct ixgbe_eeprom_operations  ops; +	enum ixgbe_eeprom_type          type; +	u32                             semaphore_delay; +	u16                             word_size; +	u16                             address_bits; +	u16                             word_page_size; +}; + +#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED	0x01 +struct ixgbe_mac_info { +	struct ixgbe_mac_operations     ops; +	enum ixgbe_mac_type             type; +	u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; +	u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; +	u8                              san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; +	/* prefix for World Wide Node Name (WWNN) */ +	u16                             wwnn_prefix; +	/* prefix for World Wide Port Name (WWPN) */ +	u16                   
          wwpn_prefix; +#define IXGBE_MAX_MTA			128 +	u32				mta_shadow[IXGBE_MAX_MTA]; +	s32                             mc_filter_type; +	u32                             mcft_size; +	u32                             vft_size; +	u32                             num_rar_entries; +	u32                             rar_highwater; +	u32				rx_pb_size; +	u32                             max_tx_queues; +	u32                             max_rx_queues; +	u32                             max_msix_vectors; +	u32                             orig_autoc; +	u32                             orig_autoc2; +	bool                            orig_link_settings_stored; +	bool                            autotry_restart; +	u8                              flags; +}; + +struct ixgbe_phy_info { +	struct ixgbe_phy_operations     ops; +	struct mdio_if_info		mdio; +	enum ixgbe_phy_type             type; +	u32                             id; +	enum ixgbe_sfp_type             sfp_type; +	bool                            sfp_setup_needed; +	u32                             revision; +	enum ixgbe_media_type           media_type; +	bool                            reset_disable; +	ixgbe_autoneg_advertised        autoneg_advertised; +	enum ixgbe_smart_speed          smart_speed; +	bool                            smart_speed_active; +	bool                            multispeed_fiber; +	bool                            reset_if_overtemp; +}; + +#include "ixgbe_mbx.h" + +struct ixgbe_mbx_operations { +	s32 (*init_params)(struct ixgbe_hw *hw); +	s32 (*read)(struct ixgbe_hw *, u32 *, u16,  u16); +	s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); +	s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16,  u16); +	s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); +	s32 (*check_for_msg)(struct ixgbe_hw *, u16); +	s32 (*check_for_ack)(struct ixgbe_hw *, u16); +	s32 (*check_for_rst)(struct ixgbe_hw *, u16); +}; + +struct ixgbe_mbx_stats { +	u32 msgs_tx; +	u32 msgs_rx; + +	u32 acks; +	u32 reqs; +	u32 rsts; +}; + +struct ixgbe_mbx_info { +	struct ixgbe_mbx_operations ops; +	struct ixgbe_mbx_stats stats; +	u32 timeout; +	u32 usec_delay; +	u32 v2p_mailbox; +	u16 size; +}; + +struct ixgbe_hw { +	u8 __iomem			*hw_addr; +	void				*back; +	struct ixgbe_mac_info		mac; +	struct ixgbe_addr_filter_info	addr_ctrl; +	struct ixgbe_fc_info		fc; +	struct ixgbe_phy_info		phy; +	struct ixgbe_eeprom_info	eeprom; +	struct ixgbe_bus_info		bus; +	struct ixgbe_mbx_info		mbx; +	u16				device_id; +	u16				vendor_id; +	u16				subsystem_device_id; +	u16				subsystem_vendor_id; +	u8				revision_id; +	bool				adapter_stopped; +	bool				force_full_reset; +}; + +struct ixgbe_info { +	enum ixgbe_mac_type		mac; +	s32 				(*get_invariants)(struct ixgbe_hw *); +	struct ixgbe_mac_operations	*mac_ops; +	struct ixgbe_eeprom_operations	*eeprom_ops; +	struct ixgbe_phy_operations	*phy_ops; +	struct ixgbe_mbx_operations	*mbx_ops; +}; + + +/* Error Codes */ +#define IXGBE_ERR_EEPROM                        -1 +#define IXGBE_ERR_EEPROM_CHECKSUM               -2 +#define IXGBE_ERR_PHY                           -3 +#define IXGBE_ERR_CONFIG                        -4 +#define IXGBE_ERR_PARAM                         -5 +#define IXGBE_ERR_MAC_TYPE                      -6 +#define IXGBE_ERR_UNKNOWN_PHY                   -7 +#define IXGBE_ERR_LINK_SETUP                    -8 +#define IXGBE_ERR_ADAPTER_STOPPED               -9 +#define IXGBE_ERR_INVALID_MAC_ADDR              -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED          -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING       -12 +#define 
IXGBE_ERR_INVALID_LINK_SETTINGS         -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE          -14 +#define IXGBE_ERR_RESET_FAILED                  -15 +#define IXGBE_ERR_SWFW_SYNC                     -16 +#define IXGBE_ERR_PHY_ADDR_INVALID              -17 +#define IXGBE_ERR_I2C                           -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED             -19 +#define IXGBE_ERR_SFP_NOT_PRESENT               -20 +#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21 +#define IXGBE_ERR_NO_SAN_ADDR_PTR               -22 +#define IXGBE_ERR_FDIR_REINIT_FAILED            -23 +#define IXGBE_ERR_EEPROM_VERSION                -24 +#define IXGBE_ERR_NO_SPACE                      -25 +#define IXGBE_ERR_OVERTEMP                      -26 +#define IXGBE_ERR_FC_NOT_NEGOTIATED             -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED              -28 +#define IXGBE_ERR_FLOW_CONTROL                  -29 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -30 +#define IXGBE_ERR_PBA_SECTION                   -31 +#define IXGBE_ERR_INVALID_ARGUMENT              -32 +#define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33 +#define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF + +#endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c new file mode 100644 index 00000000000..2696c78e9f4 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -0,0 +1,941 @@ +/******************************************************************************* + +  Intel 10 Gigabit PCI Express Linux driver +  Copyright(c) 1999 - 2011 Intel Corporation. + +  This program is free software; you can redistribute it and/or modify it +  under the terms and conditions of the GNU General Public License, +  version 2, as published by the Free Software Foundation. + +  This program is distributed in the hope it will be useful, but WITHOUT +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for +  more details. + +  You should have received a copy of the GNU General Public License along with +  this program; if not, write to the Free Software Foundation, Inc., +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + +  The full GNU General Public License is included in this distribution in +  the file called "COPYING". + +  Contact Information: +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +  Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe.h" +#include "ixgbe_phy.h" + +#define IXGBE_X540_MAX_TX_QUEUES 128 +#define IXGBE_X540_MAX_RX_QUEUES 128 +#define IXGBE_X540_RAR_ENTRIES   128 +#define IXGBE_X540_MC_TBL_SIZE   128 +#define IXGBE_X540_VFT_TBL_SIZE  128 +#define IXGBE_X540_RX_PB_SIZE	 384 + +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); + +static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ +	return ixgbe_media_type_copper; +} + +static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +{ +	struct ixgbe_mac_info *mac = &hw->mac; + +	/* Call PHY identify routine to get the phy type */ +	ixgbe_identify_phy_generic(hw); + +	mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; +	mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; +	mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; +	mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; +	mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; +	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + +	return 0; +} + +/** + *  ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities + *  @hw: pointer to hardware structure + *  @speed: new link speed + *  @autoneg: true if autonegotiation enabled + *  @autoneg_wait_to_complete: true when waiting for completion is needed + **/ +static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, +                                     ixgbe_link_speed speed, bool autoneg, +                                     bool autoneg_wait_to_complete) +{ +	return hw->phy.ops.setup_link_speed(hw, speed, autoneg, +	                                    autoneg_wait_to_complete); +} + +/** + *  ixgbe_reset_hw_X540 - Perform hardware reset + *  @hw: pointer to hardware structure + * + *  Resets the hardware by resetting the transmit and receive units, masks + *  and clears all interrupts, performs a PHY reset, and performs a link (MAC) + *  reset. + **/ +static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +{ +	ixgbe_link_speed link_speed; +	s32 status = 0; +	u32 ctrl; +	u32 ctrl_ext; +	u32 reset_bit; +	u32 i; +	u32 autoc; +	u32 autoc2; +	bool link_up = false; + +	/* Call adapter stop to disable tx/rx and clear interrupts */ +	hw->mac.ops.stop_adapter(hw); + +	/* +	 * Prevent the PCI-E bus from hanging by disabling PCI-E master +	 * access and verifying no pending requests before reset +	 */ +	ixgbe_disable_pcie_master(hw); + +mac_reset_top: +	/* +	 * Issue global reset to the MAC.  Needs to be SW reset if link is up. +	 * If link reset is used when link is up, it might reset the PHY when +	 * mng is using it.  If link is down or the flag to force full link +	 * reset is set, then perform link reset. 
+	 */ +	if (hw->force_full_reset) { +		reset_bit = IXGBE_CTRL_LNK_RST; +	} else { +		hw->mac.ops.check_link(hw, &link_speed, &link_up, false); +		if (!link_up) +			reset_bit = IXGBE_CTRL_LNK_RST; +		else +			reset_bit = IXGBE_CTRL_RST; +	} + +	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); +	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); +	IXGBE_WRITE_FLUSH(hw); + +	/* Poll for reset bit to self-clear indicating reset is complete */ +	for (i = 0; i < 10; i++) { +		udelay(1); +		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); +		if (!(ctrl & reset_bit)) +			break; +	} +	if (ctrl & reset_bit) { +		status = IXGBE_ERR_RESET_FAILED; +		hw_dbg(hw, "Reset polling failed to complete.\n"); +	} + +	/* +	 * Double resets are required for recovery from certain error +	 * conditions.  Between resets, it is necessary to stall to allow time +	 * for any pending HW events to complete.  We use 1usec since that is +	 * what is needed for ixgbe_disable_pcie_master().  The second reset +	 * then clears out any effects of those events. +	 */ +	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { +		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; +		udelay(1); +		goto mac_reset_top; +	} + +	/* Clear PF Reset Done bit so PF/VF Mail Ops can work */ +	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); +	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; +	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); +	IXGBE_WRITE_FLUSH(hw); + +	msleep(50); + +	/* Set the Rx packet buffer size. */ +	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); + +	/* Store the permanent mac address */ +	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + +	/* +	 * Store the original AUTOC/AUTOC2 values if they have not been +	 * stored off yet.  Otherwise restore the stored original +	 * values since the reset operation sets back to defaults. +	 */ +	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); +	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); +	if (hw->mac.orig_link_settings_stored == false) { +		hw->mac.orig_autoc = autoc; +		hw->mac.orig_autoc2 = autoc2; +		hw->mac.orig_link_settings_stored = true; +	} else { +		if (autoc != hw->mac.orig_autoc) +			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | +			                IXGBE_AUTOC_AN_RESTART)); + +		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != +		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { +			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; +			autoc2 |= (hw->mac.orig_autoc2 & +			           IXGBE_AUTOC2_UPPER_MASK); +			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); +		} +	} + +	/* +	 * Store MAC address from RAR0, clear receive address registers, and +	 * clear the multicast table.  Also reset num_rar_entries to 128, +	 * since we modify this value when programming the SAN MAC address. 
+	 */ +	hw->mac.num_rar_entries = IXGBE_X540_RAR_ENTRIES; +	hw->mac.ops.init_rx_addrs(hw); + +	/* Store the permanent mac address */ +	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + +	/* Store the permanent SAN mac address */ +	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + +	/* Add the SAN MAC address to the RAR only if it's a valid address */ +	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { +		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, +		                    hw->mac.san_addr, 0, IXGBE_RAH_AV); + +		/* Reserve the last RAR for the SAN MAC address */ +		hw->mac.num_rar_entries--; +	} + +	/* Store the alternative WWNN/WWPN prefix */ +	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, +	                           &hw->mac.wwpn_prefix); + +	return status; +} + +/** + *  ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx + *  @hw: pointer to hardware structure + * + *  Starts the hardware using the generic start_hw function + *  and the generation-specific start_hw function. + *  Then performs revision-specific operations, if any. + **/ +static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +{ +	s32 ret_val = 0; + +	ret_val = ixgbe_start_hw_generic(hw); +	if (ret_val != 0) +		goto out; + +	ret_val = ixgbe_start_hw_gen2(hw); +	hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE; +out: +	return ret_val; +} + +/** + *  ixgbe_get_supported_physical_layer_X540 - Returns physical layer type + *  @hw: pointer to hardware structure + * + *  Determines physical layer capabilities of the current configuration. + **/ +static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) +{ +	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; +	u16 ext_ability = 0; + +	hw->phy.ops.identify(hw); + +	hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, +			     &ext_ability); +	if (ext_ability & MDIO_PMA_EXTABLE_10GBT) +		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; +	if (ext_ability & MDIO_PMA_EXTABLE_1000BT) +		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; +	if (ext_ability & MDIO_PMA_EXTABLE_100BTX) +		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + +	return physical_layer; +} + +/** + *  ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + *  @hw: pointer to hardware structure + * + *  Initializes the EEPROM parameters ixgbe_eeprom_info within the + *  ixgbe_hw struct in order to set up EEPROM access. + **/ +static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +{ +	struct ixgbe_eeprom_info *eeprom = &hw->eeprom; +	u32 eec; +	u16 eeprom_size; + +	if (eeprom->type == ixgbe_eeprom_uninitialized) { +		eeprom->semaphore_delay = 10; +		eeprom->type = ixgbe_flash; + +		eec = IXGBE_READ_REG(hw, IXGBE_EEC); +		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> +		                    IXGBE_EEC_SIZE_SHIFT); +		eeprom->word_size = 1 << (eeprom_size + +		                          IXGBE_EEPROM_WORD_SIZE_SHIFT); + +		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", +		       eeprom->type, eeprom->word_size); +	} + +	return 0; +} + +/** + *  ixgbe_read_eerd_X540 - Read EEPROM word using EERD + *  @hw: pointer to hardware structure + *  @offset: offset of word in the EEPROM to read + *  @data: word read from the EEPROM + * + *  Reads a 16 bit word from the EEPROM using the EERD register. 
+ **/ +static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ +	s32 status = 0; + +	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == +	    0) +		status = ixgbe_read_eerd_generic(hw, offset, data); +	else +		status = IXGBE_ERR_SWFW_SYNC; + +	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +	return status; +} + +/** + *  ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD + *  @hw: pointer to hardware structure + *  @offset: offset of word in the EEPROM to read + *  @words: number of words + *  @data: word(s) read from the EEPROM + * + *  Reads 16 bit word(s) from the EEPROM using the EERD register. + **/ +static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, +				       u16 offset, u16 words, u16 *data) +{ +	s32 status = 0; + +	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == +	    0) +		status = ixgbe_read_eerd_buffer_generic(hw, offset, +							words, data); +	else +		status = IXGBE_ERR_SWFW_SYNC; + +	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +	return status; +} + +/** + *  ixgbe_write_eewr_X540 - Write EEPROM word using EEWR + *  @hw: pointer to hardware structure + *  @offset: offset of word in the EEPROM to write + *  @data: word to write to the EEPROM + * + *  Writes a 16 bit word to the EEPROM using the EEWR register. + **/ +static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +{ +	s32 status = 0; + +	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) +		status = ixgbe_write_eewr_generic(hw, offset, data); +	else +		status = IXGBE_ERR_SWFW_SYNC; + +	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +	return status; +} + +/** + *  ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR + *  @hw: pointer to hardware structure + *  @offset: offset of word in the EEPROM to write + *  @words: number of words + *  @data: word(s) to write to the EEPROM + * + *  Writes 16 bit word(s) to the EEPROM using the EEWR register. + **/ +static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, +					u16 offset, u16 words, u16 *data) +{ +	s32 status = 0; + +	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == +	    0) +		status = ixgbe_write_eewr_buffer_generic(hw, offset, +							 words, data); +	else +		status = IXGBE_ERR_SWFW_SYNC; + +	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +	return status; +} + +/** + *  ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum + * + *  This function does not use synchronization for EERD and EEWR. It can + *  be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540. + * + *  @hw: pointer to hardware structure + **/ +static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ +	u16 i; +	u16 j; +	u16 checksum = 0; +	u16 length = 0; +	u16 pointer = 0; +	u16 word = 0; + +	/* +	 * Do not use hw->eeprom.ops.read because we do not want to take +	 * the synchronization semaphores here. Instead use +	 * ixgbe_read_eerd_generic +	 */ + +	/* Include 0x0-0x3E in the checksum */ +	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { +		if (ixgbe_read_eerd_generic(hw, i, &word) != 0) { +			hw_dbg(hw, "EEPROM read failed\n"); +			break; +		} +		checksum += word; +	} + +	/* +	 * Include all data from pointers 0x3, 0x6-0xE.  This excludes the +	 * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
+	 */ +	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { +		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) +			continue; + +		if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) { +			hw_dbg(hw, "EEPROM read failed\n"); +			break; +		} + +		/* Skip pointer section if the pointer is invalid. */ +		if (pointer == 0xFFFF || pointer == 0 || +		    pointer >= hw->eeprom.word_size) +			continue; + +		if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) { +			hw_dbg(hw, "EEPROM read failed\n"); +			break; +		} + +		/* Skip pointer section if length is invalid. */ +		if (length == 0xFFFF || length == 0 || +		    (pointer + length) >= hw->eeprom.word_size) +			continue; + +		for (j = pointer+1; j <= pointer+length; j++) { +			if (ixgbe_read_eerd_generic(hw, j, &word) != 0) { +				hw_dbg(hw, "EEPROM read failed\n"); +				break; +			} +			checksum += word; +		} +	} + +	checksum = (u16)IXGBE_EEPROM_SUM - checksum; + +	return checksum; +} + +/** + *  ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum + *  @hw: pointer to hardware structure + *  @checksum_val: calculated checksum + * + *  Performs checksum calculation and validates the EEPROM checksum.  If the + *  caller does not need checksum_val, the value can be NULL. + **/ +static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, +					       u16 *checksum_val) +{ +	s32 status; +	u16 checksum; +	u16 read_checksum = 0; + +	/* +	 * Read the first word from the EEPROM. If this times out or fails, do +	 * not continue or we could be in for a very long wait while every +	 * EEPROM read fails +	 */ +	status = hw->eeprom.ops.read(hw, 0, &checksum); + +	if (status != 0) { +		hw_dbg(hw, "EEPROM read failed\n"); +		goto out; +	} + +	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { +		checksum = hw->eeprom.ops.calc_checksum(hw); + +		/* +		 * Do not use hw->eeprom.ops.read because we do not want to take +		 * the synchronization semaphores twice here. +		 */ +		ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, +					&read_checksum); + +		/* +		 * Verify read checksum from EEPROM is the same as +		 * calculated checksum +		 */ +		if (read_checksum != checksum) +			status = IXGBE_ERR_EEPROM_CHECKSUM; + +		/* If the user cares, return the calculated checksum */ +		if (checksum_val) +			*checksum_val = checksum; +	} else { +		status = IXGBE_ERR_SWFW_SYNC; +	} + +	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +out: +	return status; +} + +/** + * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ +	s32 status; +	u16 checksum; + +	/* +	 * Read the first word from the EEPROM. If this times out or fails, do +	 * not continue or we could be in for a very long wait while every +	 * EEPROM read fails +	 */ +	status = hw->eeprom.ops.read(hw, 0, &checksum); + +	if (status != 0) +		hw_dbg(hw, "EEPROM read failed\n"); + +	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { +		checksum = hw->eeprom.ops.calc_checksum(hw); + +		/* +		 * Do not use hw->eeprom.ops.write because we do not want to +		 * take the synchronization semaphores twice here. 
+		 */ +		status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, +						  checksum); + +		if (status == 0) +			status = ixgbe_update_flash_X540(hw); +		else +			status = IXGBE_ERR_SWFW_SYNC; +	} + +	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + +	return status; +} + +/** + * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Set FLUP (bit 23) of the EEC register to instruct the hardware to copy + * EEPROM from shadow RAM to the flash device. + **/ +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +{ +	u32 flup; +	s32 status = IXGBE_ERR_EEPROM; + +	status = ixgbe_poll_flash_update_done_X540(hw); +	if (status == IXGBE_ERR_EEPROM) { +		hw_dbg(hw, "Flash update time out\n"); +		goto out; +	} + +	flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; +	IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + +	status = ixgbe_poll_flash_update_done_X540(hw); +	if (status == 0) +		hw_dbg(hw, "Flash update complete\n"); +	else +		hw_dbg(hw, "Flash update time out\n"); + +	if (hw->revision_id == 0) { +		flup = IXGBE_READ_REG(hw, IXGBE_EEC); + +		if (flup & IXGBE_EEC_SEC1VAL) { +			flup |= IXGBE_EEC_FLUP; +			IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); +		} + +		status = ixgbe_poll_flash_update_done_X540(hw); +		if (status == 0) +			hw_dbg(hw, "Flash update complete\n"); +		else +			hw_dbg(hw, "Flash update time out\n"); +	} +out: +	return status; +} + +/** + * ixgbe_poll_flash_update_done_X540 - Poll flash update status + * @hw: pointer to hardware structure + * + * Polls the FLUDONE (bit 26) of the EEC Register to determine when the + * flash update is done. + **/ +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +{ +	u32 i; +	u32 reg; +	s32 status = IXGBE_ERR_EEPROM; + +	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { +		reg = IXGBE_READ_REG(hw, IXGBE_EEC); +		if (reg & IXGBE_EEC_FLUDONE) { +			status = 0; +			break; +		} +		udelay(5); +	} +	return status; +} + +/** + * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the SW_FW_SYNC register for + * the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +{ +	u32 swfw_sync; +	u32 swmask = mask; +	u32 fwmask = mask << 5; +	u32 hwmask = 0; +	u32 timeout = 200; +	u32 i; + +	if (swmask == IXGBE_GSSR_EEP_SM) +		hwmask = IXGBE_GSSR_FLASH_SM; + +	for (i = 0; i < timeout; i++) { +		/* +		 * SW NVM semaphore bit is used for access to all +		 * SW_FW_SYNC bits (not just NVM) +		 */ +		if (ixgbe_get_swfw_sync_semaphore(hw)) +			return IXGBE_ERR_SWFW_SYNC; + +		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); +		if (!(swfw_sync & (fwmask | swmask | hwmask))) { +			swfw_sync |= swmask; +			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); +			ixgbe_release_swfw_sync_semaphore(hw); +			break; +		} else { +			/* +			 * Firmware currently using resource (fwmask), +			 * hardware currently using resource (hwmask), +			 * or other software thread currently using +			 * resource (swmask) +			 */ +			ixgbe_release_swfw_sync_semaphore(hw); +			usleep_range(5000, 10000); +		} +	} + +	/* +	 * If the resource is not released by the FW/HW the SW can assume that +	 * the FW/HW has malfunctioned. In that case the SW should set the +	 * SW bit(s) of the requested resource(s) while ignoring the +	 * corresponding FW/HW bits in the SW_FW_SYNC register. 
+	 */ +	if (i >= timeout) { +		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); +		if (swfw_sync & (fwmask | hwmask)) { +			if (ixgbe_get_swfw_sync_semaphore(hw)) +				return IXGBE_ERR_SWFW_SYNC; + +			swfw_sync |= swmask; +			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); +			ixgbe_release_swfw_sync_semaphore(hw); +		} +	} + +	usleep_range(5000, 10000); +	return 0; +} + +/** + * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the SW_FW_SYNC register + * for the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +{ +	u32 swfw_sync; +	u32 swmask = mask; + +	ixgbe_get_swfw_sync_semaphore(hw); + +	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); +	swfw_sync &= ~swmask; +	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + +	ixgbe_release_swfw_sync_semaphore(hw); +	usleep_range(5000, 10000); +} + +/** + * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so SW/FW can gain control of shared resources + **/ +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ +	s32 status = IXGBE_ERR_EEPROM; +	u32 timeout = 2000; +	u32 i; +	u32 swsm; + +	/* Get SMBI software semaphore between device drivers first */ +	for (i = 0; i < timeout; i++) { +		/* +		 * If the SMBI bit is 0 when we read it, then the bit will be +		 * set and we have the semaphore +		 */ +		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); +		if (!(swsm & IXGBE_SWSM_SMBI)) { +			status = 0; +			break; +		} +		udelay(50); +	} + +	/* Now get the semaphore between SW/FW through the REGSMP bit */ +	if (!status) { +		for (i = 0; i < timeout; i++) { +			swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); +			if (!(swsm & IXGBE_SWFW_REGSMP)) +				break; + +			udelay(50); +		} +	} else { +		hw_dbg(hw, "Software semaphore SMBI between device drivers " +		           "not granted.\n"); +	} + +	return status; +} + +/** + * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ +	u32 swsm; + +	/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ + +	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); +	swsm &= ~IXGBE_SWSM_SMBI; +	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + +	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); +	swsm &= ~IXGBE_SWFW_REGSMP; +	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); + +	IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_blink_led_start_X540 - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Devices that implement the version 2 interface: + *   X540 + **/ +static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +{ +	u32 macc_reg; +	u32 ledctl_reg; + +	/* +	 * In order for the blink bit in the LED control register +	 * to work, link and speed must be forced in the MAC. We +	 * will reverse this when we stop the blinking. +	 */ +	macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); +	macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; +	IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + +	/* Set the LED to LINK_UP + BLINK. 
*/ +	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); +	ledctl_reg |= IXGBE_LED_BLINK(index); +	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); +	IXGBE_WRITE_FLUSH(hw); + +	return 0; +} + +/** + * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + * + * Devices that implement the version 2 interface: + *   X540 + **/ +static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +{ +	u32 macc_reg; +	u32 ledctl_reg; + +	/* Restore the LED to its default value. */ +	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); +	ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); +	ledctl_reg &= ~IXGBE_LED_BLINK(index); +	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + +	/* Unforce link and speed in the MAC. */ +	macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); +	macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); +	IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); +	IXGBE_WRITE_FLUSH(hw); + +	return 0; +} +static struct ixgbe_mac_operations mac_ops_X540 = { +	.init_hw                = &ixgbe_init_hw_generic, +	.reset_hw               = &ixgbe_reset_hw_X540, +	.start_hw               = &ixgbe_start_hw_X540, +	.clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic, +	.get_media_type         = &ixgbe_get_media_type_X540, +	.get_supported_physical_layer = +                                  &ixgbe_get_supported_physical_layer_X540, +	.enable_rx_dma          = &ixgbe_enable_rx_dma_generic, +	.get_mac_addr           = &ixgbe_get_mac_addr_generic, +	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic, +	.get_device_caps        = &ixgbe_get_device_caps_generic, +	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic, +	.stop_adapter           = &ixgbe_stop_adapter_generic, +	.get_bus_info           = &ixgbe_get_bus_info_generic, +	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie, +	.read_analog_reg8       = NULL, +	.write_analog_reg8      = NULL, +	.setup_link             = &ixgbe_setup_mac_link_X540, +	.set_rxpba		= &ixgbe_set_rxpba_generic, +	.check_link             = &ixgbe_check_mac_link_generic, +	.get_link_capabilities  = &ixgbe_get_copper_link_capabilities_generic, +	.led_on                 = &ixgbe_led_on_generic, +	.led_off                = &ixgbe_led_off_generic, +	.blink_led_start        = &ixgbe_blink_led_start_X540, +	.blink_led_stop         = &ixgbe_blink_led_stop_X540, +	.set_rar                = &ixgbe_set_rar_generic, +	.clear_rar              = &ixgbe_clear_rar_generic, +	.set_vmdq               = &ixgbe_set_vmdq_generic, +	.clear_vmdq             = &ixgbe_clear_vmdq_generic, +	.init_rx_addrs          = &ixgbe_init_rx_addrs_generic, +	.update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic, +	.enable_mc              = &ixgbe_enable_mc_generic, +	.disable_mc             = &ixgbe_disable_mc_generic, +	.clear_vfta             = &ixgbe_clear_vfta_generic, +	.set_vfta               = &ixgbe_set_vfta_generic, +	.fc_enable              = &ixgbe_fc_enable_generic, +	.set_fw_drv_ver         = &ixgbe_set_fw_drv_ver_generic, +	.init_uta_tables        = &ixgbe_init_uta_tables_generic, +	.setup_sfp              = NULL, +	.set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing, +	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, +	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync_X540, +	.release_swfw_sync      = &ixgbe_release_swfw_sync_X540, +}; + +static struct 
ixgbe_eeprom_operations eeprom_ops_X540 = { +	.init_params            = &ixgbe_init_eeprom_params_X540, +	.read                   = &ixgbe_read_eerd_X540, +	.read_buffer		= &ixgbe_read_eerd_buffer_X540, +	.write                  = &ixgbe_write_eewr_X540, +	.write_buffer		= &ixgbe_write_eewr_buffer_X540, +	.calc_checksum		= &ixgbe_calc_eeprom_checksum_X540, +	.validate_checksum      = &ixgbe_validate_eeprom_checksum_X540, +	.update_checksum        = &ixgbe_update_eeprom_checksum_X540, +}; + +static struct ixgbe_phy_operations phy_ops_X540 = { +	.identify               = &ixgbe_identify_phy_generic, +	.identify_sfp           = &ixgbe_identify_sfp_module_generic, +	.init			= NULL, +	.reset                  = NULL, +	.read_reg               = &ixgbe_read_phy_reg_generic, +	.write_reg              = &ixgbe_write_phy_reg_generic, +	.setup_link             = &ixgbe_setup_phy_link_generic, +	.setup_link_speed       = &ixgbe_setup_phy_link_speed_generic, +	.read_i2c_byte          = &ixgbe_read_i2c_byte_generic, +	.write_i2c_byte         = &ixgbe_write_i2c_byte_generic, +	.read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic, +	.write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic, +	.check_overtemp         = &ixgbe_tn_check_overtemp, +}; + +struct ixgbe_info ixgbe_X540_info = { +	.mac                    = ixgbe_mac_X540, +	.get_invariants         = &ixgbe_get_invariants_X540, +	.mac_ops                = &mac_ops_X540, +	.eeprom_ops             = &eeprom_ops_X540, +	.phy_ops                = &phy_ops_X540, +	.mbx_ops                = &mbx_ops_generic, +};
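
The EEPROM checksum scheme implemented by ixgbe_calc_eeprom_checksum_X540 above can be exercised outside the driver. The following stand-alone sketch mirrors that routine under stated assumptions: mock_eerd_read() and the eeprom[] array are hypothetical stand-ins for ixgbe_read_eerd_generic() and the device's EEPROM shadow RAM, and the constant values (checksum word 0x3F, pointer words 0x03 and 0x06-0x0E, skipped PHY/Option ROM pointers 0x04/0x05, FW pointer 0x0F, target sum 0xBABA) are assumed to match their ixgbe_type.h definitions rather than taken from this diff.

/*
 * Illustrative user-space sketch of the X540 EEPROM checksum algorithm.
 * Not part of the driver; the EEPROM is mocked with a flat array.
 */
#include <stdint.h>
#include <stdio.h>

#define EEPROM_WORDS      0x800   /* assumed shadow-RAM size for the mock     */
#define EEPROM_CHECKSUM   0x3F    /* word that holds the stored checksum      */
#define PCIE_ANALOG_PTR   0x03    /* first pointer word covered by the sum    */
#define PHY_PTR           0x04    /* skipped: PHY module pointer              */
#define OPTION_ROM_PTR    0x05    /* skipped: Option ROM pointer              */
#define FW_PTR            0x0F    /* first pointer excluded from the sum      */
#define EEPROM_SUM        0xBABA  /* all covered words must add up to this    */

static uint16_t eeprom[EEPROM_WORDS];   /* mock EEPROM shadow RAM */

/* Stand-in for ixgbe_read_eerd_generic(): returns 0 on success. */
static int mock_eerd_read(uint16_t offset, uint16_t *data)
{
	if (offset >= EEPROM_WORDS)
		return -1;
	*data = eeprom[offset];
	return 0;
}

static uint16_t calc_checksum(void)
{
	uint16_t checksum = 0, word, pointer, length, i, j;

	/* Words 0x00-0x3E are always part of the checksum. */
	for (i = 0; i < EEPROM_CHECKSUM; i++) {
		if (mock_eerd_read(i, &word))
			break;
		checksum += word;
	}

	/* Walk pointer words 0x03, 0x06-0x0E and add each pointed-to section. */
	for (i = PCIE_ANALOG_PTR; i < FW_PTR; i++) {
		if (i == PHY_PTR || i == OPTION_ROM_PTR)
			continue;
		if (mock_eerd_read(i, &pointer))
			break;
		if (pointer == 0xFFFF || pointer == 0 || pointer >= EEPROM_WORDS)
			continue;
		if (mock_eerd_read(pointer, &length))
			break;
		if (length == 0xFFFF || length == 0 ||
		    pointer + length >= EEPROM_WORDS)
			continue;
		for (j = pointer + 1; j <= pointer + length; j++) {
			if (mock_eerd_read(j, &word))
				break;
			checksum += word;
		}
	}

	/* The stored checksum word is whatever makes the total equal EEPROM_SUM. */
	return (uint16_t)(EEPROM_SUM - checksum);
}

int main(void)
{
	eeprom[EEPROM_CHECKSUM] = calc_checksum();
	printf("stored checksum word: 0x%04X\n", eeprom[EEPROM_CHECKSUM]);
	return 0;
}

Because the stored word is the complement that brings the covered words up to 0xBABA, ixgbe_validate_eeprom_checksum_X540 only needs to recompute the sum and compare it against word 0x3F, which is exactly what the routine above does after taking the EEP_SM semaphore.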