diff options
| author | David Woodhouse <David.Woodhouse@intel.com> | 2008-07-11 14:36:25 +0100 | 
|---|---|---|
| committer | David Woodhouse <David.Woodhouse@intel.com> | 2008-07-11 14:36:25 +0100 | 
| commit | a8931ef380c92d121ae74ecfb03b2d63f72eea6f (patch) | |
| tree | 980fb6b019e11e6cb1ece55b7faff184721a8053 /drivers/net/sfc/falcon.c | |
| parent | 90574d0a4d4b73308ae54a2a57a4f3f1fa98e984 (diff) | |
| parent | e5a5816f7875207cb0a0a7032e39a4686c5e10a4 (diff) | |
| download | olio-linux-3.10-a8931ef380c92d121ae74ecfb03b2d63f72eea6f.tar.xz olio-linux-3.10-a8931ef380c92d121ae74ecfb03b2d63f72eea6f.zip  | |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/net/sfc/falcon.c')
| -rw-r--r-- | drivers/net/sfc/falcon.c | 97 | 
1 files changed, 45 insertions, 52 deletions
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 46db549ce58..790db89db34 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");   **************************************************************************   */ -/* DMA address mask (up to 46-bit, avoiding compiler warnings) - * - * Note that it is possible to have a platform with 64-bit longs and - * 32-bit DMA addresses, or vice versa.  EFX_DMA_MASK takes care of the - * platform DMA mask. - */ -#if BITS_PER_LONG == 64 -#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL) -#else -#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL) -#endif +/* DMA address mask */ +#define FALCON_DMA_MASK DMA_BIT_MASK(46)  /* TX DMA length mask (13-bit) */  #define FALCON_TX_DMA_MASK (4096 - 1) @@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");  #define PCI_EXP_LNKSTA_LNK_WID_LBN	4  #define FALCON_IS_DUAL_FUNC(efx)		\ -	(FALCON_REV(efx) < FALCON_REV_B0) +	(falcon_rev(efx) < FALCON_REV_B0)  /**************************************************************************   * @@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)  			      TX_DESCQ_TYPE, 0,  			      TX_NON_IP_DROP_DIS_B0, 1); -	if (FALCON_REV(efx) >= FALCON_REV_B0) { +	if (falcon_rev(efx) >= FALCON_REV_B0) {  		int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);  		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);  		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); @@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)  	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,  			   tx_queue->queue); -	if (FALCON_REV(efx) < FALCON_REV_B0) { +	if (falcon_rev(efx) < FALCON_REV_B0) {  		efx_oword_t reg;  		BUG_ON(tx_queue->queue >= 128); /* HW limit */ @@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)  	efx_oword_t 
rx_desc_ptr;  	struct efx_nic *efx = rx_queue->efx;  	int rc; -	int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; +	int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;  	int iscsi_digest_en = is_b0;  	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", @@ -742,8 +733,10 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)  			continue;  		break;  	} -	if (rc) +	if (rc) {  		EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue); +		efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); +	}  	/* Remove RX descriptor ring from card */  	EFX_ZERO_OWORD(rx_desc_ptr); @@ -822,10 +815,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,  		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);  		tx_queue = &efx->tx_queue[tx_ev_q_label]; -		if (NET_DEV_REGISTERED(efx)) +		if (efx_dev_registered(efx))  			netif_tx_lock(efx->net_dev);  		falcon_notify_tx_desc(tx_queue); -		if (NET_DEV_REGISTERED(efx)) +		if (efx_dev_registered(efx))  			netif_tx_unlock(efx->net_dev);  	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&  		   EFX_WORKAROUND_10727(efx)) { @@ -884,7 +877,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,  						   RX_EV_TCP_UDP_CHKSUM_ERR);  	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);  	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); -	rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? +	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?  			  
0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));  	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); @@ -1065,7 +1058,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,  	    EFX_QWORD_FIELD(*event, XG_PHY_INTR))  		is_phy_event = 1; -	if ((FALCON_REV(efx) >= FALCON_REV_B0) && +	if ((falcon_rev(efx) >= FALCON_REV_B0) &&  	    EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))  		is_phy_event = 1; @@ -1129,6 +1122,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel,  	case RX_RECOVERY_EV_DECODE:  		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "  			"Resetting.\n", channel->channel); +		atomic_inc(&efx->rx_reset);  		efx_schedule_reset(efx,  				   EFX_WORKAROUND_6555(efx) ?  				   RESET_TYPE_RX_RECOVERY : @@ -1404,7 +1398,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)  static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)  {  	struct falcon_nic_data *nic_data = efx->nic_data; -	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; +	efx_oword_t *int_ker = efx->irq_status.addr;  	efx_oword_t fatal_intr;  	int error, mem_perr;  	static int n_int_errors; @@ -1450,8 +1444,8 @@ out:   */  static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)  { -	struct efx_nic *efx = (struct efx_nic *)dev_id; -	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; +	struct efx_nic *efx = dev_id; +	efx_oword_t *int_ker = efx->irq_status.addr;  	struct efx_channel *channel;  	efx_dword_t reg;  	u32 queues; @@ -1488,8 +1482,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)  static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)  { -	struct efx_nic *efx = (struct efx_nic *)dev_id; -	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; +	struct efx_nic *efx = dev_id; +	efx_oword_t *int_ker = efx->irq_status.addr;  	struct efx_channel *channel;  	int syserr;  	int queues; @@ -1541,9 +1535,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int 
irq, void *dev_id)   */  static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)  { -	struct efx_channel *channel = (struct efx_channel *)dev_id; +	struct efx_channel *channel = dev_id;  	struct efx_nic *efx = channel->efx; -	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; +	efx_oword_t *int_ker = efx->irq_status.addr;  	int syserr;  	efx->last_irq_cpu = raw_smp_processor_id(); @@ -1571,7 +1565,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)  	unsigned long offset;  	efx_dword_t dword; -	if (FALCON_REV(efx) < FALCON_REV_B0) +	if (falcon_rev(efx) < FALCON_REV_B0)  		return;  	for (offset = RX_RSS_INDIR_TBL_B0; @@ -1594,7 +1588,7 @@ int falcon_init_interrupt(struct efx_nic *efx)  	if (!EFX_INT_MODE_USE_MSI(efx)) {  		irq_handler_t handler; -		if (FALCON_REV(efx) >= FALCON_REV_B0) +		if (falcon_rev(efx) >= FALCON_REV_B0)  			handler = falcon_legacy_interrupt_b0;  		else  			handler = falcon_legacy_interrupt_a1; @@ -1635,12 +1629,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)  	efx_oword_t reg;  	/* Disable MSI/MSI-X interrupts */ -	efx_for_each_channel_with_interrupt(channel, efx) +	efx_for_each_channel_with_interrupt(channel, efx) {  		if (channel->irq)  			free_irq(channel->irq, channel); +	}  	/* ACK legacy interrupt */ -	if (FALCON_REV(efx) >= FALCON_REV_B0) +	if (falcon_rev(efx) >= FALCON_REV_B0)  		falcon_read(efx, &reg, INT_ISR0_B0);  	else  		falcon_irq_ack_a1(efx); @@ -1731,7 +1726,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)  	efx_oword_t temp;  	int count; -	if (FALCON_REV(efx) < FALCON_REV_B0) +	if ((falcon_rev(efx) < FALCON_REV_B0) || +	    (efx->loopback_mode != LOOPBACK_NONE))  		return;  	falcon_read(efx, &temp, MAC0_CTRL_REG_KER); @@ -1783,7 +1779,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)  {  	efx_oword_t temp; -	if (FALCON_REV(efx) < FALCON_REV_B0) +	if (falcon_rev(efx) < FALCON_REV_B0)  		return;  	/* Isolate the MAC -> RX */ @@ -1821,7 +1817,7 @@ void 
falcon_reconfigure_mac_wrapper(struct efx_nic *efx)  			     MAC_SPEED, link_speed);  	/* On B0, MAC backpressure can be disabled and packets get  	 * discarded. */ -	if (FALCON_REV(efx) >= FALCON_REV_B0) { +	if (falcon_rev(efx) >= FALCON_REV_B0) {  		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,  				    !efx->link_up);  	} @@ -1839,7 +1835,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)  	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);  	/* Unisolate the MAC -> RX */ -	if (FALCON_REV(efx) >= FALCON_REV_B0) +	if (falcon_rev(efx) >= FALCON_REV_B0)  		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);  	falcon_write(efx, &reg, RX_CFG_REG_KER);  } @@ -1854,7 +1850,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)  		return 0;  	/* Statistics fetch will fail if the MAC is in TX drain */ -	if (FALCON_REV(efx) >= FALCON_REV_B0) { +	if (falcon_rev(efx) >= FALCON_REV_B0) {  		efx_oword_t temp;  		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);  		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) @@ -1938,7 +1934,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)  static void falcon_mdio_write(struct net_device *net_dev, int phy_id,  			      int addr, int value)  { -	struct efx_nic *efx = (struct efx_nic *)net_dev->priv; +	struct efx_nic *efx = net_dev->priv;  	unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;  	efx_oword_t reg; @@ -2006,7 +2002,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,   * could be read, -1 will be returned. 
*/  static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)  { -	struct efx_nic *efx = (struct efx_nic *)net_dev->priv; +	struct efx_nic *efx = net_dev->priv;  	unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;  	efx_oword_t reg;  	int value = -1; @@ -2091,6 +2087,8 @@ static int falcon_probe_phy(struct efx_nic *efx)  			efx->phy_type);  		return -1;  	} + +	efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;  	return 0;  } @@ -2109,7 +2107,7 @@ int falcon_probe_port(struct efx_nic *efx)  	falcon_init_mdio(&efx->mii);  	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ -	if (FALCON_REV(efx) >= FALCON_REV_B0) +	if (falcon_rev(efx) >= FALCON_REV_B0)  		efx->flow_control = EFX_FC_RX | EFX_FC_TX;  	else  		efx->flow_control = EFX_FC_RX; @@ -2369,7 +2367,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)  		return -ENODEV;  	} -	switch (FALCON_REV(efx)) { +	switch (falcon_rev(efx)) {  	case FALCON_REV_A0:  	case 0xff:  		EFX_ERR(efx, "Falcon rev A0 not supported\n"); @@ -2395,7 +2393,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)  		break;  	default: -		EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); +		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));  		return -ENODEV;  	} @@ -2415,7 +2413,7 @@ int falcon_probe_nic(struct efx_nic *efx)  	/* Allocate storage for hardware specific data */  	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); -	efx->nic_data = (void *) nic_data; +	efx->nic_data = nic_data;  	/* Determine number of ports etc. 
*/  	rc = falcon_probe_nic_variant(efx); @@ -2468,14 +2466,12 @@ int falcon_probe_nic(struct efx_nic *efx)   fail5:  	falcon_free_buffer(efx, &efx->irq_status);   fail4: -	/* fall-thru */   fail3:  	if (nic_data->pci_dev2) {  		pci_dev_put(nic_data->pci_dev2);  		nic_data->pci_dev2 = NULL;  	}   fail2: -	/* fall-thru */   fail1:  	kfree(efx->nic_data);  	return rc; @@ -2487,13 +2483,10 @@ int falcon_probe_nic(struct efx_nic *efx)   */  int falcon_init_nic(struct efx_nic *efx)  { -	struct falcon_nic_data *data;  	efx_oword_t temp;  	unsigned thresh;  	int rc; -	data = (struct falcon_nic_data *)efx->nic_data; -  	/* Set up the address region register. This is only needed  	 * for the B0 FPGA, but since we are just pushing in the  	 * reset defaults this may as well be unconditional. */ @@ -2560,7 +2553,7 @@ int falcon_init_nic(struct efx_nic *efx)  	/* Set number of RSS queues for receive path. */  	falcon_read(efx, &temp, RX_FILTER_CTL_REG); -	if (FALCON_REV(efx) >= FALCON_REV_B0) +	if (falcon_rev(efx) >= FALCON_REV_B0)  		EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);  	else  		EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); @@ -2598,7 +2591,7 @@ int falcon_init_nic(struct efx_nic *efx)  	/* Prefetch threshold 2 => fetch when descriptor cache half empty */  	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);  	/* Squash TX of packets of 16 bytes or less */ -	if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) +	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))  		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);  	falcon_write(efx, &temp, TX_CFG2_REG_KER); @@ -2615,7 +2608,7 @@ int falcon_init_nic(struct efx_nic *efx)  	if (EFX_WORKAROUND_7575(efx))  		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,  					(3 * 4096) / 32); -	if (FALCON_REV(efx) >= FALCON_REV_B0) +	if (falcon_rev(efx) >= FALCON_REV_B0)  		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);  	/* RX FIFO flow control thresholds */ @@ -2631,7 +2624,7 @@ int 
falcon_init_nic(struct efx_nic *efx)  	falcon_write(efx, &temp, RX_CFG_REG_KER);  	/* Set destination of both TX and RX Flush events */ -	if (FALCON_REV(efx) >= FALCON_REV_B0) { +	if (falcon_rev(efx) >= FALCON_REV_B0) {  		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);  		falcon_write(efx, &temp, DP_CTRL_REG);  	} @@ -2645,7 +2638,7 @@ void falcon_remove_nic(struct efx_nic *efx)  	falcon_free_buffer(efx, &efx->irq_status); -	(void) falcon_reset_hw(efx, RESET_TYPE_ALL); +	falcon_reset_hw(efx, RESET_TYPE_ALL);  	/* Release the second function after the reset */  	if (nic_data->pci_dev2) {  |