Diffstat (limited to 'drivers/net/ethernet/sfc/nic.c')
-rw-r--r--      drivers/net/ethernet/sfc/nic.c      94
1 file changed, 77 insertions, 17 deletions
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index eaa8e874a3c..b0503cd8c2a 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -305,11 +305,11 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 			 unsigned int len)
 {
 	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
-					  &buffer->dma_addr, GFP_ATOMIC);
+					  &buffer->dma_addr,
+					  GFP_ATOMIC | __GFP_ZERO);
 	if (!buffer->addr)
 		return -ENOMEM;
 	buffer->len = len;
-	memset(buffer->addr, 0, len);
 	return 0;
 }
 
@@ -592,12 +592,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
 	bool iscsi_digest_en = is_b0;
+	bool jumbo_en;
+
+	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+	 * DMA to continue after a PCIe page boundary (and scattering
+	 * is not possible).  In Falcon B0 and Siena, it enables
+	 * scatter.
+	 */
+	jumbo_en = !is_b0 || efx->rx_scatter;
 
 	netif_dbg(efx, hw, efx->net_dev,
 		  "RX queue %d ring in special buffers %d-%d\n",
 		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
 		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
+	rx_queue->scatter_n = 0;
+
 	/* Pin RX descriptor ring */
 	efx_init_special_buffer(efx, &rx_queue->rxd);
 
@@ -614,8 +624,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
 			      FRF_AZ_RX_DESCQ_SIZE,
 			      __ffs(rx_queue->rxd.entries),
 			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
-			      /* For >=B0 this is scatter so disable */
-			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
 			      FRF_AZ_RX_DESCQ_EN, 1);
 	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
 			 efx_rx_queue_index(rx_queue));
 
@@ -969,13 +978,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 		EFX_RX_PKT_DISCARD : 0;
 }
 
-/* Handle receive events that are not in-order. */
-static void
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
 {
+	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned expected, dropped;
 
+	if (rx_queue->scatter_n &&
+	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+		      rx_queue->ptr_mask)) {
+		++channel->n_rx_nodesc_trunc;
+		return true;
+	}
+
 	expected = rx_queue->removed_count & rx_queue->ptr_mask;
 	dropped = (index - expected) & rx_queue->ptr_mask;
 	netif_info(efx, rx_err, efx->net_dev,
@@ -984,6 +1004,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
 	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
 			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+	return false;
 }
 
 /* Handle a packet received event
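Note on the hunks above: the first hunk folds the explicit memset() into the allocation by passing __GFP_ZERO to dma_alloc_coherent(), so the buffer comes back already zeroed. A minimal sketch of the same pattern outside the sfc driver (the helper name, device pointer and callers are illustrative, not part of this commit):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative helper (hypothetical name): allocate a coherent DMA
 * buffer that the allocator has already zeroed, instead of zeroing
 * it by hand with memset() afterwards.
 */
static int example_alloc_zeroed(struct device *dev, void **cpu_addr,
                                dma_addr_t *dma_addr, size_t len)
{
        /* GFP_ATOMIC: callers may run in atomic context;
         * __GFP_ZERO: ask the allocator for pre-zeroed memory.
         */
        *cpu_addr = dma_alloc_coherent(dev, len, dma_addr,
                                       GFP_ATOMIC | __GFP_ZERO);
        if (!*cpu_addr)
                return -ENOMEM;
        return 0;
}

The jumbo_en change in the same region reuses the existing FRF_AZ_RX_DESCQ_JUMBO field: on Falcon A1 it keeps the old pre-B0 behaviour, while on B0/Siena it now follows efx->rx_scatter instead of being forced off. The new early-return case in efx_handle_rx_bad_index() (index pointing at the last descriptor of the current scatter sequence) is counted via n_rx_nodesc_trunc and handled as a partial packet discard rather than scheduling a reset.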
@@ -999,7 +1020,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
 	unsigned expected_ptr;
-	bool rx_ev_pkt_ok;
+	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
 	u16 flags;
 	struct efx_rx_queue *rx_queue;
 	struct efx_nic *efx = channel->efx;
@@ -1007,21 +1028,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
 		return;
 
-	/* Basic packet information */
-	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
-	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
-	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
-	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
-	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
 	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
 		channel->channel);
 
 	rx_queue = efx_channel_get_rx_queue(channel);
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
-	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
-	if (unlikely(rx_ev_desc_ptr != expected_ptr))
-		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+			rx_queue->ptr_mask);
+
+	/* Check for partial drops and other errors */
+	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+		if (rx_ev_desc_ptr != expected_ptr &&
+		    !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+			return;
+
+		/* Discard all pending fragments */
+		if (rx_queue->scatter_n) {
+			efx_rx_packet(
+				rx_queue,
+				rx_queue->removed_count & rx_queue->ptr_mask,
+				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+			rx_queue->removed_count += rx_queue->scatter_n;
+			rx_queue->scatter_n = 0;
+		}
+
+		/* Return if there is no new fragment */
+		if (rx_ev_desc_ptr != expected_ptr)
+			return;
+
+		/* Discard new fragment if not SOP */
+		if (!rx_ev_sop) {
+			efx_rx_packet(
+				rx_queue,
+				rx_queue->removed_count & rx_queue->ptr_mask,
+				1, 0, EFX_RX_PKT_DISCARD);
+			++rx_queue->removed_count;
+			return;
+		}
+	}
+
+	++rx_queue->scatter_n;
+	if (rx_ev_cont)
+		return;
+
+	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
 
 	if (likely(rx_ev_pkt_ok)) {
 		/* If packet is marked as OK and packet type is TCP/IP or
@@ -1049,7 +1105,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	channel->irq_mod_score += 2;
 
 	/* Handle received packet */
-	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
+	efx_rx_packet(rx_queue,
+		      rx_queue->removed_count & rx_queue->ptr_mask,
+		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+	rx_queue->removed_count += rx_queue->scatter_n;
+	rx_queue->scatter_n = 0;
 }
 
 /* If this flush done event corresponds to a &struct efx_tx_queue, then
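The rewritten efx_handle_rx_event() is a small state machine: scatter_n counts descriptors consumed by the packet currently being assembled, the expected ring index is (removed_count + scatter_n) & ptr_mask, and the SOP/JUMBO_CONT flags decide whether an event starts, continues, or completes a packet. A self-contained user-space sketch of that accounting (struct rxq, handle_rx_event and RING_ENTRIES are invented for illustration, and the bad-index recovery path is simplified to a plain discard):

#include <stdbool.h>
#include <stdio.h>

#define RING_ENTRIES    8               /* power of two, like the real rings */
#define PTR_MASK        (RING_ENTRIES - 1)

struct rxq {
        unsigned int removed_count;     /* descriptors completed so far */
        unsigned int scatter_n;         /* fragments of packet in progress */
};

/* Process one RX completion event; returns true when a complete packet
 * (possibly spanning several descriptors) has been assembled.
 */
static bool handle_rx_event(struct rxq *q, unsigned int desc_ptr,
                            bool sop, bool cont)
{
        unsigned int expected = (q->removed_count + q->scatter_n) & PTR_MASK;

        if (desc_ptr != expected || sop != (q->scatter_n == 0)) {
                /* Discard any packet under assembly (the driver hands the
                 * fragments up with EFX_RX_PKT_DISCARD so the buffers are
                 * recycled; here we only fix up the counters).
                 */
                q->removed_count += q->scatter_n;
                q->scatter_n = 0;

                if (desc_ptr != expected)
                        return false;   /* no usable new fragment */

                if (!sop) {             /* stray continuation fragment */
                        q->removed_count++;
                        return false;
                }
        }

        q->scatter_n++;                 /* this event consumed one descriptor */
        if (cont)
                return false;           /* more fragments to come */

        q->removed_count += q->scatter_n;       /* packet complete */
        q->scatter_n = 0;
        return true;
}

int main(void)
{
        struct rxq q = { 0, 0 };

        /* One packet scattered across three descriptors */
        printf("%d\n", handle_rx_event(&q, 0, true, true));    /* 0: partial */
        printf("%d\n", handle_rx_event(&q, 1, false, true));   /* 0: partial */
        printf("%d\n", handle_rx_event(&q, 2, false, false));  /* 1: complete */
        return 0;
}

Note how efx_rx_packet() gains two extra arguments in this commit: it now takes the index of the first descriptor of the packet (removed_count & ptr_mask) and the number of fragments (scatter_n), rather than a single descriptor index, which is what allows a packet to span multiple receive buffers.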