| author | Ben Hutchings <bhutchings@solarflare.com> | 2012-12-20 18:48:20 +0000 | 
|---|---|---|
| committer | Ben Hutchings <bhutchings@solarflare.com> | 2013-02-26 14:55:49 +0000 | 
| commit | 3a68f19d7afb80f548d016effbc6ed52643a8085 (patch) | |
| tree | 0d087eac21de23332dddb6b27406d9c9919b0bc1 | |
| parent | eb970ff07c15f13eb474f643fd165ebe3e4e24b2 (diff) | |
sfc: Properly sync RX DMA buffer when it is not the last in the page
We may currently allocate two RX DMA buffers to a page, and only unmap
the page when the second is completed.  We do not sync the first RX
buffer to be completed; this can result in packet loss or corruption
if the last RX buffer completed in a NAPI poll is the first in a page
and is not DMA-coherent.  (In the middle of a NAPI poll, we will
handle the following RX completion and unmap the page *before* looking
at the content of the first buffer.)
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
| -rw-r--r-- | drivers/net/ethernet/sfc/rx.c | 15 | 
1 file changed, 10 insertions, 5 deletions
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d780a0d096b4..a5754916478e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-				struct efx_rx_buffer *rx_buf)
+				struct efx_rx_buffer *rx_buf,
+				unsigned int used_len)
 {
 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       DMA_FROM_DEVICE);
+		} else if (used_len) {
+			dma_sync_single_for_cpu(&efx->pci_dev->dev,
+						rx_buf->dma_addr, used_len,
+						DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 			       struct efx_rx_buffer *rx_buf)
 {
-	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		goto out;
 	}
 
-	/* Release card resources - assumes all RX buffers consumed in-order
-	 * per RX queue
+	/* Release and/or sync DMA mapping - assumes all RX buffers
+	 * consumed in-order per RX queue
 	 */
-	efx_unmap_rx_buffer(efx, rx_buf);
+	efx_unmap_rx_buffer(efx, rx_buf, len);
 
 	/* Prefetch nice and early so data will (hopefully) be in cache by
 	 * the time we look at it.
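For readers less familiar with the streaming DMA API, the sketch below illustrates the general pattern the fix relies on. It is not the sfc driver's actual code; the names demo_rx_buf and demo_rx_complete are invented for illustration, and the real driver tracks the page state with a refcount in struct efx_rx_page_state. The idea: when several RX buffers share one mapped page, only the completion of the page's last buffer may unmap it, and any earlier completion must instead call dma_sync_single_for_cpu() over the bytes the NIC actually wrote, so the CPU sees the received data even without cache-coherent DMA.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical per-buffer state for this sketch only. */
struct demo_rx_buf {
	dma_addr_t dma_addr;	/* bus address of this buffer's region */
	bool last_in_page;	/* completing this buffer releases the page */
};

static void demo_rx_complete(struct device *dev, struct demo_rx_buf *buf,
			     dma_addr_t page_dma_addr, size_t page_size,
			     unsigned int used_len)
{
	if (buf->last_in_page) {
		/* Last buffer in the page: unmapping hands the whole
		 * page back to the CPU, so no extra sync is needed. */
		dma_unmap_page(dev, page_dma_addr, page_size,
			       DMA_FROM_DEVICE);
	} else if (used_len) {
		/* Earlier buffer: the page must stay mapped for its
		 * remaining buffer, so explicitly sync the bytes the
		 * device wrote before the stack reads them. */
		dma_sync_single_for_cpu(dev, buf->dma_addr, used_len,
					DMA_FROM_DEVICE);
	}
}
```

Without that sync, a non-coherent cache (or a bounce buffer) can leave the CPU reading stale data for the first buffer whenever its page-mate completes within the same NAPI poll, which is exactly the corruption described in the commit message.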
