| author | Amit Cohen <amcohen@nvidia.com> | 2024-10-25 16:26:26 +0200 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2024-10-30 18:24:39 -0700 |
| commit | 15f73e601a9c67aa83bde92b2d940a6532d8614d (patch) | |
| tree | 9b7e479a7690142d5a6fa6146785a31e1ed6d94c /drivers | |
| parent | 0a66e5582b5102c4d7b866b977ff7c850c1174ce (diff) | |
mlxsw: pci: Sync Rx buffers for CPU
When an Rx packet is received, the driver should sync its pages for the CPU,
to ensure the CPU reads the data that the device wrote and not stale data
from its cache.
Add the missing sync call to the Rx path, syncing the actual data length of
each fragment.
Cc: Jiri Pirko <jiri@resnulli.us>
Fixes: b5b60bb491b2 ("mlxsw: pci: Use page pool for Rx buffers allocation")
Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Link: https://patch.msgid.link/461486fac91755ca4e04c2068c102250026dcd0b.1729866134.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
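To make the ordering requirement concrete, here is a minimal sketch of the pattern the fix enforces for the linear-only (single page) case. It is illustrative only, not the driver's actual code: mlxsw_rx_build_skb_sketch() is a hypothetical function, and the real driver also handles multi-fragment packets, as the diff below shows.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>

/* Hypothetical helper showing the ordering the fix enforces: the page must
 * be synced for the CPU *before* any packet data is read from it, otherwise
 * the CPU may see stale cache lines instead of the device's DMA writes.
 */
static struct sk_buff *mlxsw_rx_build_skb_sketch(struct page_pool *pool,
						 struct page *page,
						 u16 byte_count)
{
	struct sk_buff *skb;
	void *data;

	/* Sync only the bytes the device actually wrote, starting past
	 * the reserved headroom.
	 */
	page_pool_dma_sync_for_cpu(pool, page, MLXSW_PCI_SKB_HEADROOM,
				   byte_count);

	data = page_address(page);
	net_prefetch(data);	/* safe only after the sync above */

	skb = napi_build_skb(data, PAGE_SIZE);
	if (unlikely(!skb))
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
	skb_put(skb, byte_count);
	return skb;
}
```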
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlxsw/pci.c | 22 |
1 file changed, 15 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 060e5b939211..2320a5f323b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -389,15 +389,27 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
 	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
 }
 
-static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
+static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
+					       struct page *pages[],
 					       u16 byte_count)
 {
+	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
 	unsigned int linear_data_size;
+	struct page_pool *page_pool;
 	struct sk_buff *skb;
 	int page_index = 0;
 	bool linear_only;
 	void *data;
 
+	linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
+	linear_data_size = linear_only ? byte_count :
+					 PAGE_SIZE -
+					 MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+
+	page_pool = cq->u.cq.page_pool;
+	page_pool_dma_sync_for_cpu(page_pool, pages[page_index],
+				   MLXSW_PCI_SKB_HEADROOM, linear_data_size);
+
 	data = page_address(pages[page_index]);
 	net_prefetch(data);
 
@@ -405,11 +417,6 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
 	if (unlikely(!skb))
 		return ERR_PTR(-ENOMEM);
 
-	linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
-	linear_data_size = linear_only ? byte_count :
-					 PAGE_SIZE -
-					 MLXSW_PCI_RX_BUF_SW_OVERHEAD;
-
 	skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
 	skb_put(skb, linear_data_size);
 
@@ -425,6 +432,7 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
 
 		page = pages[page_index];
 		frag_size = min(byte_count, PAGE_SIZE);
+		page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 				0, frag_size, PAGE_SIZE);
 		byte_count -= frag_size;
@@ -760,7 +768,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 	if (err)
 		goto out;
 
-	skb = mlxsw_pci_rdq_build_skb(pages, byte_count);
+	skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
 	if (IS_ERR(skb)) {
 		dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
 		mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
```
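For reference, page_pool_dma_sync_for_cpu() is a thin inline wrapper around the generic DMA API; its definition in include/net/page_pool/helpers.h is along these lines (sketched here, consult the tree for the authoritative version):

```c
/* Sketch of the helper's core: resolve the page's DMA address and delegate
 * to dma_sync_single_range_for_cpu() using the pool's device, configured
 * base offset, and DMA direction. The reverse sync for the device happens
 * inside the pool when pages are recycled (PP_FLAG_DMA_SYNC_DEV).
 */
static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
					      const struct page *page,
					      u32 offset, u32 dma_sync_size)
{
	dma_sync_single_range_for_cpu(pool->p.dev,
				      page_pool_get_dma_addr(page),
				      offset + pool->p.offset, dma_sync_size,
				      page_pool_dma_dir(pool));
}
```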