author	William Tu <witu@nvidia.com>	2024-10-31 14:58:56 +0200
committer	Jakub Kicinski <kuba@kernel.org>	2024-11-03 15:37:15 -0800
commit	355cf2749769ce7ada9afcaad8802f5ed37e88d5 (patch)
tree	75f0266c7bb50e193c9ca22058355d5e62cdc35a /drivers/net/ethernet/mellanox
parent	bb135e40129ddd254cfb474b58981313be79a631 (diff)
net/mlx5e: do not create xdp_redirect for non-uplink rep
XDP and XDP sockets require extra SQs, RQs, and CQs. Most of these
resources are created dynamically: if no XDP program is loaded, no
resources are created. One exception is the SQ/CQ created for
XDP_REDIRECT, used by other netdevs to forward packets to mlx5 for
transmission. This patch disables creation of the SQ and CQ used for
egress XDP_REDIRECT by checking whether ndo_xdp_xmit is set.

For netdevs without XDP support, such as non-uplink representors, this
saves around 0.35 MB of memory per representor netdevice per channel.
Signed-off-by: William Tu <witu@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20241031125856.530927-6-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_main.c	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2f609b92d29b..59d7a0e28f24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2514,6 +2514,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 			     struct mlx5e_params *params,
 			     struct mlx5e_channel_param *cparam)
 {
+	const struct net_device_ops *netdev_ops = c->netdev->netdev_ops;
 	struct dim_cq_moder icocq_moder = {0, 0};
 	struct mlx5e_create_cq_param ccp;
 	int err;
@@ -2534,10 +2535,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 	if (err)
 		goto err_close_icosq_cq;
 
-	c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
-	if (IS_ERR(c->xdpsq)) {
-		err = PTR_ERR(c->xdpsq);
-		goto err_close_tx_cqs;
+	if (netdev_ops->ndo_xdp_xmit) {
+		c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
+		if (IS_ERR(c->xdpsq)) {
+			err = PTR_ERR(c->xdpsq);
+			goto err_close_tx_cqs;
+		}
 	}
 
 	err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
@@ -2601,7 +2604,8 @@ err_close_rx_cq:
 	mlx5e_close_cq(&c->rq.cq);
 
 err_close_xdpredirect_sq:
-	mlx5e_close_xdpredirect_sq(c->xdpsq);
+	if (c->xdpsq)
+		mlx5e_close_xdpredirect_sq(c->xdpsq);
 
 err_close_tx_cqs:
 	mlx5e_close_tx_cqs(c);
@@ -2629,7 +2633,8 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
 	if (c->xdp)
 		mlx5e_close_cq(&c->rq_xdpsq.cq);
 	mlx5e_close_cq(&c->rq.cq);
-	mlx5e_close_xdpredirect_sq(c->xdpsq);
+	if (c->xdpsq)
+		mlx5e_close_xdpredirect_sq(c->xdpsq);
 	mlx5e_close_tx_cqs(c);
 	mlx5e_close_cq(&c->icosq.cq);
 	mlx5e_close_cq(&c->async_icosq.cq);
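
For readers skimming the change, the following is a minimal, self-contained userspace sketch of the same gating pattern, not the mlx5 driver code: struct and function names here (xdpsq, open_xdpredirect_sq, open_queues, close_queues, dummy_xdp_xmit) are simplified stand-ins. It only models the control flow the patch introduces: the redirect SQ is allocated only when ndo_xdp_xmit is implemented, and teardown tolerates an SQ that was never created.

/*
 * Illustrative sketch only; all names are simplified stand-ins for the
 * real driver structures.
 */
#include <stdio.h>
#include <stdlib.h>

struct xdpsq { int id; };		/* stand-in for the XDP_REDIRECT SQ/CQ pair */

struct net_device_ops {
	int (*ndo_xdp_xmit)(void);	/* set only when the netdev supports XDP TX */
};

static int dummy_xdp_xmit(void)
{
	return 0;
}

static struct xdpsq *open_xdpredirect_sq(void)
{
	struct xdpsq *sq = calloc(1, sizeof(*sq));

	if (sq)
		printf("redirect SQ/CQ allocated\n");
	return sq;
}

static void close_xdpredirect_sq(struct xdpsq *sq)
{
	printf("redirect SQ/CQ released\n");
	free(sq);
}

static struct xdpsq *open_queues(const struct net_device_ops *ops)
{
	/* Skip the allocation entirely for netdevs that do not implement
	 * ndo_xdp_xmit, e.g. non-uplink representors. */
	if (!ops->ndo_xdp_xmit) {
		printf("no ndo_xdp_xmit: redirect SQ/CQ not created\n");
		return NULL;
	}
	return open_xdpredirect_sq();
}

static void close_queues(struct xdpsq *sq)
{
	/* The SQ may never have been created, so guard the teardown. */
	if (sq)
		close_xdpredirect_sq(sq);
}

int main(void)
{
	struct net_device_ops uplink_ops = { .ndo_xdp_xmit = dummy_xdp_xmit };
	struct net_device_ops rep_ops = { .ndo_xdp_xmit = NULL };
	struct xdpsq *sq;

	sq = open_queues(&uplink_ops);	/* uplink: redirect SQ is created */
	close_queues(sq);

	sq = open_queues(&rep_ops);	/* non-uplink rep: creation skipped */
	close_queues(sq);
	return 0;
}

Built with any C compiler, the uplink case prints the allocation and release messages while the representor case skips them, mirroring the per-channel memory saving described in the commit message.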