diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-01-12 13:52:21 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-01-12 13:52:21 -0800 |
commit | bf9ca811bbadd7d853469d58284ed87906cc9321 (patch) | |
tree | ee7803b1615dbf0f9af6df402af7a63ed983ad37 /drivers/net | |
parent | 141d9c6e003b806d8faeddeec7053ee2691ea61a (diff) | |
parent | d24b923f1d696ddacb09f0f2d1b1f4f045cfe65e (diff) |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"Small cycle, with some typical driver updates:
- General code tidying in siw, hfi1, irdma, usnic, hns, rtrs and
bnxt_re
- Many small siw cleanups without an overarching theme
- Debugfs stats for hns
- Fix a TX queue timeout in IPoIB and missed locking of the mcast
list
- Support more features of P7 devices in bnxt_re including a new work
submission protocol
- CQ interrupts for MANA
- netlink stats for erdma
- EFA multipath PCI support
- Fix incorrect MR invalidation in iser"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (66 commits)
RDMA/bnxt_re: Fix error code in bnxt_re_create_cq()
RDMA/efa: Add EFA query MR support
IB/iser: Prevent invalidating wrong MR
RDMA/erdma: Add hardware statistics support
RDMA/erdma: Introduce dma pool for hardware responses of CMDQ requests
IB/iser: iscsi_iser.h: fix kernel-doc warning and spellos
RDMA/mana_ib: Add CQ interrupt support for RAW QP
RDMA/mana_ib: query device capabilities
RDMA/mana_ib: register RDMA device with GDMA
RDMA/bnxt_re: Fix the sparse warnings
RDMA/bnxt_re: Fix the offset for GenP7 adapters for user applications
RDMA/bnxt_re: Share a page to expose per CQ info with userspace
RDMA/bnxt_re: Add UAPI to share a page with user space
IB/ipoib: Fix mcast list locking
RDMA/mlx5: Expose register c0 for RDMA device
net/mlx5: E-Switch, expose eswitch manager vport
net/mlx5: Manage ICM type of SW encap
RDMA/mlx5: Support handling of SW encap ICM area
net/mlx5: Introduce indirect-sw-encap ICM properties
RDMA/bnxt_re: Adds MSN table capability for Gen P7 adapters
...
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c | 38 | ||||
-rw-r--r-- | drivers/net/ethernet/microsoft/mana/gdma_main.c | 5 |
3 files changed, 42 insertions, 8 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index b4eb17141edf..349e28a6dd8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -618,13 +618,6 @@ static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw) return esw && MLX5_ESWITCH_MANAGER(esw->dev); } -/* The returned number is valid only when the dev is eswitch manager. */ -static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) -{ - return mlx5_core_is_ecpf_esw_manager(dev) ? - MLX5_VPORT_ECPF : MLX5_VPORT_PF; -} - static inline bool mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c index 9482e51ac82a..7c5516b0a844 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c @@ -13,11 +13,13 @@ struct mlx5_dm { unsigned long *steering_sw_icm_alloc_blocks; unsigned long *header_modify_sw_icm_alloc_blocks; unsigned long *header_modify_pattern_sw_icm_alloc_blocks; + unsigned long *header_encap_sw_icm_alloc_blocks; }; struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) { u64 header_modify_pattern_icm_blocks = 0; + u64 header_sw_encap_icm_blocks = 0; u64 header_modify_icm_blocks = 0; u64 steering_icm_blocks = 0; struct mlx5_dm *dm; @@ -54,6 +56,17 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) goto err_modify_hdr; } + if (MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size)) { + header_sw_encap_icm_blocks = + BIT(MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size) - + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); + + dm->header_encap_sw_icm_alloc_blocks = + bitmap_zalloc(header_sw_encap_icm_blocks, GFP_KERNEL); + if (!dm->header_encap_sw_icm_alloc_blocks) + goto err_pattern; + } + support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) && 
MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) && MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address); @@ -66,11 +79,14 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) dm->header_modify_pattern_sw_icm_alloc_blocks = bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL); if (!dm->header_modify_pattern_sw_icm_alloc_blocks) - goto err_pattern; + goto err_sw_encap; } return dm; +err_sw_encap: + bitmap_free(dm->header_encap_sw_icm_alloc_blocks); + err_pattern: bitmap_free(dm->header_modify_sw_icm_alloc_blocks); @@ -105,6 +121,14 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev) bitmap_free(dm->header_modify_sw_icm_alloc_blocks); } + if (dm->header_encap_sw_icm_alloc_blocks) { + WARN_ON(!bitmap_empty(dm->header_encap_sw_icm_alloc_blocks, + BIT(MLX5_CAP_DEV_MEM(dev, + log_indirect_encap_sw_icm_size) - + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)))); + bitmap_free(dm->header_encap_sw_icm_alloc_blocks); + } + if (dm->header_modify_pattern_sw_icm_alloc_blocks) { WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks, BIT(MLX5_CAP_DEV_MEM(dev, @@ -164,6 +188,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, log_header_modify_pattern_sw_icm_size); block_map = dm->header_modify_pattern_sw_icm_alloc_blocks; break; + case MLX5_SW_ICM_TYPE_SW_ENCAP: + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, + indirect_encap_sw_icm_start_address); + log_icm_size = MLX5_CAP_DEV_MEM(dev, + log_indirect_encap_sw_icm_size); + block_map = dm->header_encap_sw_icm_alloc_blocks; + break; default: return -EINVAL; } @@ -242,6 +273,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type header_modify_pattern_sw_icm_start_address); block_map = dm->header_modify_pattern_sw_icm_alloc_blocks; break; + case MLX5_SW_ICM_TYPE_SW_ENCAP: + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, + indirect_encap_sw_icm_start_address); + block_map = dm->header_encap_sw_icm_alloc_blocks; + break; default: return -EINVAL; } diff --git 
a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index a6863011d682..d33b27214539 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -158,6 +158,9 @@ static int mana_gd_detect_devices(struct pci_dev *pdev) if (dev_type == GDMA_DEVICE_MANA) { gc->mana.gdma_context = gc; gc->mana.dev_id = dev; + } else if (dev_type == GDMA_DEVICE_MANA_IB) { + gc->mana_ib.dev_id = dev; + gc->mana_ib.gdma_context = gc; } } @@ -967,6 +970,7 @@ int mana_gd_register_device(struct gdma_dev *gd) return 0; } +EXPORT_SYMBOL_NS(mana_gd_register_device, NET_MANA); int mana_gd_deregister_device(struct gdma_dev *gd) { @@ -997,6 +1001,7 @@ int mana_gd_deregister_device(struct gdma_dev *gd) return err; } +EXPORT_SYMBOL_NS(mana_gd_deregister_device, NET_MANA); u32 mana_gd_wq_avail_space(struct gdma_queue *wq) { |