author     Jason Gunthorpe <jgg@nvidia.com>    2023-10-31 10:54:48 -0300
committer  Jason Gunthorpe <jgg@nvidia.com>    2023-10-31 10:54:48 -0300
commit     162e3480246ef69386d4647d2320d86741bf08a2
tree       caada42ce40114e7abe5e74acb54781ad4aa424a /drivers/infiniband/hw/mlx5
parent     d4b2d165714c0ce8777d5131f6e0aad617b7adc4
parent     ffc253263a1375a65fa6c9f62a893e9767fbebfa
Merge tag 'v6.6' into rdma.git for-next
Resolve conflict by taking the spin_lock hunk from for-next:
https://lore.kernel.org/r/20230928113851.5197a1ec@canb.auug.org.au
Required for the next patch.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c   |  2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c |  2
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c   | 14
3 files changed, 13 insertions, 5 deletions
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 1e419e080b53..520034acf73a 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -2470,8 +2470,8 @@ destroy_res:
 	mlx5_steering_anchor_destroy_res(ft_prio);
 put_flow_table:
 	put_flow_table(dev, ft_prio, true);
-	mutex_unlock(&dev->flow_db->lock);
 free_obj:
+	mutex_unlock(&dev->flow_db->lock);
 	kfree(obj);
 	return err;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ec7c45272764..e39e4b06712a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2089,7 +2089,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
 	case MLX5_IB_MMAP_DEVICE_MEM:
 		return "Device Memory";
 	default:
-		return NULL;
+		return "Unknown";
 	}
 }
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b0fa2d644973..1d6c54a53df6 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -239,7 +239,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
 
 static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
 {
-	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
+	set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
+				      ent->dev->umrc.pd);
 	MLX5_SET(mkc, mkc, free, 1);
 	MLX5_SET(mkc, mkc, umr_en, 1);
 	MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
@@ -998,19 +999,26 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 	if (!dev->cache.wq)
 		return;
 
-	cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
 	mutex_lock(&dev->cache.rb_lock);
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
 		spin_lock_irq(&ent->mkeys_queue.lock);
 		ent->disabled = true;
 		spin_unlock_irq(&ent->mkeys_queue.lock);
-		cancel_delayed_work_sync(&ent->dwork);
 	}
+	mutex_unlock(&dev->cache.rb_lock);
+
+	/*
+	 * After all entries are disabled and will not reschedule on WQ,
+	 * flush it and all async commands.
+	 */
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
+	/* At this point all entries are disabled and have no concurrent work. */
+	mutex_lock(&dev->cache.rb_lock);
 	node = rb_first(root);
 	while (node) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);