author    Ajay Sharma <sharmaajay@microsoft.com>    2022-11-03 12:16:29 -0700
committer Leon Romanovsky <leonro@nvidia.com>       2022-11-10 07:57:27 +0200
commit    28c66cfa45388af1126985d1114e0ed762eb2abd (patch)
tree      9092ab25d0f457ee5dac39e7fbb6c8bd884786c0 /drivers/net/ethernet/microsoft/mana/gdma_main.c
parent    f72ececfc197e9b0bbb5595294908a950cf444fa (diff)
net: mana: Define data structures for protection domain and memory registration
The MANA hardware supports protection domains and memory registration for
use in an RDMA environment. Add those definitions and expose them for use
by the RDMA driver.

Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
Signed-off-by: Long Li <longli@microsoft.com>
Link: https://lore.kernel.org/r/1667502990-2559-12-git-send-email-longli@linuxonhyperv.com
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Acked-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
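For context, a minimal caller sketch of the newly exported mana_gd_destroy_dma_region() follows; it shows how a consumer such as the RDMA driver might release a DMA region handle and check the function's new int return value. mana_gd_destroy_dma_region(), gdma_obj_handle_t and GDMA_INVALID_DMA_REGION are taken from this patch; the include path, the example_mr struct and the surrounding function are illustrative assumptions, not part of the patch.

/*
 * Hypothetical caller sketch (not part of this patch): release a DMA
 * region once a memory registration is torn down.  The header path,
 * example_mr and example_dereg_mr() are assumed names for illustration.
 */
#include <net/mana/gdma.h>	/* assumed location of the GDMA definitions */

struct example_mr {
	gdma_obj_handle_t dma_region_handle;	/* handle saved at create time */
};

static int example_dereg_mr(struct gdma_context *gc, struct example_mr *mr)
{
	int err;

	/* Destroying GDMA_INVALID_DMA_REGION is a no-op and returns 0. */
	err = mana_gd_destroy_dma_region(gc, mr->dma_region_handle);
	if (err)
		return err;	/* -EPROTO if the hardware rejected the request */

	mr->dma_region_handle = GDMA_INVALID_DMA_REGION;
	return 0;
}

Returning an error (rather than void, as before) lets such a caller propagate a failed teardown instead of silently leaking the hardware resource.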
Diffstat (limited to 'drivers/net/ethernet/microsoft/mana/gdma_main.c')
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c  27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 69795bc679e7..46a7d1e6ece9 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -198,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
req.type = queue->type;
req.pdid = queue->gdma_dev->pdid;
req.doorbell_id = queue->gdma_dev->doorbell;
- req.gdma_region = queue->mem_info.gdma_region;
+ req.gdma_region = queue->mem_info.dma_region_handle;
req.queue_size = queue->queue_size;
req.log2_throttle_limit = queue->eq.log2_throttle_limit;
req.eq_pci_msix_index = queue->eq.msix_index;
@@ -212,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
queue->id = resp.queue_index;
queue->eq.disable_needed = true;
- queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
return 0;
}
@@ -671,24 +671,30 @@ free_q:
return err;
}
-static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+ gdma_obj_handle_t dma_region_handle)
{
struct gdma_destroy_dma_region_req req = {};
struct gdma_general_resp resp = {};
int err;
- if (gdma_region == GDMA_INVALID_DMA_REGION)
- return;
+ if (dma_region_handle == GDMA_INVALID_DMA_REGION)
+ return 0;
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
sizeof(resp));
- req.gdma_region = gdma_region;
+ req.dma_region_handle = dma_region_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
- if (err || resp.hdr.status)
+ if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
err, resp.hdr.status);
+ return -EPROTO;
+ }
+
+ return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_mem_info *gmi)
@@ -733,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
if (err)
goto out;
- if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
+ if (resp.hdr.status ||
+ resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
resp.hdr.status);
err = -EPROTO;
goto out;
}
- gmi->gdma_region = resp.gdma_region;
+ gmi->dma_region_handle = resp.dma_region_handle;
out:
kfree(req);
return err;
@@ -863,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
return;
}
- mana_gd_destroy_dma_region(gc, gmi->gdma_region);
+ mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
mana_gd_free_memory(gmi);
kfree(queue);
}