Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2')
 drivers/net/ethernet/marvell/octeontx2/af/mbox.h          |    4
 drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c   |   76
 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c       |   68
 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c    |   18
 drivers/net/ethernet/marvell/octeontx2/nic/Makefile       |    1
 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c  | 1056
 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h  |  265
 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c |    4
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c  |  114
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h  |   28
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c   |    3
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c      |   19
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c    |   66
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h    |    3
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c      |   10
 drivers/net/ethernet/marvell/octeontx2/nic/rep.c          |    5
 16 files changed, 1619 insertions(+), 121 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 62c07407eb94..005ca8a056c0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -313,6 +313,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
+M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
+ msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
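
The two entries added above follow the mailbox map's existing M() convention: each row expands into a message ID, a typed allocator on the PF/VF side, and an AF-side handler resolved by name. A sketch of what the NIX_CPT_BP_ENABLE row implies (names derived from the macro, shown for orientation only):

    /* M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable,
     *   nix_bp_cfg_req, nix_bp_cfg_rsp) yields roughly:
     */
    enum { MBOX_MSG_NIX_CPT_BP_ENABLE = 0x8020 };

    /* PF/VF side: typed request allocator */
    struct nix_bp_cfg_req *
    otx2_mbox_alloc_msg_nix_cpt_bp_enable(struct mbox *mbox);

    /* AF side: handler the RVU mbox dispatcher calls for 0x8020 */
    int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
                                           struct nix_bp_cfg_req *req,
                                           struct nix_bp_cfg_rsp *rsp);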
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 148144f5b61d..a1f9ec03c2ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -917,19 +917,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
/* The 'qsize' entry dumps current Aura/Pool context Qsize
* and each context's current enable/disable status in a bitmap.
*/
-static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
+static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused,
int blktype)
{
- void (*print_qsize)(struct seq_file *filp,
+ void (*print_qsize)(struct seq_file *s,
struct rvu_pfvf *pfvf) = NULL;
- struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
int blkaddr;
- rvu = filp->private;
+ rvu = s->private;
switch (blktype) {
case BLKTYPE_NPA:
qsize_id = rvu->rvu_dbg.npa_qsize_id;
@@ -945,32 +944,28 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->file->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(s->file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- print_qsize(filp, pfvf);
+ print_qsize(s, pfvf);
return 0;
}
-static ssize_t rvu_dbg_qsize_write(struct file *filp,
+static ssize_t rvu_dbg_qsize_write(struct file *file,
const char __user *buffer, size_t count,
loff_t *ppos, int blktype)
{
char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
- struct seq_file *seqfile = filp->private_data;
+ struct seq_file *seqfile = file->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
- struct dentry *current_dir;
int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -996,13 +991,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
goto qsize_write_done;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
@@ -2704,8 +2696,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_tx_hits_miss_fops);
debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
- debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ blkaddr, &rvu_dbg_nix_qsize_fops);
debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_band_prof_ctx_fops);
debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
@@ -2854,28 +2846,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
+static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
- struct dentry *current_dir;
- char *buf;
-
- current_dir = filp->file->f_path.dentry->d_parent;
- buf = strrchr(current_dir->d_name.name, 'c');
- if (!buf)
- return -EINVAL;
-
- return kstrtoint(buf + 1, 10, lmac_id);
+ return debugfs_get_aux_num(s->file);
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
- int lmac_id, err;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_stats(filp, lmac_id);
-
- return err;
+ return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
@@ -2933,15 +2911,9 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
return 0;
}
-static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
- int err, lmac_id;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_dmac_flt(filp, lmac_id);
-
- return err;
+ return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
@@ -2980,10 +2952,10 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
- cgx, &rvu_dbg_cgx_stat_fops);
- debugfs_create_file("mac_filter", 0600,
- rvu->rvu_dbg.lmac, cgx,
+ debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file_aux_num("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx, lmac_id,
&rvu_dbg_cgx_dmac_flt_fops);
}
}
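
The debugfs rework above replaces dentry-name parsing with the aux_num mechanism: the creator stashes a small integer alongside the file, and the handlers read it back instead of inspecting the parent directory's name. A reduced sketch of the pattern (hypothetical "example" file, same API as used in the patch):

    /* Creation: associate the NIX block address with the file itself. */
    debugfs_create_file_aux_num("example", 0600, parent_dentry,
                                rvu, blkaddr, &example_fops);

    /* Use: any handler with access to the struct file can recover it. */
    static int example_show(struct seq_file *s, void *unused)
    {
            int blkaddr = debugfs_get_aux_num(s->file);

            seq_printf(s, "block address: %d\n", blkaddr);
            return 0;
    }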
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a5d1e2bddd58..613655fcd34f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -569,9 +569,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
mutex_unlock(&rvu->rsrc_lock);
}
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct msg_rsp *rsp)
+static u16 nix_get_channel(u16 chan, bool cpt_link)
+{
+ /* CPT channel for a given link channel is always
+ * assumed to be BIT(11) set in link channel.
+ */
+ return cpt_link ? chan | BIT(11) : chan;
+}
+
+static int nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp, bool cpt_link)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, pf, type, err;
@@ -579,6 +587,7 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
struct nix_bp *bp;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -589,6 +598,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
if (err)
@@ -597,8 +609,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ chan_v = nix_get_channel(chan, cpt_link);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg & ~BIT_ULL(16));
if (type == NIX_INTF_TYPE_LBK) {
@@ -617,6 +630,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, true);
+}
+
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
@@ -696,15 +723,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
return bpid;
}
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp,
+ bool cpt_link)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -717,6 +746,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
type != NIX_INTF_TYPE_SDP)
return 0;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
@@ -730,9 +762,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return -EINVAL;
}
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ chan_v = nix_get_channel(chan, cpt_link);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
cfg &= ~GENMASK_ULL(8, 0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
@@ -750,6 +784,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, true);
+}
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
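
Since the CPT channel is just the link channel with bit 11 set, the enable and disable paths walk the same NIX_AF_RX_CHANX_CFG register array for both variants. A worked example of nix_get_channel() (channel values illustrative):

    /* BIT(11) == 0x800 */
    nix_get_channel(0x005, false);  /* == 0x005, plain link channel   */
    nix_get_channel(0x005, true);   /* == 0x805, matching CPT channel */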
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index da69e454662a..1b765045aa63 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1452,23 +1452,21 @@ process_flow:
* hence modify pcifunc accordingly.
*/
- /* AF installing for a PF/VF */
- if (!req->hdr.pcifunc)
+ if (!req->hdr.pcifunc) {
+ /* AF installing for a PF/VF */
target = req->vf;
-
- /* PF installing for its VF */
- if (!from_vf && req->vf && !from_rep_dev) {
+ } else if (!from_vf && req->vf && !from_rep_dev) {
+ /* PF installing for its VF */
target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
pf_set_vfs_mac = req->default_rule &&
(req->features & BIT_ULL(NPC_DMAC));
- }
-
- /* Representor device installing for a representee */
- if (from_rep_dev && req->vf)
+ } else if (from_rep_dev && req->vf) {
+ /* Representor device installing for a representee */
target = req->vf;
- else
+ } else {
/* msg received from PF/VF */
target = req->hdr.pcifunc;
+ }
/* ignore chan_mask in case pf func is not AF, revisit later */
if (!is_pffunc_af(req->hdr.pcifunc))
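
The restructured chain resolves the flow-install target in priority order; summarizing the branches above:

    req->hdr.pcifunc == 0 (AF installing for a PF/VF)  -> target = req->vf
    PF with req->vf set, not a representor             -> target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf
    representor device with req->vf set                -> target = req->vf
    otherwise (PF/VF installing for itself)            -> target = req->hdr.pcifunc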
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index dbc971266865..cb6513ab35e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -15,5 +15,6 @@ rvu_rep-y := rep.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
new file mode 100644
index 000000000000..09a5b5268205
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <net/xfrm.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <crypto/aead.h>
+#include <crypto/gcm.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "cn10k_ipsec.h"
+
+static bool is_dev_support_ipsec_offload(struct pci_dev *pdev)
+{
+ return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
+}
+
+static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf)
+{
+ enum cn10k_cpt_hw_state_e state;
+
+ while (true) {
+ state = atomic_cmpxchg(&pf->ipsec.cpt_state,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE);
+ if (state == CN10K_CPT_HW_AVAILABLE)
+ return true;
+ if (state == CN10K_CPT_HW_UNAVAILABLE)
+ return false;
+
+ mdelay(1);
+ }
+}
+
+static void cn10k_cpt_device_set_available(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE);
+}
+
+static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE);
+}
+
+static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
+{
+ struct rsrc_attach *attach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
+ if (!attach)
+ goto unlock;
+
+ attach->cptlfs = true;
+ attach->modify = true;
+
+ /* Send attach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
+{
+ struct rsrc_detach *detach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
+ if (!detach)
+ goto unlock;
+
+ detach->partial = true;
+ detach->cptlfs = true;
+
+ /* Send detach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
+{
+ struct cpt_lf_alloc_req_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ /* PF function */
+ req->nix_pf_func = pf->pcifunc;
+ /* Enable SE-IE Engine Group */
+ req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
+
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
+{
+ mutex_lock(&pf->mbox.lock);
+ otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
+static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ req->dir = CPT_INLINE_OUTBOUND;
+ req->enable = 1;
+ req->nix_pf_func = pf->pcifunc;
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
+{
+ u64 reg_val;
+
+ /* Set Execution Enable of instruction queue */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ reg_val |= BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Set iqueue's enqueuing */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
+ reg_val |= BIT_ULL(0);
+ otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
+}
+
+static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
+{
+ u32 inflight, grb_cnt, gwb_cnt;
+ u32 nq_ptr, dq_ptr;
+ int timeout = 20;
+ u64 reg_val;
+ int cnt;
+
+ /* Disable instructions enqueuing */
+ otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
+
+ /* Wait for instruction queue to become empty.
+ * CPT_LF_INPROG.INFLIGHT count is zero
+ */
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ if (!inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
+ break;
+ }
+ } while (1);
+
+ /* Disable executions in the LF's queue,
+ * the queue should be empty at this point
+ */
+ reg_val &= ~BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Wait for instruction queue to become empty */
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ if (reg_val & BIT_ULL(31))
+ cnt = 0;
+ else
+ cnt++;
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
+ nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_NQ_PTR, reg_val);
+ dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
+ } while ((cnt < 10) && (nq_ptr != dq_ptr));
+
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
+ gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
+ if (inflight == 0 && gwb_cnt < 40 &&
+ (grb_cnt == 0 || grb_cnt == 40))
+ cnt++;
+ else
+ cnt = 0;
+ } while (cnt < 10);
+}
+
+/* Allocate memory for CPT outbound Instruction queue.
+ * Instruction queue memory format is:
+ * -----------------------------
+ * | Instruction Group memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 16 Bytes) |
+ * | |
+ * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
+ * | Flow Control (128 Bytes) |
+ * | |
+ * -----------------------------
+ * | Instruction Memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 40 x 64 Bytes) |
+ * | |
+ * -----------------------------
+ */
+static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
+ CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
+
+ iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr)
+ return -ENOMEM;
+
+ /* iq->vaddr/dma_addr points to Flow Control location */
+ iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
+ return 0;
+}
+
+static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ if (iq->real_vaddr)
+ dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
+ iq->real_dma_addr);
+
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+}
+
+static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
+{
+ u64 reg_val;
+ int ret;
+
+ /* Allocate Memory for CPT IQ */
+ ret = cn10k_outb_cptlf_iq_alloc(pf);
+ if (ret)
+ return ret;
+
+ /* Disable IQ */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
+
+ /* Set IQ size */
+ reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
+ CN10K_CPT_EXTRA_SIZE_DIV40);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
+
+ return 0;
+}
+
+static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
+{
+ int ret;
+
+ /* Initialize CPTLF Instruction Queue (IQ) */
+ ret = cn10k_outb_cptlf_iq_init(pf);
+ if (ret)
+ return ret;
+
+ /* Configure CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_config(pf);
+ if (ret)
+ goto iq_clean;
+
+ /* Enable CPTLF IQ */
+ cn10k_outb_cptlf_iq_enable(pf);
+ return 0;
+iq_clean:
+ cn10k_outb_cptlf_iq_free(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret;
+
+ /* Attach a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_attach(pf);
+ if (ret)
+ return ret;
+
+ /* Allocate a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_alloc(pf);
+ if (ret)
+ goto detach;
+
+ /* Initialize the CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_init(pf);
+ if (ret)
+ goto lf_free;
+
+ pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
+ CN10K_CPT_LF_NQX(0));
+
+ /* Set ipsec offload enabled for this device */
+ pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ cn10k_cpt_device_set_available(pf);
+ return 0;
+
+lf_free:
+ cn10k_outb_cptlf_free(pf);
+detach:
+ cn10k_outb_cptlf_detach(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
+{
+ int ret;
+
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ netdev_err(pf->netdev, "CPT LF device unavailable\n");
+ return -ENODEV;
+ }
+
+ /* Set ipsec offload disabled for this device */
+ pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ /* Disable CPTLF Instruction Queue (IQ) */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address and size to 0 */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
+
+ /* Free CPTLF IQ */
+ cn10k_outb_cptlf_iq_free(pf);
+
+ /* Free and detach CPT LF */
+ cn10k_outb_cptlf_free(pf);
+ ret = cn10k_outb_cptlf_detach(pf);
+ if (ret)
+ netdev_err(pf->netdev, "Failed to detach CPT LF\n");
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return ret;
+}
+
+static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
+ u64 size)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
+ tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, inst, size);
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+static int cn10k_wait_for_cpt_response(struct otx2_nic *pf,
+ struct cpt_res_s *res)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u64 *completion_ptr = (u64 *)res;
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ netdev_err(pf->netdev, "CPT response timeout\n");
+ return -EBUSY;
+ }
+ } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) ==
+ CN10K_CPT_COMP_E_NOTDONE);
+
+ if (!(res->compcode == CN10K_CPT_COMP_E_GOOD ||
+ res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) {
+ netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
+ res->compcode, res->doneint);
+ netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
+ res->uc_compcode, (u64)res->uc_info, res->esn);
+ }
+ return 0;
+}
+
+static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
+{
+ dma_addr_t res_iova, dptr_iova, sa_iova;
+ struct cn10k_tx_sa_s *sa_dptr;
+ struct cpt_inst_s inst = {};
+ struct cpt_res_s *res;
+ u32 sa_size, off;
+ u64 *sptr, *dptr;
+ u64 reg_val;
+ int ret;
+
+ sa_iova = sa_info->iova;
+ if (!sa_iova)
+ return -EINVAL;
+
+ res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
+ &res_iova, GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ sa_size = sizeof(struct cn10k_tx_sa_s);
+ sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
+ if (!sa_dptr) {
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
+ res_iova);
+ return -ENOMEM;
+ }
+
+ sptr = (__force u64 *)sa_info->base;
+ dptr = (__force u64 *)sa_dptr;
+ for (off = 0; off < (sa_size / 8); off++)
+ *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off));
+
+ res->compcode = CN10K_CPT_COMP_E_NOTDONE;
+ inst.res_addr = res_iova;
+ inst.dptr = (u64)dptr_iova;
+ inst.param2 = sa_size >> 3;
+ inst.dlen = sa_size;
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA;
+ inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA;
+ inst.cptr = sa_iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* Check if CPT-LF available */
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ dma_wmb();
+ ret = cn10k_wait_for_cpt_response(pf, res);
+ if (ret)
+ goto set_available;
+
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7);
+ otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
+
+set_available:
+ cn10k_cpt_device_set_available(pf);
+free_mem:
+ dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
+ return ret;
+}
+
+static int cn10k_ipsec_get_hw_ctx_offset(void)
+{
+ /* Offset on Hardware-context offset in word */
+ return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F;
+}
+
+static int cn10k_ipsec_get_ctx_push_size(void)
+{
+ /* Context push size, rounded up to a multiple of 8 bytes */
+ return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F;
+}
+
+static int cn10k_ipsec_get_aes_key_len(int key_len)
+{
+ /* key_len is aes key length in bytes */
+ switch (key_len) {
+ case 16:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_128;
+ case 24:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_192;
+ default:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_256;
+ }
+}
+
+static void cn10k_outb_prepare_sa(struct xfrm_state *x,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int key_len = (x->aead->alg_key_len + 7) / 8;
+ struct net_device *netdev = x->xso.dev;
+ u8 *key = x->aead->alg_key;
+ struct otx2_nic *pf;
+ u32 *tmp_salt;
+ u64 *tmp_key;
+ int idx;
+
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+
+ /* Context size, in units of 128-byte blocks */
+ pf = netdev_priv(netdev);
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset();
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+
+ /* Ucode to skip two words of CPT_CTX_HW_S */
+ sa_entry->ctx_hdr_size = 1;
+
+ /* Allow Atomic operation (AOP) */
+ sa_entry->aop_valid = 1;
+
+ /* Outbound, ESP transport/tunnel mode, AES-GCM cipher */
+ sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB;
+ sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP;
+ sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM;
+ sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET;
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL;
+ else
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT;
+
+ /* Last 4 bytes are salt */
+ key_len -= 4;
+ sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len);
+ memcpy(sa_entry->cipher_key, key, key_len);
+ tmp_key = (u64 *)sa_entry->cipher_key;
+
+ for (idx = 0; idx < key_len / 8; idx++)
+ tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]);
+
+ memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4);
+ tmp_salt = (u32 *)&sa_entry->iv_gcm_salt;
+ *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt);
+
+ /* Write SA context data to memory before enabling */
+ wmb();
+
+ /* Enable SA */
+ sa_entry->sa_valid = 1;
+}
+
+static int cn10k_ipsec_validate_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload authenticated xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only AES-GCM-ICV16 xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload compressed xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET && x->props.family != AF_INET6) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only IPv4/v6 xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload other than crypto-mode");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only tunnel/transport xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only ESP xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulated xfrm state may not be offloaded");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without aead");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD ICV length other than 128bit");
+ return -EINVAL;
+ }
+ if (x->aead->alg_key_len != 128 + 32 &&
+ x->aead->alg_key_len != 192 + 32 &&
+ x->aead->alg_key_len != 256 + 32) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD key length other than 128/192/256bit");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with tfc padding");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without geniv");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with geniv other than seqiv");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported");
+ return -EOPNOTSUPP;
+}
+
+static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ err = cn10k_ipsec_validate_state(x, extack);
+ if (err)
+ return err;
+
+ pf = netdev_priv(netdev);
+
+ err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
+ if (err)
+ return err;
+
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ cn10k_outb_prepare_sa(x, sa_entry);
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA");
+ qmem_free(pf->dev, sa_info);
+ return err;
+ }
+
+ x->xso.offload_handle = (unsigned long)sa_info;
+ /* Enable static branch when first SA setup */
+ if (!pf->ipsec.outb_sa_count)
+ static_branch_enable(&cn10k_ipsec_sa_enabled);
+ pf->ipsec.outb_sa_count++;
+ return 0;
+}
+
+static int cn10k_ipsec_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return cn10k_ipsec_inb_add_state(x, extack);
+ else
+ return cn10k_ipsec_outb_add_state(x, extack);
+}
+
+static void cn10k_ipsec_del_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return;
+
+ pf = netdev_priv(netdev);
+
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+ /* Disable SA in CPT h/w */
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->aop_valid = 1;
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err)
+ netdev_err(netdev, "Error (%d) deleting SA\n", err);
+
+ x->xso.offload_handle = 0;
+ qmem_free(pf->dev, sa_info);
+
+ /* If no more SAs are left, update netdev features for a potential
+ * change in NETIF_F_HW_ESP.
+ */
+ if (!--pf->ipsec.outb_sa_count)
+ queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
+}
+
+static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+ return true;
+}
+
+static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = cn10k_ipsec_add_state,
+ .xdo_dev_state_delete = cn10k_ipsec_del_state,
+ .xdo_dev_offload_ok = cn10k_ipsec_offload_ok,
+};
+
+static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
+{
+ struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
+ sa_work);
+ struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec);
+
+ /* Disable static branch when no more SA enabled */
+ static_branch_disable(&cn10k_ipsec_sa_enabled);
+ rtnl_lock();
+ netdev_update_features(pf->netdev);
+ rtnl_unlock();
+}
+
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ /* IPsec offload supported on cn10k */
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return -EOPNOTSUPP;
+
+ /* Initialize CPT for outbound ipsec offload */
+ if (enable)
+ return cn10k_outb_cpt_init(netdev);
+
+ /* Don't do CPT cleanup if SA installed */
+ if (pf->ipsec.outb_sa_count) {
+ netdev_err(pf->netdev, "SA installed on this device\n");
+ return -EBUSY;
+ }
+
+ return cn10k_outb_cpt_clean(pf);
+}
+
+int cn10k_ipsec_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u32 sa_size;
+
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return 0;
+
+ /* Each SA entry is rounded up to a multiple of 128 bytes */
+ sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ?
+ (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) *
+ OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s);
+ pf->ipsec.sa_size = sa_size;
+
+ INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
+ pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
+ if (!pf->ipsec.sa_workq) {
+ netdev_err(pf->netdev, "SA alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ /* Set xfrm device ops */
+ netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
+ netdev->hw_features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_ipsec_init);
+
+void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return;
+
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ return;
+
+ if (pf->ipsec.sa_workq) {
+ destroy_workqueue(pf->ipsec.sa_workq);
+ pf->ipsec.sa_workq = NULL;
+ }
+
+ cn10k_outb_cpt_clean(pf);
+}
+EXPORT_SYMBOL(cn10k_ipsec_clean);
+
+static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 *src;
+
+ src = (u8 *)skb->data + ETH_HLEN;
+
+ if (x->props.family == AF_INET) {
+ iph = (struct iphdr *)src;
+ return ntohs(iph->tot_len);
+ }
+
+ ipv6h = (struct ipv6hdr *)src;
+ return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr);
+}
+
+/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure.
+ * SG of NIX and CPT are same in size.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct cpt_sg_s *cpt_sg = NULL;
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u64 *cpt_iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+ cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+ /* The next subdescriptor always starts at a 16-byte boundary,
+ * so advance the offset by 16 bytes whether sg->segs ends up
+ * as 2 or 3.
+ */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
+ cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[seg % MAX_SEGS_PER_SG] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+ *cpt_iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+
+ *cpt_sg = *(struct cpt_sg_s *)sg;
+ cpt_sg->rsvd_63_50 = 0;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
+
+static u16 cn10k_ipsec_get_param1(u8 iv_offset)
+{
+ u16 param1_val;
+
+ /* Set Crypto mode, disable L3/L4 checksum */
+ param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM |
+ CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM;
+ param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT;
+ return param1_val;
+}
+
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ struct cpt_inst_s inst;
+ struct cpt_res_s *res;
+ struct xfrm_state *x;
+ struct qmem *sa_info;
+ dma_addr_t dptr_iova;
+ struct sec_path *sp;
+ u8 encap_offset;
+ u8 auth_offset;
+ u8 gthr_size;
+ u8 iv_offset;
+ u16 dlen;
+
+ /* Check for IPSEC offload enabled */
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ goto drop;
+
+ sp = skb_sec_path(skb);
+ if (unlikely(!sp->len))
+ goto drop;
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x))
+ goto drop;
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL)
+ goto drop;
+
+ dlen = cn10k_ipsec_get_ip_data_len(x, skb);
+ if (dlen == 0 && netif_msg_tx_err(pf)) {
+ netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
+ goto drop;
+ }
+
+ /* Check for valid SA context */
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ if (!sa_info)
+ goto drop;
+
+ memset(&inst, 0, sizeof(struct cpt_inst_s));
+
+ /* Get authentication offset */
+ if (x->props.family == AF_INET)
+ auth_offset = sizeof(struct iphdr);
+ else
+ auth_offset = sizeof(struct ipv6hdr);
+
+ /* IV offset is after ESP header */
+ iv_offset = auth_offset + sizeof(struct ip_esp_hdr);
+ /* Encap will start after IV */
+ encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
+
+ /* CPT Instruction word-1 */
+ res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head));
+ res->compcode = 0;
+ inst.res_addr = sq->cpt_resp->iova + (64 * sq->head);
+
+ /* CPT Instruction word-2 */
+ inst.rvu_pf_func = pf->pcifunc;
+
+ /* CPT Instruction word-3:
+ * Set QORD to force CPT_RES_S write completion
+ */
+ inst.qord = 1;
+
+ /* CPT Instruction word-4 */
+ /* inst.dlen should not include ICV length */
+ inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8);
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC;
+ inst.param1 = cn10k_ipsec_get_param1(iv_offset);
+
+ inst.param2 = encap_offset <<
+ CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT;
+ inst.param2 |= (u16)auth_offset <<
+ CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT;
+
+ /* CPT Instruction word-5 */
+ gthr_size = num_segs / MAX_SEGS_PER_SG;
+ gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size;
+
+ gthr_size &= 0xF;
+ dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2)));
+ inst.dptr = dptr_iova | ((u64)gthr_size << 60);
+
+ /* CPT Instruction word-6 */
+ inst.rptr = inst.dptr;
+
+ /* CPT Instruction word-7 */
+ inst.cptr = sa_info->iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* CPT Instruction word-0 */
+ inst.nixtxl = (size / 16) - 1;
+ inst.dat_offset = ETH_HLEN;
+ inst.nixtx_offset = sq->sqe_size;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Finally Flush the CPT instruction */
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ return true;
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
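
With the offload path in place, an SA that passes cn10k_ipsec_validate_state() can be installed from userspace; roughly as follows (interface name, addresses and key are placeholders):

    # enable the ESP offload feature first
    ethtool -K eth0 esp-hw-offload on

    # AES-GCM with 128-bit key + 4-byte salt (20 bytes total), 128-bit ICV
    ip xfrm state add src 192.168.1.1 dst 192.168.1.2 \
            proto esp spi 0x1000 mode transport \
            aead 'rfc4106(gcm(aes))' \
            0x0102030405060708090a0b0c0d0e0f1011121314 128 \
            offload dev eth0 dir out

Anything outside the validator's allow-list (ESN, compression, non-ESP protocols, UDP encapsulation, a geniv other than seqiv) is rejected with the extack messages shown above.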
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
new file mode 100644
index 000000000000..9965df0faa3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef CN10K_IPSEC_H
+#define CN10K_IPSEC_H
+
+#include <linux/types.h>
+
+DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
+/* CPT instruction size in bytes */
+#define CN10K_CPT_INST_SIZE 64
+
+/* CPT instruction (CPT_INST_S) queue length */
+#define CN10K_CPT_INST_QLEN 8200
+
+/* CPT instruction queue size passed to HW is in units of
+ * 40*CPT_INST_S messages.
+ */
+#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
+
+/* CPT needs 320 free entries */
+#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
+#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
+
+/* CPT instruction queue length in bytes */
+#define CN10K_CPT_INST_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
+ CN10K_CPT_INST_QLEN_EXTRA_BYTES)
+
+/* CPT instruction group queue length in bytes */
+#define CN10K_CPT_INST_GRP_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
+
+/* CPT FC length in bytes */
+#define CN10K_CPT_Q_FC_LEN 128
+
+/* Default CPT engine group for ipsec offload */
+#define CN10K_DEF_CPT_IPSEC_EGRP 1
+
+/* CN10K CPT LF registers */
+#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
+#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
+#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
+#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
+#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
+#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
+#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
+#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
+#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
+
+/* IPSEC Instruction opcodes */
+#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
+#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
+#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL
+
+enum cn10k_cpt_comp_e {
+ CN10K_CPT_COMP_E_NOTDONE = 0x00,
+ CN10K_CPT_COMP_E_GOOD = 0x01,
+ CN10K_CPT_COMP_E_FAULT = 0x02,
+ CN10K_CPT_COMP_E_HWERR = 0x04,
+ CN10K_CPT_COMP_E_INSTERR = 0x05,
+ CN10K_CPT_COMP_E_WARN = 0x06,
+ CN10K_CPT_COMP_E_MASK = 0x3F
+};
+
+struct cn10k_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+enum cn10k_cpt_hw_state_e {
+ CN10K_CPT_HW_UNAVAILABLE,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE
+};
+
+struct cn10k_ipsec {
+ /* Outbound CPT */
+ u64 io_addr;
+ atomic_t cpt_state;
+ struct cn10k_cpt_inst_queue iq;
+
+ /* SA info */
+ u32 sa_size;
+ u32 outb_sa_count;
+ struct work_struct sa_work;
+ struct workqueue_struct *sa_workq;
+};
+
+/* CN10K IPSEC Security Association (SA) */
+/* SA direction */
+#define CN10K_IPSEC_SA_DIR_INB 0
+#define CN10K_IPSEC_SA_DIR_OUTB 1
+/* SA protocol */
+#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0
+#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1
+/* SA Encryption Type */
+#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5
+/* SA IPSEC mode Transport/Tunnel */
+#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0
+#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1
+/* SA AES Key Length */
+#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1
+#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2
+#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3
+/* IV Source */
+#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0
+#define CN10K_IPSEC_SA_IV_SRC_PACKET 3
+
+struct cn10k_tx_sa_s {
+ u64 esn_en : 1; /* W0 */
+ u64 rsvd_w0_1_8 : 8;
+ u64 hw_ctx_off : 7;
+ u64 ctx_id : 16;
+ u64 rsvd_w0_32_47 : 16;
+ u64 ctx_push_size : 7;
+ u64 rsvd_w0_55 : 1;
+ u64 ctx_hdr_size : 2;
+ u64 aop_valid : 1;
+ u64 rsvd_w0_59 : 1;
+ u64 ctx_size : 4;
+ u64 w1; /* W1 */
+ u64 sa_valid : 1; /* W2 */
+ u64 sa_dir : 1;
+ u64 rsvd_w2_2_3 : 2;
+ u64 ipsec_mode : 1;
+ u64 ipsec_protocol : 1;
+ u64 aes_key_len : 2;
+ u64 enc_type : 3;
+ u64 rsvd_w2_11_19 : 9;
+ u64 iv_src : 2;
+ u64 rsvd_w2_22_31 : 10;
+ u64 rsvd_w2_32_63 : 32;
+ u64 w3; /* W3 */
+ u8 cipher_key[32]; /* W4 - W7 */
+ u32 rsvd_w8_0_31; /* W8 : IV */
+ u32 iv_gcm_salt;
+ u64 rsvd_w9_w30[22]; /* W9 - W30 */
+ u64 hw_ctx[6]; /* W31 - W36 */
+};
+
+/* CPT instruction parameter-1 */
+#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1
+#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2
+#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20
+#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8
+
+/* CPT instruction parameter-2 */
+#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0
+#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8
+
+/* CPT Instruction Structure */
+struct cpt_inst_s {
+ u64 nixtxl : 3; /* W0 */
+ u64 doneint : 1;
+ u64 rsvd_w0_4_15 : 12;
+ u64 dat_offset : 8;
+ u64 ext_param1 : 8;
+ u64 nixtx_offset : 20;
+ u64 rsvd_w0_52_63 : 12;
+ u64 res_addr; /* W1 */
+ u64 tag : 32; /* W2 */
+ u64 tt : 2;
+ u64 grp : 10;
+ u64 rsvd_w2_44_47 : 4;
+ u64 rvu_pf_func : 16;
+ u64 qord : 1; /* W3 */
+ u64 rsvd_w3_1_2 : 2;
+ u64 wqe_ptr : 61;
+ u64 dlen : 16; /* W4 */
+ u64 param2 : 16;
+ u64 param1 : 16;
+ u64 opcode_major : 8;
+ u64 opcode_minor : 8;
+ u64 dptr; /* W5 */
+ u64 rptr; /* W6 */
+ u64 cptr : 60; /* W7 */
+ u64 ctx_val : 1;
+ u64 egrp : 3;
+};
+
+/* CPT Instruction Result Structure */
+struct cpt_res_s {
+ u64 compcode : 7; /* W0 */
+ u64 doneint : 1;
+ u64 uc_compcode : 8;
+ u64 uc_info : 48;
+ u64 esn; /* W1 */
+};
+
+/* CPT SG structure */
+struct cpt_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_63_50 : 14;
+};
+
+/* CPT LF_INPROG Register */
+#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
+#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
+#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
+
+/* CPT LF_Q_GRP_PTR Register */
+#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
+#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
+
+/* CPT LF_Q_BASE Register */
+#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
+
+/* CPT LF CTX Flush Register */
+#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0)
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int cn10k_ipsec_init(struct net_device *netdev);
+void cn10k_ipsec_clean(struct otx2_nic *pf);
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset);
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size);
+#else
+static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
+{
+ return 0;
+}
+
+static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+}
+
+static inline __maybe_unused
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ return 0;
+}
+
+static inline bool __maybe_unused
+otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ return true;
+}
+
+static inline bool __maybe_unused
+cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ return true;
+}
+#endif
+#endif // CN10K_IPSEC_H
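
Plugging in the constants above gives the instruction-queue footprint that cn10k_outb_cptlf_iq_alloc() requests; a compile-time sanity check of that arithmetic (assuming OTX2_ALIGN is 128, as elsewhere in this driver):

    /* 8200 instructions / 40 per HW size unit */
    static_assert(CN10K_CPT_SIZE_DIV40 == 205);
    static_assert(CN10K_CPT_EXTRA_SIZE_DIV40 == 8);

    /* 205 * 40 * 64 B + 320 * 64 B of slack = 545280 B of CPT_INST_S */
    static_assert(CN10K_CPT_INST_QLEN_BYTES == 545280);

    /* (205 + 8) group records of 16 B each = 3408 B */
    static_assert(CN10K_CPT_INST_GRP_QLEN_BYTES == 3408);

    /* Total DMA allocation made in cn10k_outb_cptlf_iq_alloc():
     * 545280 + 128 (FC) + 3408 (group) + 128 (alignment) = 548944 B
     */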
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 6cc7a78968fc..f3b9daffaec3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
return "SA";
default:
return "Unknown";
- };
-
- return "Unknown";
+ }
}
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 523ecb798a7a..2b49bfec7869 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -10,12 +10,19 @@
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>
+#include <linux/dcbnl.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
+{
+ return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en;
+}
+
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
@@ -964,6 +971,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
+ /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG.
+ * SG of NIX and CPT are same in size. Allocate memory for CPT SG
+ * same as NIX SQE for base address alignment.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+ err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt,
+ sq->sqe_size * 2);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64);
+ if (err)
+ return err;
+
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
@@ -1722,18 +1752,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
-#ifdef CONFIG_DCB
- req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
- req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
-#else
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
-#endif
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+ }
return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+EXPORT_SYMBOL(otx2_nix_cpt_config_bp);
+
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@@ -1947,3 +2002,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* Crypto hardware needs write permission for ipsec crypto offload */
+ if (unlikely(xfrm_offload(skb))) {
+ dir = DMA_BIDIRECTIONAL;
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ }
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, dir);
+}
+
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ struct sk_buff *skb = NULL;
+ int seg;
+
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(xfrm_offload(skb)))
+ dir = DMA_BIDIRECTIONAL;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], dir);
+ }
+ sg->num_segs = 0;
+}
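
The map/unmap helpers are symmetric around the sg_list bookkeeping: every dma_addr/size pair saved at map time is released at completion time with the same DMA direction decision. A hypothetical completion-side caller (example_tx_done() is illustrative, not part of this patch):

    static void example_tx_done(struct otx2_nic *pfvf,
                                struct otx2_snd_queue *sq, int idx)
    {
            struct sg_list *sg = &sq->sg[idx];
            struct sk_buff *skb = (struct sk_buff *)sg->skb;

            /* releases every segment mapped by otx2_dma_map_skb_frag() */
            otx2_dma_unmap_skb_frags(pfvf, sg);
            dev_consume_skb_any(skb);
            sg->skb = 0;
    }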
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 566848663fea..65814e3dc93f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -30,6 +30,7 @@
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
+#include "cn10k_ipsec.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -40,6 +41,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
@@ -55,6 +57,9 @@
#define NIX_PF_PFC_PRIO_MAX 8
#endif
+/* Number of segments per SG structure */
+#define MAX_SEGS_PER_SG 3
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -448,6 +453,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
#define OTX2_FLAG_PORT_UP BIT_ULL(19)
+#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20)
u64 flags;
u64 *cq_op_addr;
@@ -499,9 +505,9 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
-#ifdef CONFIG_DCB
/* PFC */
u8 pfc_en;
+#ifdef CONFIG_DCB
u8 *queue_to_pfc_map;
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
@@ -522,6 +528,9 @@ struct otx2_nic {
u16 rep_pf_map[RVU_MAX_REP];
u16 esw_mode;
#endif
+
+ /* Inline ipsec */
+ struct cn10k_ipsec ipsec;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -572,6 +581,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -621,6 +639,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
break;
+ case BLKTYPE_CPT:
+ blkaddr = BLKADDR_CPT0;
+ break;
default:
blkaddr = BLKADDR_RVUM;
break;
@@ -985,6 +1006,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
@@ -1149,4 +1171,8 @@ static inline int mcam_entry_cmp(const void *a, const void *b)
{
return *(u16 *)a - *(u16 *)b;
}
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len);
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 294fba58b670..f110dfa42360 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -435,6 +435,9 @@ process_pfc:
return err;
}
+ /* Default disable backpressure on NIX-CPT */
+ otx2_nix_cpt_config_bp(pfvf, false);
+
/* Request Per channel Bpids */
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e310f99b1736..e1dde93e8af8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -26,6 +26,7 @@
#include "cn10k.h"
#include "qos.h"
#include <rvu_trace.h>
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -1484,6 +1485,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
if (!sq->sqe)
continue;
qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->sqe_ring);
+ qmem_free(pf->dev, sq->cpt_resp);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
@@ -1551,6 +1554,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Disable backpressure on NIX-CPT by default */
+ otx2_nix_cpt_config_bp(pf, false);
+
/* Enable backpressure for CGX mapped PF/VFs */
if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, true);
@@ -2273,6 +2279,10 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
+ if (changed & NETIF_F_HW_ESP)
+ return cn10k_ipsec_ethtool_init(netdev,
+ features & NETIF_F_HW_ESP);
+
return otx2_handle_ntuple_tc_features(netdev, features);
}
@@ -3162,10 +3172,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* reset CGX/RPM MAC stats */
otx2_reset_mac_stats(pf);
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_mcs_free;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_mcs_free;
+ goto err_ipsec_clean;
}
err = otx2_wq_init(pf);
@@ -3206,6 +3220,8 @@ err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(pf);
err_mcs_free:
cn10k_mcs_free(pf);
err_del_mcam_entries:
@@ -3403,6 +3419,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_ipsec_clean(pf);
cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
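The new qmem_free() calls in otx2_free_sq_res() imply matching allocations when the send queue is set up. A sketch under stated assumptions: the helper name example_alloc_ipsec_sq_res() and both entry sizes are illustrative, while qmem_alloc() is the driver's existing allocator:

static int example_alloc_ipsec_sq_res(struct otx2_nic *pfvf,
				      struct otx2_snd_queue *sq,
				      int sqe_cnt)
{
	int err;

	/* double-wide ring: two SQE-sized entries per slot (assumption) */
	err = qmem_alloc(pfvf->dev, &sq->sqe_ring, sqe_cnt,
			 sq->sqe_size * 2);
	if (err)
		return err;

	/* one CPT completion/result entry per slot (size is an assumption) */
	return qmem_alloc(pfvf->dev, &sq->cpt_resp, sqe_cnt, 64);
}

Since the otx2_set_features() hook keys on NETIF_F_HW_ESP, the offload should be switchable at runtime via ethtool's esp-hw-offload feature (ethtool -K <dev> esp-hw-offload on|off).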
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 04bc06a80e23..224cef938927 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -11,6 +11,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -26,12 +27,25 @@
*/
#define PTP_SYNC_SEC_OFFSET 34
+DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
bool *need_xdp_flush);
+static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
+ struct sk_buff *skb)
+{
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ xfrm_offload(skb))
+ sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
+ (sq->head * (sq->sqe_size * 2));
+ else
+ sq->sqe_base = sq->sqe->base;
+}
+
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
{
@@ -80,38 +94,6 @@ static unsigned int frag_num(unsigned int i)
#endif
}
-static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
- struct sk_buff *skb, int seg, int *len)
-{
- const skb_frag_t *frag;
- struct page *page;
- int offset;
-
- /* First segment is always skb->data */
- if (!seg) {
- page = virt_to_page(skb->data);
- offset = offset_in_page(skb->data);
- *len = skb_headlen(skb);
- } else {
- frag = &skb_shinfo(skb)->frags[seg - 1];
- page = skb_frag_page(frag);
- offset = skb_frag_off(frag);
- *len = skb_frag_size(frag);
- }
- return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
-}
-
-static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
-{
- int seg;
-
- for (seg = 0; seg < sg->num_segs; seg++) {
- otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE);
- }
- sg->num_segs = 0;
-}
-
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
struct nix_cqe_tx_s *cqe)
@@ -625,7 +607,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
sq->head &= (sq->sqe_cnt - 1);
}
-#define MAX_SEGS_PER_SG 3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
@@ -1161,6 +1142,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_nic *pfvf = dev;
+ bool ret;
/* Check if there is enough room between producer
* and consumer index.
@@ -1177,6 +1159,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
+
if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
@@ -1196,6 +1179,9 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
return true;
}
+ /* Set SQE base address */
+ otx2_sq_set_sqe_base(sq, skb);
+
/* Set SQE's SEND_HDR.
* Do not clear the first 64bit as it contains constant info.
*/
@@ -1208,7 +1194,13 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
/* Add SG subdesc with data frags */
- if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ xfrm_offload(skb))
+ ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
+ else
+ ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
+
+ if (!ret) {
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
return false;
}
@@ -1217,11 +1209,15 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
sqe_hdr->sizem1 = (offset / 16) - 1;
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ xfrm_offload(skb))
+ return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs,
+ offset);
+
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
-
return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
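otx2_sq_set_sqe_base() above selects the SQE location per packet: plain traffic keeps using sq->sqe, while IPsec-offloaded skbs are built in the second SQE-sized entry of a double-wide sqe_ring slot. A sketch of that addressing; the arithmetic comes straight from the hunk, but reserving the first entry of each slot for the CPT instruction is an assumption:

static inline void *example_ipsec_sqe_base(void *ring_base, u16 head,
					   u32 sqe_size)
{
	/* slot 'head' spans two SQE-sized entries; skip the first one */
	return ring_base + (head * (sqe_size * 2)) + sqe_size;
}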
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index e1db5f961877..d23810963fdb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -101,6 +101,9 @@ struct otx2_snd_queue {
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
+ /* SQE ring and CPT response queue for inline IPsec */
+ struct qmem *sqe_ring;
+ struct qmem *cpt_resp;
} ____cacheline_aligned_in_smp;
enum cq_type {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 839fc77c11b2..e926c6ce96cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -14,6 +14,7 @@
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicvf"
#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
@@ -693,10 +694,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->bus->number, n);
}
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_ptp_destroy;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_ipsec_clean;
}
err = otx2_vf_wq_init(vf);
@@ -730,6 +735,8 @@ err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(vf);
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
@@ -782,6 +789,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ cn10k_ipsec_clean(vf);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 232b10740c13..04e08e06f30f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -680,14 +680,17 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
ndev->features |= ndev->hw_features;
eth_hw_addr_random(ndev);
err = rvu_rep_devlink_port_register(rep);
- if (err)
+ if (err) {
+ free_netdev(ndev);
goto exit;
+ }
SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
err = register_netdev(ndev);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"PFVF representor registration failed");
+ rvu_rep_devlink_port_unregister(rep);
free_netdev(ndev);
goto exit;
}