Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2')
26 files changed, 1140 insertions, 331 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 592037f4e55b..e06f77ad6106 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -55,6 +55,7 @@ static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
 	[LMAC_MODE_50G_R] = "50G_R",
 	[LMAC_MODE_100G_R] = "100G_R",
 	[LMAC_MODE_USXGMII] = "USXGMII",
+	[LMAC_MODE_USGMII] = "USGMII",
 };
 
 /* CGX PHY management internal APIs */
@@ -223,24 +224,6 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
 	return 0;
 }
 
-static u64 mac2u64 (u8 *mac_addr)
-{
-	u64 mac = 0;
-	int index;
-
-	for (index = ETH_ALEN - 1; index >= 0; index--)
-		mac |= ((u64)*mac_addr++) << (8 * index);
-	return mac;
-}
-
-static void cfg2mac(u64 cfg, u8 *mac_addr)
-{
-	int i, index = 0;
-
-	for (i = ETH_ALEN - 1; i >= 0; i--, index++)
-		mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
-}
-
 int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
 {
 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
@@ -249,13 +232,16 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
 	int index, id;
 	u64 cfg;
 
+	if (!lmac)
+		return -ENODEV;
+
 	/* access mac_ops to know csr_offset */
 	mac_ops = cgx_dev->mac_ops;
 	/* copy 6bytes from macaddr */
 	/* memcpy(&cfg, mac_addr, 6); */
 
-	cfg = mac2u64 (mac_addr);
+	cfg = ether_addr_to_u64(mac_addr);
 
 	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
 
@@ -322,7 +308,7 @@ int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
 
 	index = id * lmac->mac_to_index_bmap.max + idx;
 
-	cfg = mac2u64 (mac_addr);
+	cfg = ether_addr_to_u64(mac_addr);
 	cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
 	cfg |= ((u64)lmac_id << 49);
 	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
@@ -405,7 +391,7 @@ int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
 
 	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
 	cfg &= ~CGX_RX_DMAC_ADR_MASK;
-	cfg |= mac2u64 (mac_addr);
+	cfg |= ether_addr_to_u64(mac_addr);
 	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
 
 	return 0;
@@ -441,7 +427,7 @@ int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
 
 	/* Read MAC address to check whether it is ucast or mcast */
 	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
-	cfg2mac(cfg, mac);
+	u64_to_ether_addr(cfg, mac);
 	if (is_multicast_ether_addr(mac))
 		lmac->mcast_filters_count--;
 
@@ -567,15 +553,16 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
 {
 	struct cgx *cgx = cgx_get_pdata(cgx_id);
 	struct lmac *lmac = lmac_pdata(lmac_id, cgx);
-	u16 max_dmac = lmac->mac_to_index_bmap.max;
 	struct mac_ops *mac_ops;
+	u16 max_dmac;
 	int index, i;
 	u64 cfg = 0;
 	int id;
 
-	if (!cgx)
+	if (!cgx || !lmac)
 		return;
 
+	max_dmac = lmac->mac_to_index_bmap.max;
 	id = get_sequence_id_of_lmac(cgx, lmac_id);
 
 	mac_ops = cgx->mac_ops;
@@ -748,7 +735,7 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
 	int corr_reg, uncorr_reg;
 	struct cgx *cgx = cgxd;
 
-	if (!cgx || lmac_id >= cgx->lmac_count)
+	if (!is_lmac_valid(cgx, lmac_id))
 		return -ENODEV;
 
 	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
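The open-coded mac2u64()/cfg2mac() helpers removed above are replaced by the generic etherdevice.h helpers ether_addr_to_u64()/u64_to_ether_addr(), which use the same big-endian packing (byte 0 of the MAC ends up in the most significant of the 48 used bits). A minimal standalone sketch of that equivalence, with the kernel types and ETH_ALEN redefined so it compiles outside the tree:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Same packing as the kernel's ether_addr_to_u64() and the removed
 * mac2u64() loop in cgx.c: addr[0] lands in the highest used byte.
 */
static uint64_t ether_addr_to_u64(const uint8_t *addr)
{
	uint64_t u = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		u = u << 8 | addr[i];
	return u;
}

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

	printf("0x%012llx\n", (unsigned long long)ether_addr_to_u64(mac));
	/* prints 0x020000aabbcc */
	return 0;
}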
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 574114179688..6f7d1dee5830 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -110,6 +110,7 @@ enum LMAC_TYPE {
 	LMAC_MODE_50G_R = 8,
 	LMAC_MODE_100G_R = 9,
 	LMAC_MODE_USXGMII = 10,
+	LMAC_MODE_USGMII = 11,
 	LMAC_MODE_MAX,
 };
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index eba307eee2b2..6b5b06c2b4e9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -136,6 +136,7 @@ M(GET_HW_CAP,	0x008, get_hw_cap, msg_req, get_hw_cap_rsp)	\
 M(LMTST_TBL_SETUP,	0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req,	\
 				msg_rsp)				\
 M(SET_VF_PERM,		0x00b, set_vf_perm, set_vf_perm, msg_rsp)	\
+M(PTP_GET_CAP,		0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp)	\
 /* CGX mbox IDs (range 0x200 - 0x3FF) */				\
 M(CGX_START_RXTX,	0x200, cgx_start_rxtx, msg_req, msg_rsp)	\
 M(CGX_STOP_RXTX,	0x201, cgx_stop_rxtx, msg_req, msg_rsp)		\
@@ -235,7 +236,7 @@ M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg,	\
 M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow,	\
 				  npc_install_flow_req, npc_install_flow_rsp)	\
 M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow,	\
-				  npc_delete_flow_req, msg_rsp)	\
+				  npc_delete_flow_req, npc_delete_flow_rsp)	\
 M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry,	\
 				  npc_mcam_read_entry_req,	\
 				  npc_mcam_read_entry_rsp)	\
@@ -1437,6 +1438,12 @@ struct npc_get_kex_cfg_rsp {
 	u8 mkex_pfl_name[MKEX_NAME_LEN];
 };
 
+struct ptp_get_cap_rsp {
+	struct mbox_msghdr hdr;
+#define	PTP_CAP_HW_ATOMIC_UPDATE	BIT_ULL(0)
+	u64 cap;
+};
+
 struct flow_msg {
 	unsigned char dmac[6];
 	unsigned char smac[6];
@@ -1451,6 +1458,10 @@ struct flow_msg {
 		__be32 ip4dst;
 		__be32 ip6dst[4];
 	};
+	union {
+		__be32 spi;
+	};
+
 	u8 tos;
 	u8 ip_ver;
 	u8 ip_proto;
@@ -1461,6 +1472,7 @@ struct flow_msg {
 		u8 ip_flag;
 		u8 next_header;
 	};
+	__be16 vlan_itci;
 };
 
 struct npc_install_flow_req {
@@ -1491,6 +1503,8 @@ struct npc_install_flow_req {
 	u8 vtag0_op;
 	u16 vtag1_def;
 	u8 vtag1_op;
+	/* old counter value */
+	u16 cntr_val;
 };
 
 struct npc_install_flow_rsp {
@@ -1506,6 +1520,11 @@ struct npc_delete_flow_req {
 	u8 all; /* PF + VFs */
 };
 
+struct npc_delete_flow_rsp {
+	struct mbox_msghdr hdr;
+	u16 cntr_val;
+};
+
 struct npc_mcam_read_entry_req {
 	struct mbox_msghdr hdr;
 	u16 entry; /* MCAM entry to read */
@@ -1556,6 +1575,8 @@ enum ptp_op {
 	PTP_OP_GET_TSTMP = 2,
 	PTP_OP_SET_THRESH = 3,
 	PTP_OP_EXTTS_ON = 4,
+	PTP_OP_ADJTIME = 5,
+	PTP_OP_SET_CLOCK = 6,
 };
 
 struct ptp_req {
@@ -1564,11 +1585,14 @@ struct ptp_req {
 	s64 scaled_ppm;
 	u64 thresh;
 	int extts_on;
+	s64 delta;
+	u64 clk;
 };
 
 struct ptp_rsp {
 	struct mbox_msghdr hdr;
 	u64 clk;
+	u64 tsc;
 };
 
 struct npc_get_field_status_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 9beeead56d7b..de9fbd98dfb7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -184,6 +184,7 @@ enum key_fields {
 	NPC_VLAN_ETYPE_CTAG, /* 0x8100 */
 	NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
 	NPC_OUTER_VID,
+	NPC_INNER_VID,
 	NPC_TOS,
 	NPC_IPFRAG_IPV4,
 	NPC_SIP_IPV4,
@@ -204,6 +205,7 @@ enum key_fields {
 	NPC_DPORT_UDP,
 	NPC_SPORT_SCTP,
 	NPC_DPORT_SCTP,
+	NPC_IPSEC_SPI,
 	NPC_HEADER_FIELDS_MAX,
 	NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
 	NPC_PF_FUNC, /* Valid when Tx */
@@ -229,6 +231,8 @@ enum key_fields {
 	NPC_VLAN_TAG1,
 	/* outer vlan tci for double tagged frame */
 	NPC_VLAN_TAG2,
+	/* inner vlan tci for double tagged frame */
+	NPC_VLAN_TAG3,
 	/* other header fields programmed to extract but not of our interest */
 	NPC_UNKNOWN,
 	NPC_KEY_FIELDS_MAX,
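NPC_INNER_VID and NPC_VLAN_TAG3 extend the key-field list so the inner TCI of a double-tagged (QinQ) frame can be matched alongside the outer one; flow_msg grows a matching vlan_itci field. The 16-bit TCI these fields carry uses the standard 802.1Q split, which is the same packing the flow code below computes. A minimal sketch (the helper name is illustrative, not from the driver):

#include <stdint.h>

/* 802.1Q TCI layout: | PCP (3 bits) | DEI (1 bit) | VLAN ID (12 bits) | */
static uint16_t vlan_tci_pack(uint16_t vid, uint8_t dei, uint8_t pcp)
{
	return (vid & 0x0fff) |
	       (uint16_t)(dei & 1) << 12 |
	       (uint16_t)(pcp & 7) << 13;
}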
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index 0ee420a489fc..ffbd22797163 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -12,8 +12,8 @@
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
 
-#include "ptp.h"
 #include "mbox.h"
+#include "ptp.h"
 #include "rvu.h"
 
 #define DRV_NAME	"Marvell PTP Driver"
@@ -40,6 +40,7 @@
 #define PTP_CLOCK_CFG_TSTMP_EDGE	BIT_ULL(9)
 #define PTP_CLOCK_CFG_TSTMP_EN		BIT_ULL(8)
 #define PTP_CLOCK_CFG_TSTMP_IN_MASK	GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_ATOMIC_OP_MASK	GENMASK_ULL(28, 26)
 #define PTP_CLOCK_CFG_PPS_EN		BIT_ULL(30)
 #define PTP_CLOCK_CFG_PPS_INV		BIT_ULL(31)
 
@@ -53,36 +54,62 @@
 #define PTP_TIMESTAMP		0xF20ULL
 #define PTP_CLOCK_SEC		0xFD0ULL
 #define PTP_SEC_ROLLOVER	0xFD8ULL
+/* Atomic update related CSRs */
+#define PTP_FRNS_TIMESTAMP	0xFE0ULL
+#define PTP_NXT_ROLLOVER_SET	0xFE8ULL
+#define PTP_CURR_ROLLOVER_SET	0xFF0ULL
+#define PTP_NANO_TIMESTAMP	0xFF8ULL
+#define PTP_SEC_TIMESTAMP	0x1000ULL
 
 #define CYCLE_MULT		1000
 
+#define is_rev_A0(ptp)	(((ptp)->pdev->revision & 0x0F) == 0x0)
+#define is_rev_A1(ptp)	(((ptp)->pdev->revision & 0x0F) == 0x1)
+
+/* PTP atomic update operation type */
+enum atomic_opcode {
+	ATOMIC_SET = 1,
+	ATOMIC_INC = 3,
+	ATOMIC_DEC = 4
+};
+
 static struct ptp *first_ptp_block;
 static const struct pci_device_id ptp_id_table[];
 
-static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
+static bool is_ptp_dev_cnf10ka(struct ptp *ptp)
 {
-	return (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP) ? true : false;
+	return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP;
 }
 
-static bool is_ptp_dev_cn10k(struct ptp *ptp)
+static bool is_ptp_dev_cn10ka(struct ptp *ptp)
 {
-	return (ptp->pdev->device == PCI_DEVID_CN10K_PTP) ? true : false;
+	return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP;
 }
 
 static bool cn10k_ptp_errata(struct ptp *ptp)
 {
-	if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
-	    ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+	if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
+	    (is_rev_A0(ptp) || is_rev_A1(ptp)))
 		return true;
+
 	return false;
 }
 
-static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+static bool is_tstmp_atomic_update_supported(struct rvu *rvu)
 {
-	if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
-	    ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
-		return true;
-	return false;
+	struct ptp *ptp = rvu->ptp;
+
+	if (is_rvu_otx2(rvu))
+		return false;
+
+	/* On older silicon variants of CN10K, atomic update feature
+	 * is not available.
+	 */
+	if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
+	    (is_rev_A0(ptp) || is_rev_A1(ptp)))
+		return false;
+
+	return true;
 }
 
 static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
@@ -222,6 +249,65 @@ void ptp_put(struct ptp *ptp)
 	pci_dev_put(ptp->pdev);
 }
 
+static void ptp_atomic_update(struct ptp *ptp, u64 timestamp)
+{
+	u64 regval, curr_rollover_set, nxt_rollover_set;
+
+	/* First setup NSECs and SECs */
+	writeq(timestamp, ptp->reg_base + PTP_NANO_TIMESTAMP);
+	writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
+	writeq(timestamp / NSEC_PER_SEC,
+	       ptp->reg_base + PTP_SEC_TIMESTAMP);
+
+	nxt_rollover_set = roundup(timestamp, NSEC_PER_SEC);
+	curr_rollover_set = nxt_rollover_set - NSEC_PER_SEC;
+	writeq(nxt_rollover_set, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
+	writeq(curr_rollover_set, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
+
+	/* Now, initiate atomic update */
+	regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
+	regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
+	regval |= (ATOMIC_SET << 26);
+	writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
+}
+
+static void ptp_atomic_adjtime(struct ptp *ptp, s64 delta)
+{
+	bool neg_adj = false, atomic_inc_dec = false;
+	u64 regval, ptp_clock_hi;
+
+	if (delta < 0) {
+		delta = -delta;
+		neg_adj = true;
+	}
+
+	/* use atomic inc/dec when delta < 1 second */
+	if (delta < NSEC_PER_SEC)
+		atomic_inc_dec = true;
+
+	if (!atomic_inc_dec) {
+		ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+		if (neg_adj) {
+			if (ptp_clock_hi > delta)
+				ptp_clock_hi -= delta;
+			else
+				ptp_clock_hi = delta - ptp_clock_hi;
+		} else {
+			ptp_clock_hi += delta;
+		}
+		ptp_atomic_update(ptp, ptp_clock_hi);
+	} else {
+		writeq(delta, ptp->reg_base + PTP_NANO_TIMESTAMP);
+		writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
+
+		/* initiate atomic inc/dec */
+		regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
+		regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
+		regval |= neg_adj ? (ATOMIC_DEC << 26) : (ATOMIC_INC << 26);
+		writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
+	}
+}
+
 static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
 {
 	bool neg_adj = false;
@@ -277,8 +363,9 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk)
 	return 0;
 }
 
-void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
+void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)
 {
+	struct ptp *ptp = rvu->ptp;
 	struct pci_dev *pdev;
 	u64 clock_comp;
 	u64 clock_cfg;
@@ -297,8 +384,14 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
 	ptp->clock_rate = sclk * 1000000;
 
 	/* Program the seconds rollover value to 1 second */
-	if (is_ptp_dev_cnf10kb(ptp))
+	if (is_tstmp_atomic_update_supported(rvu)) {
+		writeq(0, ptp->reg_base + PTP_NANO_TIMESTAMP);
+		writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
+		writeq(0, ptp->reg_base + PTP_SEC_TIMESTAMP);
+		writeq(0, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
+		writeq(0x3b9aca00, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
 		writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
+	}
 
 	/* Enable PTP clock */
 	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
@@ -320,6 +413,10 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
 	clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
 	clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
 	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+	clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
+	clock_cfg |= (ATOMIC_SET << 26);
+	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
 
 	/* Set 50% duty cycle for 1Hz output */
 	writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
@@ -350,7 +447,7 @@ static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
 {
 	u64 timestamp;
 
-	if (is_ptp_dev_cn10k(ptp)) {
+	if (is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) {
 		timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
 		*clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
 	} else {
@@ -414,14 +511,12 @@ static int ptp_probe(struct pci_dev *pdev,
 		first_ptp_block = ptp;
 
 	spin_lock_init(&ptp->ptp_lock);
-	if (is_ptp_tsfmt_sec_nsec(ptp))
-		ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
-	else
-		ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
-
 	if (cn10k_ptp_errata(ptp)) {
+		ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
 		hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 		ptp->hrtimer.function = ptp_reset_thresh;
+	} else {
+		ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
 	}
 
 	return 0;
@@ -521,6 +616,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
 	case PTP_OP_EXTTS_ON:
 		err = ptp_extts_on(rvu->ptp, req->extts_on);
 		break;
+	case PTP_OP_ADJTIME:
+		ptp_atomic_adjtime(rvu->ptp, req->delta);
+		break;
+	case PTP_OP_SET_CLOCK:
+		ptp_atomic_update(rvu->ptp, (u64)req->clk);
+		break;
 	default:
 		err = -EINVAL;
 		break;
@@ -528,3 +629,17 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
 
 	return err;
 }
+
+int rvu_mbox_handler_ptp_get_cap(struct rvu *rvu, struct msg_req *req,
+				 struct ptp_get_cap_rsp *rsp)
+{
+	if (!rvu->ptp)
+		return -ENODEV;
+
+	if (is_tstmp_atomic_update_supported(rvu))
+		rsp->cap |= PTP_CAP_HW_ATOMIC_UPDATE;
+	else
+		rsp->cap &= ~BIT_ULL_MASK(0);
+
+	return 0;
+}
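Every atomic operation above ends with the same read-modify-write of PTP_CLOCK_CFG bits 28:26. Since the mask is GENMASK_ULL(28, 26), the open-coded `(ATOMIC_SET << 26)` is equivalent to FIELD_PREP() with that mask — a hypothetical refactoring sketch (not part of the patch) that names the step, assuming linux/bitfield.h:

/* Hedged sketch: equivalent to the open-coded sequences in ptp.c above;
 * ptp_issue_atomic_op() is an illustrative name, not a driver function.
 */
static void ptp_issue_atomic_op(struct ptp *ptp, enum atomic_opcode op)
{
	u64 regval;

	regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
	regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
	regval |= FIELD_PREP(PTP_CLOCK_CFG_ATOMIC_OP_MASK, op);
	writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
}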
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index b9d92abc3844..1229344c7279 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -23,9 +23,10 @@ struct ptp {
 	u32 clock_period;
 };
 
+struct rvu;
 struct ptp *ptp_get(void);
 void ptp_put(struct ptp *ptp);
-void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
+void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts);
 
 extern struct pci_driver ptp_driver;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index b4fcb20c3f4f..af21e2030cff 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -355,8 +355,8 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
 
 void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
 {
+	u64 cfg, pfc_class_mask_cfg;
 	rpm_t *rpm = rpmd;
-	u64 cfg;
 
 	/* ALL pause frames received are completely ignored */
 	cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
@@ -380,9 +380,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
 	rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
 
 	/* Disable all PFC classes */
-	cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+	pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+						RPMX_CMRX_PRT_CBFC_CTL;
+	cfg = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
 	cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
-	rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+	rpm_write(rpm, lmac_id, pfc_class_mask_cfg, cfg);
 }
 
 int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -605,8 +607,11 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
 	if (!is_lmac_valid(rpm, lmac_id))
 		return -ENODEV;
 
+	pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+						RPMX_CMRX_PRT_CBFC_CTL;
+
 	cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
-	class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+	class_en = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
 	pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
 
 	if (rx_pause) {
@@ -635,10 +640,6 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
 		cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
 
 	rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
-	pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
-						RPMX_CMRX_PRT_CBFC_CTL;
-
 	rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
 
 	return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 73df2d564545..22c395c7d040 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -3322,7 +3322,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	mutex_init(&rvu->rswitch.switch_lock);
 
 	if (rvu->fwdata)
-		ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+		ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
 			  rvu->fwdata->ptp_ext_tstamp);
 
 	return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index e8e65fd7888d..c4d999ef5ab4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -17,6 +17,7 @@
 #include "mbox.h"
 #include "npc.h"
 #include "rvu_reg.h"
+#include "ptp.h"
 
 /* PCI device IDs */
 #define PCI_DEVID_OCTEONTX2_RVU_AF	0xA065
@@ -26,6 +27,7 @@
 #define PCI_SUBSYS_DEVID_98XX		0xB100
 #define PCI_SUBSYS_DEVID_96XX		0xB200
 #define PCI_SUBSYS_DEVID_CN10K_A	0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A	0xBA00
 #define PCI_SUBSYS_DEVID_CNF10K_B	0xBC00
 #define PCI_SUBSYS_DEVID_CN10K_B	0xBD00
 
@@ -634,6 +636,16 @@ static inline bool is_rvu_otx2(struct rvu *rvu)
 		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
 }
 
+static inline bool is_cnf10ka_a0(struct rvu *rvu)
+{
+	struct pci_dev *pdev = rvu->pdev;
+
+	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A &&
+	    (pdev->revision & 0x0F) == 0x0)
+		return true;
+	return false;
+}
+
 static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
 {
 	u64 npc_const3;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 095b2cc4a699..f2b1edf1bb43 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -236,6 +236,11 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
 	linfo = &event->link_uinfo;
 	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+	if (!pfmap) {
+		dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
+			event->cgx_id, event->lmac_id);
+		return;
+	}
 
 	do {
 		pfid = find_first_bit(&pfmap,
@@ -345,7 +350,7 @@ int rvu_cgx_init(struct rvu *rvu)
 	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
 	if (!rvu->cgx_cnt_max) {
 		dev_info(rvu->dev, "No CGX devices found!\n");
-		return -ENODEV;
+		return 0;
 	}
 
 	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
@@ -686,7 +691,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
 {
 	int pf = rvu_get_pf(req->hdr.pcifunc);
 	u8 cgx_id, lmac_id;
-	int rc = 0, i;
+	int rc = 0;
 	u64 cfg;
 
 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -697,8 +702,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
 	rsp->hdr.rc = rc;
 	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
 	/* copy 48 bit mac address to req->mac_addr */
-	for (i = 0; i < ETH_ALEN; i++)
-		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+	u64_to_ether_addr(cfg, rsp->mac_addr);
 	return 0;
 }
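cgx_notify_pfs() above now bails out when the CGX/LMAC pair maps to no PF, then walks the remaining pfmap bits with find_first_bit() in a do/while loop. A simplified sketch of the same walk using for_each_set_bit (illustrative only — the driver keeps its do/while form, and the names here are placeholders):

#include <linux/bitops.h>

/* sketch: deliver one notification per PF bit set in pfmap */
static void notify_each_pf(unsigned long pfmap, int nbits)
{
	int pfid;

	if (!pfmap)
		return;		/* port not mapped with any PF */

	for_each_set_bit(pfid, &pfmap, nbits)
		pr_info("notify PF%d\n", pfid);
}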
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 3b26893efdf8..d30e84803481 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2787,6 +2787,11 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.vlan_tci));
 			break;
+		case NPC_INNER_VID:
+			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
+			seq_printf(s, "mask 0x%x\n",
+				   ntohs(rule->mask.vlan_itci));
+			break;
 		case NPC_TOS:
 			seq_printf(s, "%d ", rule->packet.tos);
 			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
@@ -2827,6 +2832,10 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
 			seq_printf(s, "%d ", ntohs(rule->packet.dport));
 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
 			break;
+		case NPC_IPSEC_SPI:
+			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
+			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
+			break;
 		default:
 			seq_puts(s, "\n");
 			break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 952319453701..237f82082ebe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -20,6 +20,7 @@ static const char * const npc_flow_names[] = {
 	[NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
 	[NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
 	[NPC_OUTER_VID] = "outer vlan id",
+	[NPC_INNER_VID] = "inner vlan id",
 	[NPC_TOS] = "tos",
 	[NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
 	[NPC_SIP_IPV4] = "ipv4 source ip",
@@ -41,6 +42,7 @@ static const char * const npc_flow_names[] = {
 	[NPC_SPORT_SCTP] = "sctp source port",
 	[NPC_DPORT_SCTP] = "sctp destination port",
 	[NPC_LXMB] = "Mcast/Bcast header ",
+	[NPC_IPSEC_SPI] = "SPI ",
 	[NPC_UNKNOWN] = "unknown",
 };
 
@@ -327,6 +329,8 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
 	 */
 	struct npc_key_field *vlan_tag1;
 	struct npc_key_field *vlan_tag2;
+	/* Inner VLAN TCI for double tagged frames */
+	struct npc_key_field *vlan_tag3;
 	u64 *features;
 	u8 start_lid;
 	int i;
@@ -349,6 +353,7 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
 	etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
 	vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
 	vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+	vlan_tag3 = &key_fields[NPC_VLAN_TAG3];
 
 	/* if key profile programmed does not extract Ethertype at all */
 	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
@@ -430,6 +435,12 @@ vlan_tci:
 		goto done;
 	}
 	*features |= BIT_ULL(NPC_OUTER_VID);
+
+	/* If key profile extracts inner vlan tci */
+	if (vlan_tag3->nr_kws) {
+		key_fields[NPC_INNER_VID] = *vlan_tag3;
+		*features |= BIT_ULL(NPC_INNER_VID);
+	}
 done:
 	return;
 }
@@ -512,7 +523,12 @@ do { \
 	NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
 	NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
 	NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
+	NPC_SCAN_HDR(NPC_VLAN_TAG3, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6, 2);
 	NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
+
+	NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
+	NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4);
+
 	/* SMAC follows the DMAC(which is 6 bytes) */
 	NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
 	/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
@@ -564,6 +580,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
 	if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
 		*features &= ~BIT_ULL(NPC_OUTER_VID);
 
+	/* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
+	if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
+	    (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
+		*features |= BIT_ULL(NPC_IPSEC_SPI);
+
 	/* for vlan ethertypes corresponding layer type should be in the key */
 	if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
 		*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
@@ -930,8 +951,13 @@ do { \
 	NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
 		       ntohs(mask->dport), 0);
 
+	NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
+		       ntohl(mask->spi), 0);
+
 	NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
 		       ntohs(mask->vlan_tci), 0);
+	NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
+		       ntohs(mask->vlan_itci), 0);
 
 	NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
 		       mask->next_header, 0);
@@ -1192,7 +1218,7 @@ find_rule:
 	write_req.enable_entry = (u8)enable;
 	/* if counter is available then clear and use it */
 	if (req->set_cntr && rule->has_cntr) {
-		rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
+		rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
 		write_req.set_cntr = 1;
 		write_req.cntr = rule->cntr;
 	}
@@ -1407,12 +1433,13 @@ static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
 
 int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
 				     struct npc_delete_flow_req *req,
-				     struct msg_rsp *rsp)
+				     struct npc_delete_flow_rsp *rsp)
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	struct rvu_npc_mcam_rule *iter, *tmp;
 	u16 pcifunc = req->hdr.pcifunc;
 	struct list_head del_list;
+	int blkaddr;
 
 	INIT_LIST_HEAD(&del_list);
 
@@ -1428,6 +1455,10 @@ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
 			list_move_tail(&iter->list, &del_list);
 		/* single rule */
 		} else if (req->entry == iter->entry) {
+			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+			if (blkaddr)
+				rsp->cntr_val = rvu_read64(rvu, blkaddr,
+							   NPC_AF_MATCH_STATX(iter->cntr));
			list_move_tail(&iter->list, &del_list);
 			break;
 		}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index 7e20282c12d0..d2661e7fabdb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -391,22 +391,6 @@ int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
 }
 
 /**
- * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
- * @mac_addr: MAC address.
- * Return: mdata for exact match table.
- */
-static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
-{
-	u64 mac = 0;
-	int index;
-
-	for (index = ETH_ALEN - 1; index >= 0; index--)
-		mac |= ((u64)*mac_addr++) << (8 * index);
-
-	return mac;
-}
-
-/**
  * rvu_exact_prepare_mdata - Make mdata for mcam entry
  * @mac: MAC address
  * @chan: Channel number.
@@ -416,7 +400,7 @@ static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
  */
 static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
 {
-	u64 ldata = rvu_npc_exact_mac2u64(mac);
+	u64 ldata = ether_addr_to_u64(mac);
 
 	/* Please note that mask is 48bit which excludes chan and ctype.
 	 * Increase mask bits if we need to include them as well.
@@ -604,7 +588,7 @@ static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
 					 u8 ctype, u16 chan, u8 *mac_addr)
 
 {
-	u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
+	u64 ldata = ether_addr_to_u64(mac_addr);
 
 	/* Enable or disable */
 	u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
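The two NPC_SCAN_HDR() lines for NPC_IPSEC_SPI in rvu_npc_fs.c above encode where the SPI sits in each header: byte offset 4 within an AH header (layer LD) but byte offset 0 within an ESP header (layer LE). That follows directly from the RFC 4302/4303 layouts — a sketch of the leading fields, with simplified types for illustration:

#include <stdint.h>

/* RFC 4302: Authentication Header -- SPI is at byte offset 4 */
struct ah_prefix {
	uint8_t  next_hdr;
	uint8_t  payload_len;
	uint16_t reserved;
	uint32_t spi;		/* offset 4, hence NPC_LT_LD_AH, 4, 4 */
};

/* RFC 4303: ESP header -- SPI is the very first field */
struct esp_prefix {
	uint32_t spi;		/* offset 0, hence NPC_LT_LE_ESP, 0, 4 */
	uint32_t seq_no;
};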
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
index 592b317f4637..854045ed3b06 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -158,6 +158,7 @@ void rvu_switch_enable(struct rvu *rvu)
 	struct npc_mcam_alloc_entry_req alloc_req = { 0 };
 	struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
 	struct npc_delete_flow_req uninstall_req = { 0 };
+	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
 	struct npc_mcam_free_entry_req free_req = { 0 };
 	struct rvu_switch *rswitch = &rvu->rswitch;
 	struct msg_rsp rsp;
@@ -197,7 +198,7 @@ void rvu_switch_enable(struct rvu *rvu)
 uninstall_rules:
 	uninstall_req.start = rswitch->start_entry;
 	uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
-	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
 	kfree(rswitch->entry2pcifunc);
 free_entries:
 	free_req.all = 1;
@@ -209,6 +210,7 @@ exit:
 void rvu_switch_disable(struct rvu *rvu)
 {
 	struct npc_delete_flow_req uninstall_req = { 0 };
+	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
 	struct npc_mcam_free_entry_req free_req = { 0 };
 	struct rvu_switch *rswitch = &rvu->rswitch;
 	struct rvu_hwinfo *hw = rvu->hw;
@@ -250,7 +252,7 @@ void rvu_switch_disable(struct rvu *rvu)
 	uninstall_req.start = rswitch->start_entry;
 	uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
 	free_req.all = 1;
-	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
 	rswitch->used_entries = 0;
 	kfree(rswitch->entry2pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 77c8f650f7ac..8511906cb4e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -7,6 +7,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <net/page_pool/helpers.h>
 #include <net/tso.h>
 #include <linux/bitfield.h>
 
@@ -774,6 +775,7 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
 			rsp->schq_list[lvl][schq];
 
 	pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
+	pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
 
 	return 0;
 }
@@ -804,6 +806,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq)
 
 	mutex_unlock(&pfvf->mbox.lock);
 }
+EXPORT_SYMBOL(otx2_txschq_free_one);
 
 void otx2_txschq_stop(struct otx2_nic *pfvf)
 {
@@ -1432,7 +1435,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
 	}
 
 	pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
-	pp_params.pool_size = numptrs;
+	pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
 	pp_params.nid = NUMA_NO_NODE;
 	pp_params.dev = pfvf->dev;
 	pp_params.dma_dir = DMA_FROM_DEVICE;
@@ -1903,31 +1906,16 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
 		}
 	}
 
-	if ((changed & NETIF_F_HW_TC) && tc) {
-		if (!pfvf->flow_cfg->max_flows) {
-			netdev_err(netdev,
-				   "Can't enable TC, MCAM entries not allocated\n");
-			return -EINVAL;
-		}
-	}
-
 	if ((changed & NETIF_F_HW_TC) && !tc &&
-	    pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+	    otx2_tc_flower_rule_cnt(pfvf)) {
 		netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
 		return -EBUSY;
 	}
 
 	if ((changed & NETIF_F_NTUPLE) && ntuple &&
-	    (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
-		netdev_err(netdev,
-			   "Can't enable NTUPLE when TC is active, disable TC and retry\n");
-		return -EINVAL;
-	}
-
-	if ((changed & NETIF_F_HW_TC) && tc &&
-	    (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+	    otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) {
 		netdev_err(netdev,
-			   "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+			   "Can't enable NTUPLE when TC flower offload is active, disable TC rules and retry\n");
 		return -EINVAL;
 	}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index ba8091131ec0..4c6032ee7800 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -224,6 +224,7 @@ struct otx2_hw {
 
 	/* NIX */
 	u8	txschq_link_cfg_lvl;
+	u8	txschq_aggr_lvl_rr_prio;
 	u16	txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
 	u16	matchall_ipolicer;
 	u32	dwrr_mtu;
@@ -325,6 +326,7 @@ struct otx2_ptp {
 	struct ptp_pin_desc extts_config;
 	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
 	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+	u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
 	struct delayed_work synctstamp_work;
 	u64 tstamp;
 	u32 base_ns;
@@ -360,13 +362,8 @@ struct otx2_flow_config {
 	struct list_head	flow_list;
 	u32			dmacflt_max_flows;
 	u16			max_flows;
-};
-
-struct otx2_tc_info {
-	/* hash table to store TC offloaded flows */
-	struct rhashtable	flow_table;
-	struct rhashtable_params flow_ht_params;
-	unsigned long		*tc_entries_bitmap;
+	struct list_head	flow_list_tc;
+	bool			ntuple;
 };
 
 struct dev_hw_ops {
@@ -491,7 +488,6 @@ struct otx2_nic {
 	/* NPC MCAM */
 	struct otx2_flow_config	*flow_cfg;
 	struct otx2_mac_table	*mac_table;
-	struct otx2_tc_info	tc_info;
 
 	u64			reset_count;
 	struct work_struct	reset_task;
@@ -945,6 +941,15 @@ static inline u64 otx2_convert_rate(u64 rate)
 	return converted_rate;
 }
 
+static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
+{
+	/* return here if MCAM entries not allocated */
+	if (!pfvf->flow_cfg)
+		return 0;
+
+	return pfvf->flow_cfg->nr_flows;
+}
+
 /* MSI-X APIs */
 void otx2_free_cints(struct otx2_nic *pfvf, int n);
 void otx2_set_cints_affinity(struct otx2_nic *pfvf);
@@ -1063,7 +1068,6 @@ int otx2_init_tc(struct otx2_nic *nic);
 void otx2_shutdown_tc(struct otx2_nic *nic);
 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 		  void *type_data);
-int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
 
 /* CGX/RPM DMAC filters support */
 int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
 int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
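otx2_pool_init() above now caps pool_size at OTX2_PAGE_POOL_SZ instead of sizing the page-pool recycle ring to the full buffer count. A minimal sketch of that setup under the same assumptions (dev and numptrs supplied by the caller; OTX2_PAGE_POOL_SZ comes from the driver, and otx2_pp_create() is an illustrative name):

#include <net/page_pool/helpers.h>

/* sketch: create a page pool with a capped cache, mirroring
 * the otx2_pool_init() change above
 */
static struct page_pool *otx2_pp_create(struct device *dev, int numptrs)
{
	struct page_pool_params pp_params = { 0 };

	pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
	pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dev = dev;
	pp_params.dma_dir = DMA_FROM_DEVICE;

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}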
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index ccaf97bb1ce0..bfddbff7bcdf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -70,7 +70,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
 	 * link config level. These rest of the scheduler can be
 	 * same as hw.txschq_list.
 	 */
-	for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
+	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
 		req->schq[lvl] = 1;
 
 	rc = otx2_sync_mbox_msg(&pfvf->mbox);
@@ -83,7 +83,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
 		return PTR_ERR(rsp);
 
 	/* Setup transmit scheduler list */
-	for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
 		if (!rsp->schq[lvl])
 			return -ENOSPC;
 
@@ -125,19 +125,12 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
 
 static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
 {
-	struct nix_txsch_free_req *free_req;
+	int lvl;
 
-	mutex_lock(&pfvf->mbox.lock);
 	/* free PFC TLx nodes */
-	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
-	if (!free_req) {
-		mutex_unlock(&pfvf->mbox.lock);
-		return -ENOMEM;
-	}
-
-	free_req->flags = TXSCHQ_FREE_ALL;
-	otx2_sync_mbox_msg(&pfvf->mbox);
-	mutex_unlock(&pfvf->mbox.lock);
+	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
+		otx2_txschq_free_one(pfvf, lvl,
+				     pfvf->pfc_schq_list[lvl][prio]);
 
 	pfvf->pfc_alloc_status[prio] = false;
 	return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 63ef7c41d18d..4e1130496573 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -41,7 +41,6 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
 		return 0;
 
 	otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
-	otx2_tc_alloc_ent_bitmap(pfvf);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index c47d91da32dc..9efcec549834 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -764,6 +764,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
 	struct otx2_nic *pfvf = netdev_priv(dev);
 	int ret = -EOPNOTSUPP;
 
+	pfvf->flow_cfg->ntuple = ntuple;
 	switch (nfc->cmd) {
 	case ETHTOOL_SRXFH:
 		ret = otx2_set_rss_hash_opts(pfvf, nfc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 2d7713a1a153..4762dbea64a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -276,6 +276,7 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
 	flow_cfg = pfvf->flow_cfg;
 	INIT_LIST_HEAD(&flow_cfg->flow_list);
+	INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
 	flow_cfg->max_flows = 0;
 
 	return 0;
@@ -298,6 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
+	INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
 
 	/* Allocate bare minimum number of MCAM entries needed for
 	 * unicast and ntuple filters.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 9551b422622a..70b9065f7d10 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -16,6 +16,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/bitfield.h>
+#include <net/page_pool/types.h>
 
 #include "otx2_reg.h"
 #include "otx2_common.h"
@@ -2027,7 +2028,7 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
 #endif
 	int txq;
 
-	qos_enabled = (netdev->real_num_tx_queues > pf->hw.tx_queues) ? true : false;
+	qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues;
 	if (unlikely(qos_enabled)) {
 		/* This smp_load_acquire() pairs with smp_store_release() in
 		 * otx2_qos_root_add() called from htb offload root creation
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 896b2f9bac34..3a72b0793d4a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -10,6 +10,65 @@
 #include "otx2_common.h"
 #include "otx2_ptp.h"
 
+static bool is_tstmp_atomic_update_supported(struct otx2_ptp *ptp)
+{
+	struct ptp_get_cap_rsp *rsp;
+	struct msg_req *req;
+	int err;
+
+	if (!ptp->nic)
+		return false;
+
+	mutex_lock(&ptp->nic->mbox.lock);
+	req = otx2_mbox_alloc_msg_ptp_get_cap(&ptp->nic->mbox);
+	if (!req) {
+		mutex_unlock(&ptp->nic->mbox.lock);
+		return false;
+	}
+
+	err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+	if (err) {
+		mutex_unlock(&ptp->nic->mbox.lock);
+		return false;
+	}
+	rsp = (struct ptp_get_cap_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+							  &req->hdr);
+	mutex_unlock(&ptp->nic->mbox.lock);
+
+	if (IS_ERR(rsp))
+		return false;
+
+	if (rsp->cap & PTP_CAP_HW_ATOMIC_UPDATE)
+		return true;
+
+	return false;
+}
+
+static int otx2_ptp_hw_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+	struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+					    ptp_info);
+	struct otx2_nic *pfvf = ptp->nic;
+	struct ptp_req *req;
+	int rc;
+
+	if (!ptp->nic)
+		return -ENODEV;
+
+	mutex_lock(&pfvf->mbox.lock);
+	req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+	if (!req) {
+		mutex_unlock(&pfvf->mbox.lock);
+		return -ENOMEM;
+	}
+	req->op = PTP_OP_ADJTIME;
+	req->delta = delta;
+	rc = otx2_sync_mbox_msg(&ptp->nic->mbox);
+	mutex_unlock(&pfvf->mbox.lock);
+
+	return rc;
+}
+
 static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
 {
 	struct ptp_req *req;
@@ -37,6 +96,49 @@ static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
 	return rsp->clk;
 }
 
+static int otx2_ptp_hw_gettime(struct ptp_clock_info *ptp_info,
+			       struct timespec64 *ts)
+{
+	struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+					    ptp_info);
+	u64 tstamp;
+
+	tstamp = otx2_ptp_get_clock(ptp);
+
+	*ts = ns_to_timespec64(tstamp);
+	return 0;
+}
+
+static int otx2_ptp_hw_settime(struct ptp_clock_info *ptp_info,
+			       const struct timespec64 *ts)
+{
+	struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+					    ptp_info);
+	struct otx2_nic *pfvf = ptp->nic;
+	struct ptp_req *req;
+	u64 nsec;
+	int rc;
+
+	if (!ptp->nic)
+		return -ENODEV;
+
+	nsec = timespec64_to_ns(ts);
+
+	mutex_lock(&pfvf->mbox.lock);
+	req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+	if (!req) {
+		mutex_unlock(&pfvf->mbox.lock);
+		return -ENOMEM;
+	}
+
+	req->op = PTP_OP_SET_CLOCK;
+	req->clk = nsec;
+	rc = otx2_sync_mbox_msg(&ptp->nic->mbox);
+	mutex_unlock(&pfvf->mbox.lock);
+
+	return rc;
+}
+
 static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
 {
 	struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -124,16 +226,7 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
 	return rsp->clk;
 }
 
-static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp)
-{
-	struct otx2_nic *pfvf = ptp->nic;
-
-	mutex_lock(&pfvf->mbox.lock);
-	*tstamp = timecounter_read(&ptp->time_counter);
-	mutex_unlock(&pfvf->mbox.lock);
-}
-
-static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+static int otx2_ptp_tc_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
 {
 	struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
 					    ptp_info);
@@ -146,32 +239,33 @@ static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
 	return 0;
 }
 
-static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
-			    struct timespec64 *ts)
+static int otx2_ptp_tc_gettime(struct ptp_clock_info *ptp_info,
+			       struct timespec64 *ts)
 {
 	struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
 					    ptp_info);
 	u64 tstamp;
 
-	otx2_get_ptpclock(ptp, &tstamp);
+	mutex_lock(&ptp->nic->mbox.lock);
+	tstamp = timecounter_read(&ptp->time_counter);
+	mutex_unlock(&ptp->nic->mbox.lock);
 	*ts = ns_to_timespec64(tstamp);
 
 	return 0;
 }
 
-static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
-			    const struct timespec64 *ts)
+static int otx2_ptp_tc_settime(struct ptp_clock_info *ptp_info,
+			       const struct timespec64 *ts)
 {
 	struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
 					    ptp_info);
-	struct otx2_nic *pfvf = ptp->nic;
 	u64 nsec;
 
 	nsec = timespec64_to_ns(ts);
 
-	mutex_lock(&pfvf->mbox.lock);
+	mutex_lock(&ptp->nic->mbox.lock);
 	timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
-	mutex_unlock(&pfvf->mbox.lock);
+	mutex_unlock(&ptp->nic->mbox.lock);
 
 	return 0;
 }
@@ -190,6 +284,12 @@ static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
 	return 0;
 }
 
+static u64 otx2_ptp_hw_tstamp2time(const struct timecounter *time_counter, u64 tstamp)
+{
+	/* On HW which supports atomic updates, timecounter is not initialized */
+	return tstamp;
+}
+
 static void otx2_ptp_extts_check(struct work_struct *work)
 {
 	struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
@@ -204,7 +304,7 @@ static void otx2_ptp_extts_check(struct work_struct *work)
 	if (tstmp != ptp->last_extts) {
 		event.type = PTP_CLOCK_EXTTS;
 		event.index = 0;
-		event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
+		event.timestamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstmp);
 		ptp_clock_event(ptp->ptp_clock, &event);
 		new_thresh = tstmp % 500000000;
 		if (ptp->thresh != new_thresh) {
@@ -229,7 +329,7 @@ static void otx2_sync_tstamp(struct work_struct *work)
 	tstamp = otx2_ptp_get_clock(ptp);
 	mutex_unlock(&pfvf->mbox.lock);
 
-	ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+	ptp->tstamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstamp);
 	ptp->base_ns = tstamp % NSEC_PER_SEC;
 
 	schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
@@ -302,15 +402,6 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
 
 	ptp_ptr->nic = pfvf;
 
-	cc = &ptp_ptr->cycle_counter;
-	cc->read = ptp_cc_read;
-	cc->mask = CYCLECOUNTER_MASK(64);
-	cc->mult = 1;
-	cc->shift = 0;
-
-	timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
-			 ktime_to_ns(ktime_get_real()));
-
 	snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
 	ptp_ptr->extts_config.index = 0;
 	ptp_ptr->extts_config.func = PTP_PF_NONE;
@@ -324,13 +415,33 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
 		.pps = 0,
 		.pin_config = &ptp_ptr->extts_config,
 		.adjfine = otx2_ptp_adjfine,
-		.adjtime = otx2_ptp_adjtime,
-		.gettime64 = otx2_ptp_gettime,
-		.settime64 = otx2_ptp_settime,
 		.enable = otx2_ptp_enable,
 		.verify = otx2_ptp_verify_pin,
 	};
 
+	/* Check whether hardware supports atomic updates to timestamp */
+	if (is_tstmp_atomic_update_supported(ptp_ptr)) {
+		ptp_ptr->ptp_info.adjtime = otx2_ptp_hw_adjtime;
+		ptp_ptr->ptp_info.gettime64 = otx2_ptp_hw_gettime;
+		ptp_ptr->ptp_info.settime64 = otx2_ptp_hw_settime;
+
+		ptp_ptr->ptp_tstamp2nsec = otx2_ptp_hw_tstamp2time;
+	} else {
+		ptp_ptr->ptp_info.adjtime = otx2_ptp_tc_adjtime;
+		ptp_ptr->ptp_info.gettime64 = otx2_ptp_tc_gettime;
+		ptp_ptr->ptp_info.settime64 = otx2_ptp_tc_settime;
+
+		cc = &ptp_ptr->cycle_counter;
+		cc->read = ptp_cc_read;
+		cc->mask = CYCLECOUNTER_MASK(64);
+		cc->mult = 1;
+		cc->shift = 0;
+		ptp_ptr->ptp_tstamp2nsec = timecounter_cyc2time;
+
+		timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
+				 ktime_to_ns(ktime_get_real()));
+	}
+
 	INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
 
 	ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
@@ -387,7 +498,7 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
 	if (!pfvf->ptp)
 		return -ENODEV;
 
-	*tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+	*tsns = pfvf->ptp->ptp_tstamp2nsec(&pfvf->ptp->time_counter, tstamp);
 
 	return 0;
 }
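On hardware without atomic updates the driver keeps the software timecounter fallback: with mult = 1 and shift = 0 the cycle counter already ticks in nanoseconds, so timecounter_cyc2time() only adds the software-maintained offset on top of the raw reading. A condensed sketch of that fallback setup (tc, cc, and raw_read are placeholders for the driver's fields; error handling omitted):

#include <linux/timecounter.h>
#include <linux/ktime.h>

/* sketch: free-running ns counter wrapped in a timecounter, as in the
 * !is_tstmp_atomic_update_supported() branch of otx2_ptp_init() above
 */
static void tc_setup(struct timecounter *tc, struct cyclecounter *cc,
		     u64 (*raw_read)(const struct cyclecounter *cc))
{
	cc->read = raw_read;
	cc->mask = CYCLECOUNTER_MASK(64);
	cc->mult = 1;			/* counter already counts in ns */
	cc->shift = 0;
	timecounter_init(tc, cc, ktime_to_ns(ktime_get_real()));
}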
"vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n", + ntohs(match.key->vlan_tpid), match.key->vlan_id); + return -EOPNOTSUPP; + } + } + } + + if (match.mask->vlan_id || + match.mask->vlan_dei || + match.mask->vlan_priority) { + vlan_tci = match.key->vlan_id | + match.key->vlan_dei << 12 | + match.key->vlan_priority << 13; + + vlan_tci_mask = match.mask->vlan_id | + match.mask->vlan_dei << 12 | + match.mask->vlan_priority << 13; + if (is_inner) { + flow_spec->vlan_itci = htons(vlan_tci); + flow_mask->vlan_itci = htons(vlan_tci_mask); + req->features |= BIT_ULL(NPC_INNER_VID); + } else { + flow_spec->vlan_tci = htons(vlan_tci); + flow_mask->vlan_tci = htons(vlan_tci_mask); + req->features |= BIT_ULL(NPC_OUTER_VID); + } + } + + return 0; +} + static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, struct flow_cls_offload *f, struct npc_install_flow_req *req) @@ -476,15 +510,17 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, dissector = rule->match.dissector; if ((dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_IP)))) { - netdev_info(nic->netdev, "unsupported flow used key 0x%x", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_CVLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_IPSEC) | + BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) { + netdev_info(nic->netdev, "unsupported flow used key 0x%llx", dissector->used_keys); return -EOPNOTSUPP; } @@ -504,6 +540,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, match.key->ip_proto != IPPROTO_UDP && match.key->ip_proto != IPPROTO_SCTP && match.key->ip_proto != IPPROTO_ICMP && + match.key->ip_proto != IPPROTO_ESP && + match.key->ip_proto != IPPROTO_AH && match.key->ip_proto != IPPROTO_ICMPV6)) { netdev_info(nic->netdev, "ip_proto=0x%x not supported\n", @@ -523,6 +561,10 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, req->features |= BIT_ULL(NPC_IPPROTO_ICMP); else if (ip_proto == IPPROTO_ICMPV6) req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); + else if (ip_proto == IPPROTO_ESP) + req->features |= BIT_ULL(NPC_IPPROTO_ESP); + else if (ip_proto == IPPROTO_AH) + req->features |= BIT_ULL(NPC_IPPROTO_AH); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { @@ -567,6 +609,26 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, } } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) { + struct flow_match_ipsec match; + + flow_rule_match_ipsec(rule, &match); + if (!match.mask->spi) { + NL_SET_ERR_MSG_MOD(extack, "spi index not specified"); + return -EOPNOTSUPP; + } + if (ip_proto != IPPROTO_ESP && + ip_proto != IPPROTO_AH) { + NL_SET_ERR_MSG_MOD(extack, + "SPI index is valid only for ESP/AH proto"); + return -EOPNOTSUPP; + } + + flow_spec->spi = match.key->spi; + flow_mask->spi = match.mask->spi; + req->features |= BIT_ULL(NPC_IPSEC_SPI); + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { struct flow_match_ip match; @@ -586,47 +648,19 @@ static int otx2_tc_prepare_flow(struct 
otx2_nic *nic, struct otx2_tc_flow *node, } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_match_vlan match; - u16 vlan_tci, vlan_tci_mask; - - flow_rule_match_vlan(rule, &match); - - if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) { - netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", - ntohs(match.key->vlan_tpid)); - return -EOPNOTSUPP; - } + int ret; - if (!match.mask->vlan_id) { - struct flow_action_entry *act; - int i; - - flow_action_for_each(i, act, &rule->action) { - if (act->id == FLOW_ACTION_DROP) { - netdev_err(nic->netdev, - "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n", - ntohs(match.key->vlan_tpid), - match.key->vlan_id); - return -EOPNOTSUPP; - } - } - } - - if (match.mask->vlan_id || - match.mask->vlan_dei || - match.mask->vlan_priority) { - vlan_tci = match.key->vlan_id | - match.key->vlan_dei << 12 | - match.key->vlan_priority << 13; + ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false); + if (ret) + return ret; + } - vlan_tci_mask = match.mask->vlan_id | - match.mask->vlan_dei << 12 | - match.mask->vlan_priority << 13; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { + int ret; - flow_spec->vlan_tci = htons(vlan_tci); - flow_mask->vlan_tci = htons(vlan_tci_mask); - req->features |= BIT_ULL(NPC_OUTER_VID); - } + ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true); + if (ret) + return ret; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { @@ -707,8 +741,117 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, return otx2_tc_parse_actions(nic, &rule->action, req, f, node); } -static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) +static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct otx2_tc_flow *iter, *tmp; + + if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) + return; + + list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) { + list_del(&iter->list); + kfree(iter); + flow_cfg->nr_flows--; + } +} + +static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg, + unsigned long cookie) { + struct otx2_tc_flow *tmp; + + list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { + if (tmp->cookie == cookie) + return tmp; + } + + return NULL; +} + +static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg, + int index) +{ + struct otx2_tc_flow *tmp; + int i = 0; + + list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { + if (i == index) + return tmp; + i++; + } + + return NULL; +} + +static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node) +{ + struct list_head *pos, *n; + struct otx2_tc_flow *tmp; + + list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { + tmp = list_entry(pos, struct otx2_tc_flow, list); + if (node == tmp) { + list_del(&node->list); + return; + } + } +} + +static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node) +{ + struct list_head *pos, *n; + struct otx2_tc_flow *tmp; + int index = 0; + + /* If the flow list is empty then add the new node */ + if (list_empty(&flow_cfg->flow_list_tc)) { + list_add(&node->list, &flow_cfg->flow_list_tc); + return index; + } + + list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { + tmp = list_entry(pos, struct otx2_tc_flow, list); + if (node->prio < tmp->prio) + break; + index++; + } + + list_add(&node->list, pos->prev); + 
return index; +} + +static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req) +{ + struct npc_install_flow_req *tmp_req; + int err; + + mutex_lock(&nic->mbox.lock); + tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); + if (!tmp_req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + memcpy(tmp_req, req, sizeof(struct npc_install_flow_req)); + /* Send message to AF */ + err = otx2_sync_mbox_msg(&nic->mbox); + if (err) { + netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n", + req->entry); + mutex_unlock(&nic->mbox.lock); + return -EFAULT; + } + + mutex_unlock(&nic->mbox.lock); + return 0; +} + +static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val) +{ + struct npc_delete_flow_rsp *rsp; struct npc_delete_flow_req *req; int err; @@ -729,22 +872,113 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) mutex_unlock(&nic->mbox.lock); return -EFAULT; } + + if (cntr_val) { + rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n", + entry); + mutex_unlock(&nic->mbox.lock); + return -EFAULT; + } + + *cntr_val = rsp->cntr_val; + } + mutex_unlock(&nic->mbox.lock); + return 0; +} + +static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic, + struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node) +{ + struct list_head *pos, *n; + struct otx2_tc_flow *tmp; + int i = 0, index = 0; + u16 cntr_val = 0; + + /* Find and delete the entry from the list and re-install + * all the entries from beginning to the index of the + * deleted entry to higher mcam indexes. + */ + list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { + tmp = list_entry(pos, struct otx2_tc_flow, list); + if (node == tmp) { + list_del(&tmp->list); + break; + } + + otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); + tmp->entry++; + tmp->req.entry = tmp->entry; + tmp->req.cntr_val = cntr_val; + index++; + } + + list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { + if (i == index) + break; + + tmp = list_entry(pos, struct otx2_tc_flow, list); + otx2_add_mcam_flow_entry(nic, &tmp->req); + i++; + } return 0; } +static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic, + struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node) +{ + int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1; + struct otx2_tc_flow *tmp; + int list_idx, i; + u16 cntr_val = 0; + + /* Find the index of the entry(list_idx) whose priority + * is greater than the new entry and re-install all + * the entries from beginning to list_idx to higher + * mcam indexes. 
+ */ + list_idx = otx2_tc_add_to_flow_list(flow_cfg, node); + for (i = 0; i < list_idx; i++) { + tmp = otx2_tc_get_entry_by_index(flow_cfg, i); + if (!tmp) + return -ENOMEM; + + otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); + tmp->entry = flow_cfg->flow_ent[mcam_idx]; + tmp->req.entry = tmp->entry; + tmp->req.cntr_val = cntr_val; + otx2_add_mcam_flow_entry(nic, &tmp->req); + mcam_idx++; + } + + return mcam_idx; +} + +static int otx2_tc_update_mcam_table(struct otx2_nic *nic, + struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node, + bool add_req) +{ + if (add_req) + return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node); + + return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node); +} + static int otx2_tc_del_flow(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { struct otx2_flow_config *flow_cfg = nic->flow_cfg; - struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *flow_node; int err; - flow_node = rhashtable_lookup_fast(&tc_info->flow_table, - &tc_flow_cmd->cookie, - tc_info->flow_ht_params); + flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); if (!flow_node) { netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", tc_flow_cmd->cookie); @@ -772,16 +1006,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, mutex_unlock(&nic->mbox.lock); } - otx2_del_mcam_flow_entry(nic, flow_node->entry); - - WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table, - &flow_node->node, - nic->tc_info.flow_ht_params)); + otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); + otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); kfree_rcu(flow_node, rcu); - - clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap); flow_cfg->nr_flows--; - return 0; } @@ -790,15 +1018,14 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, { struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; struct otx2_flow_config *flow_cfg = nic->flow_cfg; - struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *new_node, *old_node; struct npc_install_flow_req *req, dummy; - int rc, err; + int rc, err, mcam_idx; if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) return -ENOMEM; - if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) { + if (flow_cfg->nr_flows == flow_cfg->max_flows) { NL_SET_ERR_MSG_MOD(extack, "Free MCAM entry not available to add the flow"); return -ENOMEM; @@ -810,6 +1037,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, return -ENOMEM; spin_lock_init(&new_node->lock); new_node->cookie = tc_flow_cmd->cookie; + new_node->prio = tc_flow_cmd->common.prio; memset(&dummy, 0, sizeof(struct npc_install_flow_req)); @@ -820,12 +1048,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, } /* If a flow exists with the same cookie, delete it */ - old_node = rhashtable_lookup_fast(&tc_info->flow_table, - &tc_flow_cmd->cookie, - tc_info->flow_ht_params); + old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); if (old_node) otx2_tc_del_flow(nic, tc_flow_cmd); + mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true); mutex_lock(&nic->mbox.lock); req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); if (!req) { @@ -836,11 +1063,8 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); - - new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, - flow_cfg->max_flows); req->channel = nic->hw.rx_chan_base; - req->entry = 
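Insertion is the mirror image: otx2_tc_update_mcam_table_add_req() grows the block one slot downward, re-installs every rule that outranks the new one into the new bottom slots, and returns the index freed up in the middle, which the add path then programs as req->entry via flow_cfg->flow_ent[], as the hunk continues below. The same array model, insertion side:

/* toy_mcam_add.c: model of otx2_tc_update_mcam_table_add_req().
 * 'list_idx' rules outrank the new one and shift into the new bottom
 * slots; the freed slot is returned.
 * Build: cc -Wall -o toy_mcam_add toy_mcam_add.c
 */
#include <stdio.h>

#define MAX_FLOWS 8

static unsigned long mcam[MAX_FLOWS];

static int insert_rule(unsigned long cookie, int nr_flows, int list_idx)
{
	int mcam_idx = MAX_FLOWS - nr_flows - 1;	/* new bottom slot */

	for (int i = 0; i < list_idx; i++) {
		mcam[mcam_idx] = mcam[mcam_idx + 1];	/* shift down */
		mcam_idx++;
	}
	mcam[mcam_idx] = cookie;
	return mcam_idx;
}

int main(void)
{
	printf("101 -> slot %d\n", insert_rule(101, 0, 0));	/* 7 */
	printf("102 -> slot %d\n", insert_rule(102, 1, 0));	/* 6: outranks 101 */
	printf("103 -> slot %d\n", insert_rule(103, 2, 1));	/* 6: in between */
	return 0;
}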
flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1]; + req->entry = flow_cfg->flow_ent[mcam_idx]; req->intf = NIX_INTF_RX; req->set_cntr = 1; new_node->entry = req->entry; @@ -850,26 +1074,18 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); mutex_unlock(&nic->mbox.lock); - kfree_rcu(new_node, rcu); goto free_leaf; } - mutex_unlock(&nic->mbox.lock); - /* add new flow to flow-table */ - rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node, - nic->tc_info.flow_ht_params); - if (rc) { - otx2_del_mcam_flow_entry(nic, req->entry); - kfree_rcu(new_node, rcu); - goto free_leaf; - } + mutex_unlock(&nic->mbox.lock); + memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req)); - set_bit(new_node->bitpos, tc_info->tc_entries_bitmap); flow_cfg->nr_flows++; - return 0; free_leaf: + otx2_tc_del_from_flow_list(flow_cfg, new_node); + kfree_rcu(new_node, rcu); if (new_node->is_act_police) { mutex_lock(&nic->mbox.lock); @@ -896,16 +1112,13 @@ free_leaf: static int otx2_tc_get_flow_stats(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { - struct otx2_tc_info *tc_info = &nic->tc_info; struct npc_mcam_get_stats_req *req; struct npc_mcam_get_stats_rsp *rsp; struct otx2_tc_flow_stats *stats; struct otx2_tc_flow *flow_node; int err; - flow_node = rhashtable_lookup_fast(&tc_info->flow_table, - &tc_flow_cmd->cookie, - tc_info->flow_ht_params); + flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie); if (!flow_node) { netdev_info(nic->netdev, "tc flow not found for cookie %lx", tc_flow_cmd->cookie); @@ -1053,12 +1266,20 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct otx2_nic *nic = cb_priv; + bool ntuple; if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) return -EOPNOTSUPP; + ntuple = nic->netdev->features & NETIF_F_NTUPLE; switch (type) { case TC_SETUP_CLSFLOWER: + if (ntuple) { + netdev_warn(nic->netdev, + "Can't install TC flower offload rule when NTUPLE is active"); + return -EOPNOTSUPP; + } + return otx2_setup_tc_cls_flower(nic, type_data); case TC_SETUP_CLSMATCHALL: return otx2_setup_tc_ingress_matchall(nic, type_data); @@ -1143,18 +1364,8 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, } EXPORT_SYMBOL(otx2_setup_tc); -static const struct rhashtable_params tc_flow_ht_params = { - .head_offset = offsetof(struct otx2_tc_flow, node), - .key_offset = offsetof(struct otx2_tc_flow, cookie), - .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie), - .automatic_shrinking = true, -}; - int otx2_init_tc(struct otx2_nic *nic) { - struct otx2_tc_info *tc = &nic->tc_info; - int err; - /* Exclude receive queue 0 being used for police action */ set_bit(0, &nic->rq_bmap); @@ -1164,25 +1375,12 @@ int otx2_init_tc(struct otx2_nic *nic) return -EINVAL; } - err = otx2_tc_alloc_ent_bitmap(nic); - if (err) - return err; - - tc->flow_ht_params = tc_flow_ht_params; - err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params); - if (err) { - kfree(tc->tc_entries_bitmap); - tc->tc_entries_bitmap = NULL; - } - return err; + return 0; } EXPORT_SYMBOL(otx2_init_tc); void otx2_shutdown_tc(struct otx2_nic *nic) { - struct otx2_tc_info *tc = &nic->tc_info; - - kfree(tc->tc_entries_bitmap); - rhashtable_destroy(&tc->flow_table); + otx2_destroy_tc_flow_list(nic); } EXPORT_SYMBOL(otx2_shutdown_tc); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index b5d689eeff80..9e3bfbe5c480 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -23,6 +23,8 @@ #define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN) #define OTX2_MIN_MTU 60 +#define OTX2_PAGE_POOL_SZ 2048 + #define OTX2_MAX_GSO_SEGS 255 #define OTX2_MAX_FRAGS_IN_SQE 9 diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index d3a76c5ccda8..1e77bbf5d22a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -19,6 +19,9 @@ #define OTX2_QOS_CLASS_NONE 0 #define OTX2_QOS_DEFAULT_PRIO 0xF #define OTX2_QOS_INVALID_SQ 0xFFFF +#define OTX2_QOS_INVALID_TXSCHQ_IDX 0xFFFF +#define CN10K_MAX_RR_WEIGHT GENMASK_ULL(13, 0) +#define OTX2_MAX_RR_QUANTUM GENMASK_ULL(23, 0) static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf) { @@ -65,11 +68,24 @@ static void otx2_qos_get_regaddr(struct otx2_qos_node *node, } } +static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum) +{ + u32 weight; + + weight = quantum / pfvf->hw.dwrr_mtu; + if (quantum % pfvf->hw.dwrr_mtu) + weight += 1; + + return weight; +} + static void otx2_config_sched_shaping(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct nix_txschq_config *cfg, int *num_regs) { + u32 rr_weight; + u32 quantum; u64 maxrate; otx2_qos_get_regaddr(node, cfg, *num_regs); @@ -86,8 +102,17 @@ static void otx2_config_sched_shaping(struct otx2_nic *pfvf, return; } - /* configure priority */ - cfg->regval[*num_regs] = (node->schq - node->parent->prio_anchor) << 24; + /* configure priority/quantum */ + if (node->is_static) { + cfg->regval[*num_regs] = + (node->schq - node->parent->prio_anchor) << 24; + } else { + quantum = node->quantum ? + node->quantum : pfvf->tx_max_pktlen; + rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum); + cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 | + rr_weight; + } (*num_regs)++; /* configure PIR */ @@ -195,9 +220,8 @@ static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf, cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq); cfg->regval[0] = (u64)parent->prio_anchor << 32; - if (parent->level == NIX_TXSCH_LVL_TL1) - cfg->regval[0] |= (u64)TXSCH_TL1_DFLT_RR_PRIO << 1; - + cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ? 
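For a DWRR child the scheduling register now carries the group priority in the upper field and, in the low bits, a weight derived from the htb quantum: the hunk above rounds the quantum up to whole DWRR MTUs (a value the AF advertises via pfvf->hw.dwrr_mtu, so the 8192 below is only an example). A sketch of the conversion and of the regval assembled in otx2_config_sched_shaping():

/* dwrr_weight.c: the quantum -> weight conversion of
 * otx2_qos_quantum_to_dwrr_weight() plus the register packing.
 * Build: cc -Wall -o dwrr_weight dwrr_weight.c
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t quantum_to_weight(uint32_t quantum, uint32_t dwrr_mtu)
{
	return (quantum + dwrr_mtu - 1) / dwrr_mtu;	/* round up */
}

int main(void)
{
	uint32_t dwrr_mtu = 8192;		/* example pfvf->hw.dwrr_mtu */
	uint32_t quantum = 10000;		/* htb class quantum, bytes */
	uint32_t weight = quantum_to_weight(quantum, dwrr_mtu);	/* -> 2 */
	uint64_t dwrr_prio = 3;			/* parent->child_dwrr_prio */
	uint64_t regval = dwrr_prio << 24 | weight;

	printf("weight=%u regval=0x%llx\n", weight,
	       (unsigned long long)regval);
	return 0;
}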
+ parent->child_dwrr_prio : 0) << 1; cfg->num_regs++; rc = otx2_sync_mbox_msg(&pfvf->mbox); @@ -315,9 +339,14 @@ static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent, list_for_each_entry(node, &parent->child_list, list) { otx2_qos_fill_cfg_tl(node, cfg); - cfg->schq_contig[node->level]++; otx2_qos_fill_cfg_schq(node, cfg); } + + /* Assign the required number of transmit schedular queues under the + * given class + */ + cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt + + parent->max_static_prio + 1; } static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf, @@ -378,10 +407,12 @@ otx2_qos_alloc_root(struct otx2_nic *pfvf) return ERR_PTR(-ENOMEM); node->parent = NULL; - if (!is_otx2_vf(pfvf->pcifunc)) + if (!is_otx2_vf(pfvf->pcifunc)) { node->level = NIX_TXSCH_LVL_TL1; - else + } else { node->level = NIX_TXSCH_LVL_TL2; + node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; + } WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); node->classid = OTX2_QOS_ROOT_CLASSID; @@ -401,9 +432,13 @@ static int otx2_qos_add_child_node(struct otx2_qos_node *parent, struct otx2_qos_node *tmp_node; struct list_head *tmp; + if (node->prio > parent->max_static_prio) + parent->max_static_prio = node->prio; + for (tmp = head->next; tmp != head; tmp = tmp->next) { tmp_node = list_entry(tmp, struct otx2_qos_node, list); - if (tmp_node->prio == node->prio) + if (tmp_node->prio == node->prio && + tmp_node->is_static) return -EEXIST; if (tmp_node->prio > node->prio) { list_add_tail(&node->list, tmp); @@ -434,6 +469,10 @@ static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf, txschq_node->rate = 0; txschq_node->ceil = 0; txschq_node->prio = 0; + txschq_node->quantum = 0; + txschq_node->is_static = true; + txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; + txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX; mutex_lock(&pfvf->qos.qos_lock); list_add_tail(&txschq_node->list, &node->child_schq_list); @@ -459,7 +498,7 @@ static struct otx2_qos_node * otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf, struct otx2_qos_node *parent, u16 classid, u32 prio, u64 rate, u64 ceil, - u16 qid) + u32 quantum, u16 qid, bool static_cfg) { struct otx2_qos_node *node; int err; @@ -476,6 +515,10 @@ otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf, node->rate = otx2_convert_rate(rate); node->ceil = otx2_convert_rate(ceil); node->prio = prio; + node->quantum = quantum; + node->is_static = static_cfg; + node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; + node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX; __set_bit(qid, pfvf->qos.qos_sq_bmap); @@ -622,12 +665,28 @@ static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf, } pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl; + pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; out: mutex_unlock(&mbox->lock); return rc; } +static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf, + struct otx2_qos_cfg *cfg) +{ + int lvl, idx, schq; + + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) { + if (!cfg->schq_index_used[lvl][idx]) { + schq = cfg->schq_contig_list[lvl][idx]; + otx2_txschq_free_one(pfvf, lvl, schq); + } + } + } +} + static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) @@ -652,9 +711,11 @@ static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf, list_for_each_entry(tmp, &node->child_list, list) { otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg); cnt = cfg->static_node_pos[tmp->level]; - tmp->schq = cfg->schq_contig_list[tmp->level][cnt]; + 
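Queue reservation is no longer one-per-child: static children are pinned 1:1 to their priority slot, so the contiguous block must span prios 0..max_static_prio even when some of them are unused, plus one slot per DWRR member. A worked example with a hypothetical child set:

/* reserve_cnt.c: the schq_contig sizing from otx2_qos_fill_cfg_tl().
 * Build: cc -Wall -o reserve_cnt reserve_cnt.c
 */
#include <stdio.h>

int main(void)
{
	int max_static_prio = 5;	/* static children at prio 0, 2, 5 */
	int child_dwrr_cnt = 3;		/* one DWRR group of three */

	/* slots 0..5 stay addressable for the 1:1 static mapping (six
	 * slots, three of them holes) plus one slot per DWRR member */
	printf("reserve %d queues\n", child_dwrr_cnt + max_static_prio + 1);
	return 0;
}

The holes are not leaked: queues that end up unclaimed are handed back to the AF by otx2_qos_free_unused_txschq() in the same hunk.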
tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx]; + cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true; if (cnt == 0) - node->prio_anchor = tmp->schq; + node->prio_anchor = + cfg->schq_contig_list[tmp->level][0]; cfg->static_node_pos[tmp->level]++; otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg); } @@ -667,9 +728,87 @@ static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf, mutex_lock(&pfvf->qos.qos_lock); otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg); otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg); + otx2_qos_free_unused_txschq(pfvf, cfg); mutex_unlock(&pfvf->qos.qos_lock); } +static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf, + struct otx2_qos_node *tmp, + unsigned long *child_idx_bmap, + int child_cnt) +{ + int idx; + + if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX) + return; + + /* assign static nodes 1:1 prio mapping first, then remaining nodes */ + for (idx = 0; idx < child_cnt; idx++) { + if (tmp->is_static && tmp->prio == idx && + !test_bit(idx, child_idx_bmap)) { + tmp->txschq_idx = idx; + set_bit(idx, child_idx_bmap); + return; + } else if (!tmp->is_static && idx >= tmp->prio && + !test_bit(idx, child_idx_bmap)) { + tmp->txschq_idx = idx; + set_bit(idx, child_idx_bmap); + return; + } + } +} + +static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf, + struct otx2_qos_node *node) +{ + unsigned long *child_idx_bmap; + struct otx2_qos_node *tmp; + int child_cnt; + + list_for_each_entry(tmp, &node->child_list, list) + tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX; + + /* allocate child index array */ + child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1; + child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt), + sizeof(unsigned long), + GFP_KERNEL); + if (!child_idx_bmap) + return -ENOMEM; + + list_for_each_entry(tmp, &node->child_list, list) + otx2_qos_assign_base_idx_tl(pfvf, tmp); + + /* assign base index of static priority children first */ + list_for_each_entry(tmp, &node->child_list, list) { + if (!tmp->is_static) + continue; + __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap, + child_cnt); + } + + /* assign base index of dwrr priority children */ + list_for_each_entry(tmp, &node->child_list, list) + __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap, + child_cnt); + + kfree(child_idx_bmap); + + return 0; +} + +static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf, + struct otx2_qos_node *node) +{ + int ret = 0; + + mutex_lock(&pfvf->qos.qos_lock); + ret = otx2_qos_assign_base_idx_tl(pfvf, node); + mutex_unlock(&pfvf->qos.qos_lock); + + return ret; +} + static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) @@ -761,8 +900,10 @@ static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg) for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) { - schq = cfg->schq_contig_list[lvl][idx]; - otx2_txschq_free_one(pfvf, lvl, schq); + if (cfg->schq_index_used[lvl][idx]) { + schq = cfg->schq_contig_list[lvl][idx]; + otx2_txschq_free_one(pfvf, lvl, schq); + } } } } @@ -838,6 +979,10 @@ static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf, if (ret) return -ENOSPC; + ret = otx2_qos_assign_base_idx(pfvf, node); + if (ret) + return -ENOMEM; + if (!(pfvf->netdev->flags & IFF_UP)) { otx2_qos_txschq_fill_cfg(pfvf, node, cfg); return 0; @@ -894,6 +1039,13 @@ static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defc goto free_root_node; } + /* Update TL1 RR PRIO */ + if (root->level 
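Which slot inside that reserved block each child gets is decided by __otx2_qos_assign_base_idx_tl(): static children first claim the slot equal to their priority, then DWRR members take the first free slot at or above the group priority. A standalone model (names illustrative):

/* assign_idx.c: model of __otx2_qos_assign_base_idx_tl().
 * Build: cc -Wall -o assign_idx assign_idx.c
 */
#include <stdbool.h>
#include <stdio.h>

struct child {
	const char *name;	/* illustrative */
	bool is_static;
	int prio;
	int idx;		/* assigned slot, -1 = none yet */
};

static void assign(struct child *c, bool *used, int nslots)
{
	for (int i = 0; i < nslots; i++) {
		if (used[i])
			continue;
		if ((c->is_static && c->prio == i) ||
		    (!c->is_static && i >= c->prio)) {
			c->idx = i;
			used[i] = true;
			return;
		}
	}
}

int main(void)
{
	/* statics at prio 0 and 2, a two-member DWRR group at prio 3:
	 * block size = 2 (dwrr) + 2 (max static prio) + 1 = 5 slots */
	struct child kids[] = {
		{ "s0",  true,  0, -1 }, { "s2",  true,  2, -1 },
		{ "d3a", false, 3, -1 }, { "d3b", false, 3, -1 },
	};
	bool used[5] = { false };

	for (int pass = 0; pass < 2; pass++)	/* statics first */
		for (int i = 0; i < 4; i++)
			if (kids[i].is_static == (pass == 0))
				assign(&kids[i], used, 5);

	for (int i = 0; i < 4; i++)
		printf("%s -> slot %d\n", kids[i].name, kids[i].idx);
	return 0;	/* s0->0, s2->2, d3a->3, d3b->4; slot 1 is a hole */
}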
== NIX_TXSCH_LVL_TL1) { + root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio; + netdev_dbg(pfvf->netdev, + "TL1 DWRR Priority %d\n", root->child_dwrr_prio); + } + if (!(pfvf->netdev->flags & IFF_UP) || root->level == NIX_TXSCH_LVL_TL1) { root->schq = new_cfg->schq_list[root->level][0]; @@ -940,37 +1092,126 @@ static int otx2_qos_root_destroy(struct otx2_nic *pfvf) return 0; } +static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum) +{ + u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum); + int err = 0; + + /* Max Round robin weight supported by octeontx2 and CN10K + * is different. Validate accordingly + */ + if (is_dev_otx2(pfvf->pdev)) + err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0; + else if (rr_weight > CN10K_MAX_RR_WEIGHT) + err = -EINVAL; + + return err; +} + +static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent, + struct netlink_ext_ack *extack, + struct otx2_nic *pfvf, + u64 prio, u64 quantum) +{ + int err; + + err = otx2_qos_validate_quantum(pfvf, quantum); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value"); + return err; + } + + if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) { + parent->child_dwrr_prio = prio; + } else if (prio != parent->child_dwrr_prio) { + NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed"); + return -EOPNOTSUPP; + } + + return 0; +} + static int otx2_qos_validate_configuration(struct otx2_qos_node *parent, struct netlink_ext_ack *extack, struct otx2_nic *pfvf, - u64 prio) + u64 prio, bool static_cfg) { - if (test_bit(prio, parent->prio_bmap)) { - NL_SET_ERR_MSG_MOD(extack, - "Static priority child with same priority exists"); + if (prio == parent->child_dwrr_prio && static_cfg) { + NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists"); return -EEXIST; } - if (prio == TXSCH_TL1_DFLT_RR_PRIO) { + if (static_cfg && test_bit(prio, parent->prio_bmap)) { NL_SET_ERR_MSG_MOD(extack, - "Priority is reserved for Round Robin"); - return -EINVAL; + "Static priority child with same priority exists"); + return -EEXIST; } return 0; } +static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio) +{ + /* For PF, root node dwrr priority is static */ + if (parent->level == NIX_TXSCH_LVL_TL1) + return; + + if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) { + parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; + clear_bit(prio, parent->prio_bmap); + } +} + +static bool is_qos_node_dwrr(struct otx2_qos_node *parent, + struct otx2_nic *pfvf, + u64 prio) +{ + struct otx2_qos_node *node; + bool ret = false; + + if (parent->child_dwrr_prio == prio) + return true; + + mutex_lock(&pfvf->qos.qos_lock); + list_for_each_entry(node, &parent->child_list, list) { + if (prio == node->prio) { + if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO && + parent->child_dwrr_prio != prio) + continue; + + if (otx2_qos_validate_quantum(pfvf, node->quantum)) { + netdev_err(pfvf->netdev, + "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d", + node->classid, node->quantum, + node->prio); + break; + } + /* mark old node as dwrr */ + node->is_static = false; + parent->child_dwrr_cnt++; + parent->child_static_cnt--; + ret = true; + break; + } + } + mutex_unlock(&pfvf->qos.qos_lock); + + return ret; +} + static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, u32 parent_classid, u64 rate, u64 ceil, - u64 prio, struct netlink_ext_ack *extack) + u64 prio, u32 quantum, + struct netlink_ext_ack *extack) { struct otx2_qos_cfg *old_cfg, *new_cfg; 
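The weight field width differs between silicon generations, so the quantum is validated before being accepted: CN10K's field is 14 bits wide, while the OcteonTx2 path checks the computed weight against the 24-bit OTX2_MAX_RR_QUANTUM constant, exactly as in the hunk above. A userspace rendition of the check:

/* quantum_check.c: the range check from otx2_qos_validate_quantum().
 * Build: cc -Wall -o quantum_check quantum_check.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define CN10K_MAX_RR_WEIGHT	GENMASK_ULL(13, 0)	/* 14-bit field */
#define OTX2_MAX_RR_QUANTUM	GENMASK_ULL(23, 0)	/* 24-bit field */

static int validate_quantum(bool is_otx2, uint32_t quantum,
			    uint32_t dwrr_mtu)
{
	uint64_t rr_weight = (quantum + dwrr_mtu - 1) / dwrr_mtu;

	if (is_otx2)
		return rr_weight > OTX2_MAX_RR_QUANTUM ? -1 : 0;
	return rr_weight > CN10K_MAX_RR_WEIGHT ? -1 : 0;
}

int main(void)
{
	/* with a hypothetical 8 KB DWRR MTU, a 1 GB quantum yields a
	 * weight of 131072, which overflows CN10K's 14-bit field */
	printf("cn10k: %d\n", validate_quantum(false, 1U << 30, 8192));
	printf("otx2:  %d\n", validate_quantum(true, 1U << 30, 8192));
	return 0;
}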
struct otx2_qos_node *node, *parent; int qid, ret, err; + bool static_cfg; netdev_dbg(pfvf->netdev, - "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld\n", - classid, parent_classid, rate, ceil, prio); + "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n", + classid, parent_classid, rate, ceil, prio, quantum); if (prio > OTX2_QOS_MAX_PRIO) { NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7"); @@ -978,6 +1219,12 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, goto out; } + if (!quantum || quantum > INT_MAX) { + NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes"); + ret = -EOPNOTSUPP; + goto out; + } + /* get parent node */ parent = otx2_sw_node_find(pfvf, parent_classid); if (!parent) { @@ -991,10 +1238,24 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, goto out; } - ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio); + static_cfg = !is_qos_node_dwrr(parent, pfvf, prio); + ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio, + static_cfg); if (ret) goto out; + if (!static_cfg) { + ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio, + quantum); + if (ret) + goto out; + } + + if (static_cfg) + parent->child_static_cnt++; + else + parent->child_dwrr_cnt++; + set_bit(prio, parent->prio_bmap); /* read current txschq configuration */ @@ -1019,7 +1280,7 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, /* allocate and initialize a new child node */ node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate, - ceil, qid); + ceil, quantum, qid, static_cfg); if (IS_ERR(node)) { NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node"); ret = PTR_ERR(node); @@ -1067,6 +1328,11 @@ free_node: free_old_cfg: kfree(old_cfg); reset_prio: + if (static_cfg) + parent->child_static_cnt--; + else + parent->child_dwrr_cnt--; + clear_bit(prio, parent->prio_bmap); out: return ret; @@ -1074,10 +1340,11 @@ out: static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, u16 child_classid, u64 rate, u64 ceil, u64 prio, - struct netlink_ext_ack *extack) + u32 quantum, struct netlink_ext_ack *extack) { struct otx2_qos_cfg *old_cfg, *new_cfg; struct otx2_qos_node *node, *child; + bool static_cfg; int ret, err; u16 qid; @@ -1091,6 +1358,12 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, goto out; } + if (!quantum || quantum > INT_MAX) { + NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes"); + ret = -EOPNOTSUPP; + goto out; + } + /* find node related to classid */ node = otx2_sw_node_find(pfvf, classid); if (!node) { @@ -1105,6 +1378,19 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, goto out; } + static_cfg = !is_qos_node_dwrr(node, pfvf, prio); + if (!static_cfg) { + ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio, + quantum); + if (ret) + goto out; + } + + if (static_cfg) + node->child_static_cnt++; + else + node->child_dwrr_cnt++; + set_bit(prio, node->prio_bmap); /* store the qid to assign to leaf node */ @@ -1127,7 +1413,8 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, /* allocate and initialize a new child node */ child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid, - prio, rate, ceil, qid); + prio, rate, ceil, quantum, + qid, static_cfg); if (IS_ERR(child)) { NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node"); ret = PTR_ERR(child); @@ 
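Whether a new class becomes a static child or a DWRR member is inferred rather than stated by the user: a class whose priority is already taken joins (or forms) the parent's one DWRR group, and is_qos_node_dwrr() demotes the previously static holder of that priority into the group. A simplified decision model (it omits the quantum re-validation of the demoted node):

/* classify.c: simplified static-vs-DWRR decision from
 * otx2_qos_leaf_alloc_queue()/is_qos_node_dwrr(). Only one DWRR group
 * per parent is allowed; its priority is parked at 0xF when unused.
 * Build: cc -Wall -o classify classify.c
 */
#include <stdbool.h>
#include <stdio.h>

#define PRIO_NONE 0xF	/* OTX2_QOS_DEFAULT_PRIO */

struct parent {
	int child_dwrr_prio;		/* prio of the single DWRR group */
	unsigned long prio_bmap;	/* prios held by static children */
};

static bool child_is_dwrr(const struct parent *p, int prio)
{
	if (p->child_dwrr_prio == prio)
		return true;	/* joins the existing group */

	/* a second class at an occupied prio forms the group, demoting
	 * the static holder - unless another group already exists */
	return p->child_dwrr_prio == PRIO_NONE &&
	       (p->prio_bmap & (1UL << prio));
}

int main(void)
{
	struct parent p = { PRIO_NONE, 1UL << 2 };	/* static child at prio 2 */

	printf("prio 3 -> %s\n", child_is_dwrr(&p, 3) ? "dwrr" : "static");
	printf("prio 2 -> %s\n", child_is_dwrr(&p, 2) ? "dwrr" : "static");
	return 0;
}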
-1178,6 +1465,10 @@ free_node: free_old_cfg: kfree(old_cfg); reset_prio: + if (static_cfg) + node->child_static_cnt--; + else + node->child_dwrr_cnt--; clear_bit(prio, node->prio_bmap); out: return ret; @@ -1187,6 +1478,7 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, struct netlink_ext_ack *extack) { struct otx2_qos_node *node, *parent; + int dwrr_del_node = false; u64 prio; u16 qid; @@ -1202,12 +1494,27 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, prio = node->prio; qid = node->qid; + if (!node->is_static) + dwrr_del_node = true; + otx2_qos_disable_sq(pfvf, node->qid); otx2_qos_destroy_node(pfvf, node); pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; - clear_bit(prio, parent->prio_bmap); + if (dwrr_del_node) { + parent->child_dwrr_cnt--; + } else { + parent->child_static_cnt--; + clear_bit(prio, parent->prio_bmap); + } + + /* Reset DWRR priority if all dwrr nodes are deleted */ + if (!parent->child_dwrr_cnt) + otx2_reset_dwrr_prio(parent, prio); + + if (!parent->child_static_cnt) + parent->max_static_prio = 0; return 0; } @@ -1217,6 +1524,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force { struct otx2_qos_node *node, *parent; struct otx2_qos_cfg *new_cfg; + int dwrr_del_node = false; u64 prio; int err; u16 qid; @@ -1241,11 +1549,26 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force return -ENOENT; } + if (!node->is_static) + dwrr_del_node = true; + /* destroy the leaf node */ otx2_qos_destroy_node(pfvf, node); pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; - clear_bit(prio, parent->prio_bmap); + if (dwrr_del_node) { + parent->child_dwrr_cnt--; + } else { + parent->child_static_cnt--; + clear_bit(prio, parent->prio_bmap); + } + + /* Reset DWRR priority if all dwrr nodes are deleted */ + if (!parent->child_dwrr_cnt) + otx2_reset_dwrr_prio(parent, prio); + + if (!parent->child_static_cnt) + parent->max_static_prio = 0; /* create downstream txschq entries to parent */ err = otx2_qos_alloc_txschq_node(pfvf, parent); @@ -1298,10 +1621,12 @@ void otx2_qos_config_txschq(struct otx2_nic *pfvf) if (!root) return; - err = otx2_qos_txschq_config(pfvf, root); - if (err) { - netdev_err(pfvf->netdev, "Error update txschq configuration\n"); - goto root_destroy; + if (root->level != NIX_TXSCH_LVL_TL1) { + err = otx2_qos_txschq_config(pfvf, root); + if (err) { + netdev_err(pfvf->netdev, "Error update txschq configuration\n"); + goto root_destroy; + } } err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL); @@ -1334,7 +1659,8 @@ int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb) res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid, htb->parent_classid, htb->rate, htb->ceil, - htb->prio, htb->extack); + htb->prio, htb->quantum, + htb->extack); if (res < 0) return res; htb->qid = res; @@ -1343,7 +1669,7 @@ int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb) return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid, htb->classid, htb->rate, htb->ceil, htb->prio, - htb->extack); + htb->quantum, htb->extack); case TC_HTB_LEAF_DEL: return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack); case TC_HTB_LEAF_DEL_LAST: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h index 19773284be27..221bd0438f60 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h @@ -35,6 +35,7 @@ struct otx2_qos_cfg { int 
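Deletion has to unwind the same accounting: DWRR members only decrement the group count (the priority bit belongs to the group, not to the member), and the group priority and max_static_prio are reset once their last user is gone so the slots become reusable. A compact model of the bookkeeping in otx2_qos_leaf_del()/otx2_qos_leaf_del_last(); the TL1-root special case of otx2_reset_dwrr_prio(), where the PF's root DWRR priority is fixed, is left out:

/* del_account.c: parent bookkeeping on leaf class deletion.
 * Build: cc -Wall -o del_account del_account.c
 */
#include <stdio.h>

#define PRIO_NONE 0xF

struct parent {
	int child_static_cnt, child_dwrr_cnt;
	int child_dwrr_prio, max_static_prio;
	unsigned long prio_bmap;
};

static void account_delete(struct parent *p, int prio, int was_static)
{
	if (was_static) {
		p->child_static_cnt--;
		p->prio_bmap &= ~(1UL << prio);	/* free the static slot */
	} else {
		p->child_dwrr_cnt--;
	}

	if (!p->child_dwrr_cnt) {	/* last group member gone */
		p->child_dwrr_prio = PRIO_NONE;
		p->prio_bmap &= ~(1UL << prio);
	}
	if (!p->child_static_cnt)	/* last static child gone */
		p->max_static_prio = 0;
}

int main(void)
{
	/* one static child at prio 5, one DWRR member at prio 3 */
	struct parent p = { 1, 1, 3, 5, (1UL << 5) | (1UL << 3) };

	account_delete(&p, 3, 0);
	account_delete(&p, 5, 1);
	printf("dwrr_prio=0x%x max_static=%d bmap=%lx\n",
	       p.child_dwrr_prio, p.max_static_prio, p.prio_bmap);
	return 0;
}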
dwrr_node_pos[NIX_TXSCH_LVL_CNT]; u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + bool schq_index_used[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; }; struct otx2_qos { @@ -59,10 +60,18 @@ struct otx2_qos_node { u64 ceil; u32 classid; u32 prio; - u16 schq; /* hw txschq */ + u32 quantum; + /* hw txschq */ + u16 schq; u16 qid; u16 prio_anchor; + u16 max_static_prio; + u16 child_dwrr_cnt; + u16 child_static_cnt; + u16 child_dwrr_prio; + u16 txschq_idx; /* txschq allocation index */ u8 level; + bool is_static; };
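The new schq_index_used[][] matrix in otx2_qos_cfg closes the loop on the over-reservation described earlier: every contiguous queue actually claimed during otx2_qos_txschq_fill_cfg_tl() is marked, and otx2_qos_free_unused_txschq() returns the holes to the AF. A minimal model of that mark-and-sweep, with made-up queue numbers:

/* free_unused.c: model of the schq_index_used[] bookkeeping.
 * Build: cc -Wall -o free_unused free_unused.c
 */
#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 5

int main(void)
{
	int schq_contig_list[NSLOTS] = { 40, 41, 42, 43, 44 };
	bool schq_index_used[NSLOTS] = { false };

	/* slots claimed by the index-assignment pass: 0, 2, 3, 4
	 * (slot 1 was only reserved to keep static prios 1:1) */
	schq_index_used[0] = true;
	schq_index_used[2] = schq_index_used[3] = schq_index_used[4] = true;

	for (int i = 0; i < NSLOTS; i++)
		if (!schq_index_used[i])
			printf("returning hw queue %d to the AF\n",
			       schq_contig_list[i]);
	return 0;
}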