Diffstat (limited to 'drivers/net/ethernet')
15 files changed, 980 insertions, 340 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a6f390fdb971..d3e06cec8317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -42,7 +42,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
 # Core extra
 #
 mlx5_core-$(CONFIG_MLX5_ESWITCH)   += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
-				      ecpf.o rdma.o
+				      ecpf.o rdma.o eswitch_offloads_chains.o
 mlx5_core-$(CONFIG_MLX5_MPFS)      += lib/mpfs.o
 mlx5_core-$(CONFIG_VXLAN)          += lib/vxlan.o
 mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index acd946f2ddbe..3bc2ac3d53fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -58,6 +58,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 						  struct ethtool_rx_flow_spec *fs,
 						  int num_tuples)
 {
+	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5e_ethtool_table *eth_ft;
 	struct mlx5_flow_namespace *ns;
 	struct mlx5_flow_table *ft;
@@ -102,9 +103,11 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
 						       flow_table_properties_nic_receive.log_max_ft_size)),
 			   MLX5E_ETHTOOL_NUM_ENTRIES);
-	ft = mlx5_create_auto_grouped_flow_table(ns, prio,
-						 table_size,
-						 MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
+
+	ft_attr.prio = prio;
+	ft_attr.max_fte = table_size;
+	ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
+	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
 	if (IS_ERR(ft))
 		return (void *)ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f175cb24bb67..446eb4d6c983 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -41,6 +41,7 @@
 #include <net/ipv6_stubs.h>
 
 #include "eswitch.h"
+#include "eswitch_offloads_chains.h"
 #include "en.h"
 #include "en_rep.h"
 #include "en_tc.h"
@@ -1247,8 +1248,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
 static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
 				 void *cb_priv)
 {
-	struct flow_cls_offload *f = type_data;
-	struct flow_cls_offload cls_flower;
+	struct flow_cls_offload tmp, *f = type_data;
 	struct mlx5e_priv *priv = cb_priv;
 	struct mlx5_eswitch *esw;
 	unsigned long flags;
@@ -1261,16 +1261,30 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
 
 	switch (type) {
 	case TC_SETUP_CLSFLOWER:
-		if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
+		memcpy(&tmp, f, sizeof(*f));
+
+		if (!mlx5_esw_chains_prios_supported(esw) ||
+		    tmp.common.chain_index)
 			return -EOPNOTSUPP;
 
 		/* Re-use tc offload path by moving the ft flow to the
 		 * reserved ft chain.
+		 *
+		 * FT offload can use prio range [0, INT_MAX], so we normalize
+		 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+		 * as with tc, where prio 0 isn't supported.
+		 *
+		 * We only support chain 0 of FT offload.
 		 */
-		memcpy(&cls_flower, f, sizeof(*f));
-		cls_flower.common.chain_index = FDB_FT_CHAIN;
-		err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
-		memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
+		if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+			return -EOPNOTSUPP;
+		if (tmp.common.chain_index != 0)
+			return -EOPNOTSUPP;
+
+		tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+		tmp.common.prio++;
+		err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
+		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
 		return err;
 	default:
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index a05158472ed1..4291db78efc9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1133,6 +1133,7 @@ static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *pr
 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
 	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
 	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
 	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
 	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index db614bd6bd1f..26f559b453dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,6 +51,7 @@
 #include "en_rep.h"
 #include "en_tc.h"
 #include "eswitch.h"
+#include "eswitch_offloads_chains.h"
 #include "fs_core.h"
 #include "en/port.h"
 #include "en/tc_tun.h"
@@ -960,7 +961,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
 	mutex_lock(&priv->fs.tc.t_lock);
 	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
-		int tc_grp_size, tc_tbl_size;
+		struct mlx5_flow_table_attr ft_attr = {};
+		int tc_grp_size, tc_tbl_size, tc_num_grps;
 		u32 max_flow_counter;
 
 		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
@@ -970,13 +972,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
 		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
 				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+		tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
 
+		ft_attr.prio = MLX5E_TC_PRIO;
+		ft_attr.max_fte = tc_tbl_size;
+		ft_attr.level = MLX5E_TC_FT_LEVEL;
+		ft_attr.autogroup.max_num_groups = tc_num_grps;
 		priv->fs.tc.t =
 			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
-							    MLX5E_TC_PRIO,
-							    tc_tbl_size,
-							    MLX5E_TC_TABLE_NUM_GROUPS,
-							    MLX5E_TC_FT_LEVEL, 0);
+							    &ft_attr);
 		if (IS_ERR(priv->fs.tc.t)) {
 			mutex_unlock(&priv->fs.tc.t_lock);
 			NL_SET_ERR_MSG_MOD(extack,
@@ -1080,7 +1084,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
 	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	slow_attr->split_count = 0;
-	slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
 
 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
 	if (!IS_ERR(rule))
@@ -1097,7 +1101,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
 	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	slow_attr->split_count = 0;
-	slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
 	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
 	flow_flag_clear(flow, SLOW);
 }
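The conversion repeated across the hunks above replaces the six-argument mlx5_create_auto_grouped_flow_table() with a single mlx5_flow_table_attr argument, so new autogroup knobs (such as num_reserved_entries, used later in this series) can be added without touching every caller. A minimal sketch of a caller after the change; the priority, size, and group-count values here are illustrative only:

    #include <linux/mlx5/fs.h>

    static struct mlx5_flow_table *
    example_create_table(struct mlx5_flow_namespace *ns)
    {
        struct mlx5_flow_table_attr ft_attr = {};

        ft_attr.prio = 0;        /* assumed: first priority in this ns */
        ft_attr.max_fte = 1024;  /* assumed table size */
        ft_attr.level = 0;
        ft_attr.autogroup.max_num_groups = 4;
        return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
    }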
@@ -1157,19 +1161,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		      struct netlink_ext_ack *extack)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
-	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
 	struct net_device *out_dev, *encap_dev = NULL;
 	struct mlx5_fc *counter = NULL;
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_priv *out_priv;
 	bool encap_valid = true;
+	u32 max_prio, max_chain;
 	int err = 0;
 	int out_index;
 
-	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
 		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
 		return -EOPNOTSUPP;
 	}
@@ -1179,11 +1182,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 	 * FDB_FT_CHAIN which is outside tc range.
 	 * See mlx5e_rep_setup_ft_cb().
 	 */
+	max_chain = mlx5_esw_chains_get_chain_range(esw);
 	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
 		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
 		return -EOPNOTSUPP;
 	}
 
+	max_prio = mlx5_esw_chains_get_prio_range(esw);
 	if (attr->prio > max_prio) {
 		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
 		return -EOPNOTSUPP;
@@ -3466,7 +3471,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 			break;
 		case FLOW_ACTION_GOTO: {
 			u32 dest_chain = act->chain_index;
-			u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+			u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
 
 			if (ft_flow) {
 				NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2c965ad0d744..05b13a1e829c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -277,6 +277,7 @@ enum {
 
 static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
 {
+	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_core_dev *dev = esw->dev;
 	struct mlx5_flow_namespace *root_ns;
 	struct mlx5_flow_table *fdb;
@@ -289,8 +290,10 @@ static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
 	}
 
 	/* num FTE 2, num FG 2 */
-	fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
-						  2, 2, 0, 0);
+	ft_attr.prio = LEGACY_VEPA_PRIO;
+	ft_attr.max_fte = 2;
+	ft_attr.autogroup.max_num_groups = 2;
+	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
 	if (IS_ERR(fdb)) {
 		err = PTR_ERR(fdb);
 		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ffcff3ba3701..4472710ccc9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -157,7 +157,7 @@ enum offloads_fdb_flags {
 	ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
 };
 
-extern const unsigned int ESW_POOLS[4];
+struct mlx5_esw_chains_priv;
 
 struct mlx5_eswitch_fdb {
 	union {
@@ -182,14 +182,7 @@ struct mlx5_eswitch_fdb {
 			struct mlx5_flow_handle *miss_rule_multi;
 			int vlan_push_pop_refcount;
 
-			struct {
-				struct mlx5_flow_table *fdb;
-				u32 num_rules;
-			} fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
-			/* Protects fdb_prio table */
-			struct mutex fdb_prio_lock;
-
-			int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+			struct mlx5_esw_chains_priv *esw_chains_priv;
 		} offloads;
 	};
 	u32 flags;
@@ -355,15 +348,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
 			  struct mlx5_flow_handle *rule,
 			  struct mlx5_esw_flow_attr *attr);
 
-bool
-mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
-
-u16
-mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
-
-u32
-mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
-
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
 				  struct mlx5_flow_destination *dest);
@@ -388,6 +372,11 @@ enum {
 	MLX5_ESW_DEST_ENCAP_VALID  = BIT(1),
 };
 
+enum {
+	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
+	MLX5_ESW_ATTR_FLAG_SLOW_PATH     = BIT(1),
+};
+
 struct mlx5_esw_flow_attr {
 	struct mlx5_eswitch_rep *in_rep;
 	struct mlx5_core_dev	*in_mdev;
@@ -401,7 +390,6 @@ struct mlx5_esw_flow_attr {
 	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
 	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
 	u8	total_vlan;
-	bool	vlan_handled;
 	struct {
 		u32 flags;
 		struct mlx5_eswitch_rep *rep;
@@ -416,6 +404,7 @@ struct mlx5_esw_flow_attr {
 	u32	chain;
 	u16	prio;
 	u32	dest_chain;
+	u32	flags;
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 243a5440867e..a6d0b62ef234 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,6 +37,7 @@
 #include <linux/mlx5/fs.h>
 #include "mlx5_core.h"
 #include "eswitch.h"
+#include "eswitch_offloads_chains.h"
 #include "rdma.h"
 #include "en.h"
 #include "fs_core.h"
@@ -47,10 +48,6 @@
  * one for multicast.
  */
 #define MLX5_ESW_MISS_FLOWS (2)
-
-#define fdb_prio_table(esw, chain, prio, level) \
-	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
-
 #define UPLINK_REP_INDEX 0
 
 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
@@ -62,32 +59,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
 	return &esw->offloads.vport_reps[idx];
 }
 
-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-
-bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
-{
-	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
-}
-
-u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
-{
-	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
-		return FDB_TC_MAX_CHAIN;
-
-	return 0;
-}
-
-u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
-{
-	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
-		return FDB_TC_MAX_PRIO;
-
-	return 1;
-}
-
 static bool
 esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
 				   const struct mlx5_vport *vport)
@@ -175,10 +146,17 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	}
 
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		if (attr->dest_chain) {
-			struct mlx5_flow_table *ft;
+		struct mlx5_flow_table *ft;
 
-			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
+		if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+			dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
+			i++;
+		} else if (attr->dest_chain) {
+			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
+						       1, 0);
 			if (IS_ERR(ft)) {
 				rule = ERR_CAST(ft);
 				goto err_create_goto_table;
@@ -223,7 +201,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		flow_act.modify_hdr = attr->modify_hdr;
 
-	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
+	fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
+					!!split);
 	if (IS_ERR(fdb)) {
 		rule = ERR_CAST(fdb);
 		goto err_esw_get;
@@ -242,10 +221,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	return rule;
 
 err_add_rule:
-	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
 err_esw_get:
-	if (attr->dest_chain)
-		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
+		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
 err_create_goto_table:
 	return rule;
 }
@@ -262,13 +241,13 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	struct mlx5_flow_handle *rule;
 	int i;
 
-	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
+	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
 	if (IS_ERR(fast_fdb)) {
 		rule = ERR_CAST(fast_fdb);
 		goto err_get_fast;
 	}
 
-	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
+	fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
 	if (IS_ERR(fwd_fdb)) {
 		rule = ERR_CAST(fwd_fdb);
 		goto err_get_fwd;
@@ -296,6 +275,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
+	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
 	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
 
 	if (IS_ERR(rule))
@@ -305,9 +285,9 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	return rule;
 
 add_err:
-	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
+	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
 err_get_fwd:
-	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 err_get_fast:
 	return rule;
 }
@@ -332,12 +312,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
 	atomic64_dec(&esw->offloads.num_flows);
 
 	if (fwd_rule) {
-		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
-		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 	} else {
-		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+					  !!split);
 		if (attr->dest_chain)
-			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
 	}
 }
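With dest_chain no longer overloaded to encode the slow path (the old FDB_TC_SLOW_PATH_CHAIN sentinel), per-flow booleans move into the new attr->flags bitfield declared in eswitch.h above. A minimal sketch of the resulting idiom, using only the two flags this series introduces; the helper names are hypothetical:

    /* Hypothetical helpers around the new bitfield. */
    static inline void esw_attr_set_slow_path(struct mlx5_esw_flow_attr *attr)
    {
        attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
    }

    static inline bool esw_attr_vlan_handled(const struct mlx5_esw_flow_attr *attr)
    {
        return attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
    }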
@@ -451,7 +432,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
 	if (err)
 		goto unlock;
 
-	attr->vlan_handled = false;
+	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
 
 	vport = esw_vlan_action_get_vport(attr, push, pop);
 
@@ -459,7 +440,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
 		/* tracks VF --> wire rules without vlan push action */
 		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
 			vport->vlan_refcount++;
-			attr->vlan_handled = true;
+			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
 		}
 
 		goto unlock;
@@ -490,7 +471,7 @@ skip_set_push:
 	}
 out:
 	if (!err)
-		attr->vlan_handled = true;
+		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
 unlock:
 	mutex_unlock(&esw->state_lock);
 	return err;
@@ -508,7 +489,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
 		return 0;
 
-	if (!attr->vlan_handled)
+	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
 		return 0;
 
 	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -582,8 +563,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
 	dest.vport.num = vport;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 
-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
-					&flow_act, &dest, 1);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+					spec, &flow_act, &dest, 1);
 	if (IS_ERR(flow_rule))
 		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
 out:
@@ -824,8 +805,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 	dest.vport.num = esw->manager_vport;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 
-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
-					&flow_act, &dest, 1);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+					spec, &flow_act, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		err = PTR_ERR(flow_rule);
 		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
@@ -839,8 +820,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
 			      outer_headers.dmac_47_16);
 	dmac_v[0] = 0x01;
-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
-					&flow_act, &dest, 1);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+					spec, &flow_act, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		err = PTR_ERR(flow_rule);
 		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
@@ -855,174 +836,6 @@ out:
 	return err;
 }
 
-#define ESW_OFFLOADS_NUM_GROUPS  4
-
-/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
- * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
- * for each flow table pool. We can allocate up to 16M of each pool,
- * and we keep track of how much we used via put/get_sz_to_pool.
- * Firmware doesn't report any of this for now.
- * ESW_POOL is expected to be sorted from large to small
- */
-#define ESW_SIZE (16 * 1024 * 1024)
-const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
-				    64 * 1024, 4 * 1024 };
-
-static int
-get_sz_from_pool(struct mlx5_eswitch *esw)
-{
-	int sz = 0, i;
-
-	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
-		if (esw->fdb_table.offloads.fdb_left[i]) {
-			--esw->fdb_table.offloads.fdb_left[i];
-			sz = ESW_POOLS[i];
-			break;
-		}
-	}
-
-	return sz;
-}
-
-static void
-put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
-		if (sz >= ESW_POOLS[i]) {
-			++esw->fdb_table.offloads.fdb_left[i];
-			break;
-		}
-	}
-}
-
-static struct mlx5_flow_table *
-create_next_size_table(struct mlx5_eswitch *esw,
-		       struct mlx5_flow_namespace *ns,
-		       u16 table_prio,
-		       int level,
-		       u32 flags)
-{
-	struct mlx5_flow_table *fdb;
-	int sz;
-
-	sz = get_sz_from_pool(esw);
-	if (!sz)
-		return ERR_PTR(-ENOSPC);
-
-	fdb = mlx5_create_auto_grouped_flow_table(ns,
-						  table_prio,
-						  sz,
-						  ESW_OFFLOADS_NUM_GROUPS,
-						  level,
-						  flags);
-	if (IS_ERR(fdb)) {
-		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
-			 (int)PTR_ERR(fdb), table_prio, level, sz);
-		put_sz_to_pool(esw, sz);
-	}
-
-	return fdb;
-}
-
-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
-	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_table *fdb = NULL;
-	struct mlx5_flow_namespace *ns;
-	int table_prio, l = 0;
-	u32 flags = 0;
-
-	if (chain == FDB_TC_SLOW_PATH_CHAIN)
-		return esw->fdb_table.offloads.slow_fdb;
-
-	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
-	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
-	if (fdb) {
-		/* take ref on earlier levels as well */
-		while (level >= 0)
-			fdb_prio_table(esw, chain, prio, level--).num_rules++;
-		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-		return fdb;
-	}
-
-	ns = mlx5_get_fdb_sub_ns(dev, chain);
-	if (!ns) {
-		esw_warn(dev, "Failed to get FDB sub namespace\n");
-		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-		return ERR_PTR(-EOPNOTSUPP);
-	}
-
-	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
-		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
-			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-
-	table_prio = prio - 1;
-
-	/* create earlier levels for correct fs_core lookup when
-	 * connecting tables
-	 */
-	for (l = 0; l <= level; l++) {
-		if (fdb_prio_table(esw, chain, prio, l).fdb) {
-			fdb_prio_table(esw, chain, prio, l).num_rules++;
-			continue;
-		}
-
-		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
-		if (IS_ERR(fdb)) {
-			l--;
-			goto err_create_fdb;
-		}
-
-		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
-		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
-	}
-
-	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-	return fdb;
-
-err_create_fdb:
-	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-	if (l >= 0)
-		esw_put_prio_table(esw, chain, prio, l);
-
-	return fdb;
-}
-
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
-	int l;
-
-	if (chain == FDB_TC_SLOW_PATH_CHAIN)
-		return;
-
-	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
-	for (l = level; l >= 0; l--) {
-		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
-			continue;
-
-		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
-		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
-		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
-	}
-
-	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-}
-
-static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
-{
-	/* If lazy creation isn't supported, deref the fast path tables */
-	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
-		esw_put_prio_table(esw, 0, 1, 1);
-		esw_put_prio_table(esw, 0, 1, 0);
-	}
-}
-
 #define MAX_PF_SQ 256
 #define MAX_SQ_NVPORTS 32
 
@@ -1055,16 +868,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_core_dev *dev = esw->dev;
-	u32 *flow_group_in, max_flow_counter;
 	struct mlx5_flow_namespace *root_ns;
 	struct mlx5_flow_table *fdb = NULL;
-	int table_size, ix, err = 0, i;
+	u32 flags = 0, *flow_group_in;
+	int table_size, ix, err = 0;
 	struct mlx5_flow_group *g;
-	u32 flags = 0, fdb_max;
 	void *match_criteria;
 	u8 *dmac;
 
 	esw_debug(esw->dev, "Create offloads FDB Tables\n");
+
 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
 	if (!flow_group_in)
 		return -ENOMEM;
@@ -1083,19 +896,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 		goto ns_err;
 	}
 
-	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
-			   MLX5_CAP_GEN(dev, max_flow_counter_15_0);
-	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-
-	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
-		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
-		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
-		  fdb_max);
-
-	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
-		esw->fdb_table.offloads.fdb_left[i] =
-			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
-
 	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
 		MLX5_ESW_MISS_FLOWS + esw->total_vports;
 
@@ -1118,16 +918,10 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 	}
 	esw->fdb_table.offloads.slow_fdb = fdb;
 
-	/* If lazy creation isn't supported, open the fast path tables now */
-	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
-	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
-		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
-		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
-		esw_get_prio_table(esw, 0, 1, 0);
-		esw_get_prio_table(esw, 0, 1, 1);
-	} else {
-		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
-		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+	err = mlx5_esw_chains_create(esw);
+	if (err) {
+		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+		goto fdb_chains_err;
 	}
 
 	/* create send-to-vport group */
@@ -1218,7 +1012,8 @@ miss_err:
 peer_miss_err:
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 send_vport_err:
-	esw_destroy_offloads_fast_fdb_tables(esw);
+	mlx5_esw_chains_destroy(esw);
+fdb_chains_err:
 	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
 slow_fdb_err:
 	/* Holds true only as long as DMFS is the default */
@@ -1240,8 +1035,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 
+	mlx5_esw_chains_destroy(esw);
 	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
-	esw_destroy_offloads_fast_fdb_tables(esw);
 	/* Holds true only as long as DMFS is the default */
 	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
 				     MLX5_FLOW_STEERING_MODE_DMFS);
@@ -2111,7 +1906,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 	total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
 
 	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
-	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
 
 	err = esw_create_uplink_offloads_acl_tables(esw);
 	if (err)
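The table-lifetime contract the offloads code now relies on: mlx5_esw_chains_get_table() returns a refcounted table (creating it, and any lower levels, on first use), and every successful get must be paired with a put. A minimal sketch of a rule-insertion path under that contract; the chain/prio/level values are illustrative:

    static struct mlx5_flow_handle *
    example_add_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec,
                     struct mlx5_flow_act *act,
                     struct mlx5_flow_destination *dest, int ndest)
    {
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_table *fdb;

        fdb = mlx5_esw_chains_get_table(esw, 0, 1, 0); /* chain 0, prio 1 */
        if (IS_ERR(fdb))
            return ERR_CAST(fdb);

        rule = mlx5_add_flow_rules(fdb, spec, act, dest, ndest);
        if (IS_ERR(rule))
            /* drop the reference taken above on failure */
            mlx5_esw_chains_put_table(esw, 0, 1, 0);

        return rule;
    }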
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
new file mode 100644
index 000000000000..3a60eb5360bd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
@@ -0,0 +1,758 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies.
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/fs.h>
+
+#include "eswitch_offloads_chains.h"
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en.h"
+
+#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
+#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
+#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
+#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
+#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
+#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
+#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
+#define fdb_ignore_flow_level_supported(esw) \
+	(MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
+
+#define ESW_OFFLOADS_NUM_GROUPS  4
+
+/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
+ * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
+ * for each flow table pool. We can allocate up to 16M of each pool,
+ * and we keep track of how much we used via get_next_avail_sz_from_pool.
+ * Firmware doesn't report any of this for now.
+ * ESW_POOLS is expected to be sorted from large to small and match firmware
+ * pools.
+ */
+#define ESW_SIZE (16 * 1024 * 1024)
+const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
+				   1 * 1024 * 1024,
+				   64 * 1024,
+				   4 * 1024, };
+
+struct mlx5_esw_chains_priv {
+	struct rhashtable chains_ht;
+	struct rhashtable prios_ht;
+	/* Protects above chains_ht and prios_ht */
+	struct mutex lock;
+
+	struct mlx5_flow_table *tc_end_fdb;
+
+	int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+};
+
+struct fdb_chain {
+	struct rhash_head node;
+
+	u32 chain;
+
+	int ref;
+
+	struct mlx5_eswitch *esw;
+	struct list_head prios_list;
+};
+
+struct fdb_prio_key {
+	u32 chain;
+	u32 prio;
+	u32 level;
+};
+
+struct fdb_prio {
+	struct rhash_head node;
+	struct list_head list;
+
+	struct fdb_prio_key key;
+
+	int ref;
+
+	struct fdb_chain *fdb_chain;
+	struct mlx5_flow_table *fdb;
+	struct mlx5_flow_table *next_fdb;
+	struct mlx5_flow_group *miss_group;
+	struct mlx5_flow_handle *miss_rule;
+};
+
+static const struct rhashtable_params chain_params = {
+	.head_offset = offsetof(struct fdb_chain, node),
+	.key_offset = offsetof(struct fdb_chain, chain),
+	.key_len = sizeof_field(struct fdb_chain, chain),
+	.automatic_shrinking = true,
+};
+
+static const struct rhashtable_params prio_params = {
+	.head_offset = offsetof(struct fdb_prio, node),
+	.key_offset = offsetof(struct fdb_prio, key),
+	.key_len = sizeof_field(struct fdb_prio, key),
+	.automatic_shrinking = true,
+};
+
+bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
+{
+	return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+}
+
+u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
+{
+	if (!mlx5_esw_chains_prios_supported(esw))
+		return 1;
+
+	if (fdb_ignore_flow_level_supported(esw))
+		return UINT_MAX - 1;
+
+	return FDB_TC_MAX_CHAIN;
+}
+
+u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw)
+{
+	return mlx5_esw_chains_get_chain_range(esw) + 1;
+}
+
+u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw)
+{
+	if (!mlx5_esw_chains_prios_supported(esw))
+		return 1;
+
+	if (fdb_ignore_flow_level_supported(esw))
+		return UINT_MAX;
+
+	return FDB_TC_MAX_PRIO;
+}
+
+static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw)
+{
+	if (fdb_ignore_flow_level_supported(esw))
+		return UINT_MAX;
+
+	return FDB_TC_LEVELS_PER_PRIO;
+}
+
+#define POOL_NEXT_SIZE 0
+static int
+mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw,
+				       int desired_size)
+{
+	int i, found_i = -1;
+
+	for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
+		if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) {
+			found_i = i;
+			if (desired_size != POOL_NEXT_SIZE)
+				break;
+		}
+	}
+
+	if (found_i != -1) {
+		--fdb_pool_left(esw)[found_i];
+		return ESW_POOLS[found_i];
+	}
+
+	return 0;
+}
+
+static void
+mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
+		if (sz == ESW_POOLS[i]) {
+			++fdb_pool_left(esw)[i];
+			return;
+		}
+	}
+
+	WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz);
+}
+
+static void
+mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw)
+{
+	u32 fdb_max;
+	int i;
+
+	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size);
+
+	for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--)
+		fdb_pool_left(esw)[i] =
+			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
+}
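To make the pool arithmetic above concrete, here is a stand-alone model (plain userspace C, not driver code) of mlx5_esw_chains_init_sz_pool(): with an assumed log_max_ft_size of 16 (64K FTEs), only the two smallest pools are usable, each backed by the full 16M region:

    #include <stdio.h>

    #define ESW_SIZE (16 * 1024 * 1024)
    static const unsigned int pools[] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
                                          64 * 1024, 4 * 1024 };

    int main(void)
    {
        unsigned int fdb_max = 1u << 16; /* assumed log_max_ft_size = 16 */
        int i;

        for (i = 0; i < 4; i++)
            printf("pool of %7u FTEs: %4u tables available\n", pools[i],
                   pools[i] <= fdb_max ? ESW_SIZE / pools[i] : 0);
        return 0;
    }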
+
+static struct mlx5_flow_table *
+mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
+				 u32 chain, u32 prio, u32 level)
+{
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_table *fdb;
+	int sz;
+
+	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+		ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+				  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+	sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
+	if (!sz)
+		return ERR_PTR(-ENOSPC);
+	ft_attr.max_fte = sz;
+
+	/* We use tc_slow_fdb(esw) as the table's next_ft till
+	 * ignore_flow_level is allowed on FT creation and not just for FTEs.
+	 * Instead caller should add an explicit miss rule if needed.
+	 */
+	ft_attr.next_ft = tc_slow_fdb(esw);
+
+	/* The root table(chain 0, prio 1, level 0) is required to be
+	 * connected to the previous prio (FDB_BYPASS_PATH if exists).
+	 * We always create it, as a managed table, in order to align with
+	 * fs_core logic.
+	 */
+	if (!fdb_ignore_flow_level_supported(esw) ||
+	    (chain == 0 && prio == 1 && level == 0)) {
+		ft_attr.level = level;
+		ft_attr.prio = prio - 1;
+		ns = mlx5_get_fdb_sub_ns(esw->dev, chain);
+	} else {
+		ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
+		ft_attr.prio = FDB_TC_OFFLOAD;
+		/* Firmware doesn't allow us to create another level 0 table,
+		 * so we create all unmanaged tables as level 1.
+		 *
+		 * To connect them, we use explicit miss rules with
+		 * ignore_flow_level. Caller is responsible to create
+		 * these rules (if needed).
+		 */
+		ft_attr.level = 1;
+		ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
+	}
+
+	ft_attr.autogroup.num_reserved_entries = 2;
+	ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+	if (IS_ERR(fdb)) {
+		esw_warn(esw->dev,
+			 "Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
+			 (int)PTR_ERR(fdb), chain, prio, level, sz);
+		mlx5_esw_chains_put_sz_to_pool(esw, sz);
+		return fdb;
+	}
+
+	return fdb;
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
+				  struct mlx5_flow_table *fdb)
+{
+	mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte);
+	mlx5_destroy_flow_table(fdb);
+}
+
+static struct fdb_chain *
+mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
+{
+	struct fdb_chain *fdb_chain = NULL;
+	int err;
+
+	fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL);
+	if (!fdb_chain)
+		return ERR_PTR(-ENOMEM);
+
+	fdb_chain->esw = esw;
+	fdb_chain->chain = chain;
+	INIT_LIST_HEAD(&fdb_chain->prios_list);
+
+	err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
+				     chain_params);
+	if (err)
+		goto err_insert;
+
+	return fdb_chain;
+
+err_insert:
+	kvfree(fdb_chain);
+	return ERR_PTR(err);
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
+{
+	struct mlx5_eswitch *esw = fdb_chain->esw;
+
+	rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
+			       chain_params);
+	kvfree(fdb_chain);
+}
+
+static struct fdb_chain *
+mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
+{
+	struct fdb_chain *fdb_chain;
+
+	fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain,
+					   chain_params);
+	if (!fdb_chain) {
+		fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain);
+		if (IS_ERR(fdb_chain))
+			return fdb_chain;
+	}
+
+	fdb_chain->ref++;
+
+	return fdb_chain;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
+			      struct mlx5_flow_table *next_fdb)
+{
+	static const struct mlx5_flow_spec spec = {};
+	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_act act = {};
+
+	act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = next_fdb;
+
+	return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1);
+}
+
+static int
+mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
+				  struct mlx5_flow_table *next_fdb)
+{
+	struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
+	struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
+	struct fdb_prio *pos;
+	int n = 0, err;
+
+	if (fdb_prio->key.level)
+		return 0;
+
+	/* Iterate in reverse order until reaching the level 0 rule of
+	 * the previous priority, adding all the miss rules first, so we can
+	 * revert them if any of them fails.
+	 */
+	pos = fdb_prio;
+	list_for_each_entry_continue_reverse(pos,
+					     &fdb_chain->prios_list,
+					     list) {
+		miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb,
+							      next_fdb);
+		if (IS_ERR(miss_rules[n])) {
+			err = PTR_ERR(miss_rules[n]);
+			goto err_prev_rule;
+		}
+
+		n++;
+		if (!pos->key.level)
+			break;
+	}
+
+	/* Success, delete old miss rules, and update the pointers. */
+	n = 0;
+	pos = fdb_prio;
+	list_for_each_entry_continue_reverse(pos,
+					     &fdb_chain->prios_list,
+					     list) {
+		mlx5_del_flow_rules(pos->miss_rule);
+
+		pos->miss_rule = miss_rules[n];
+		pos->next_fdb = next_fdb;
+
+		n++;
+		if (!pos->key.level)
+			break;
+	}
+
+	return 0;
+
+err_prev_rule:
+	while (--n >= 0)
+		mlx5_del_flow_rules(miss_rules[n]);
+
+	return err;
+}
+
+static void
+mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain)
+{
+	if (--fdb_chain->ref == 0)
+		mlx5_esw_chains_destroy_fdb_chain(fdb_chain);
+}
+
+static struct fdb_prio *
+mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
+				u32 chain, u32 prio, u32 level)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_handle *miss_rule = NULL;
+	struct mlx5_flow_group *miss_group;
+	struct fdb_prio *fdb_prio = NULL;
+	struct mlx5_flow_table *next_fdb;
+	struct fdb_chain *fdb_chain;
+	struct mlx5_flow_table *fdb;
+	struct list_head *pos;
+	u32 *flow_group_in;
+	int err;
+
+	fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain);
+	if (IS_ERR(fdb_chain))
+		return ERR_CAST(fdb_chain);
+
+	fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL);
+	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+	if (!fdb_prio || !flow_group_in) {
+		err = -ENOMEM;
+		goto err_alloc;
+	}
+
+	/* Chain's prio list is sorted by prio and level.
+	 * And all levels of some prio point to the next prio's level 0.
+	 * Example list (prio, level):
+	 * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
+	 * In hardware, we will have the following pointers:
+	 * (3,0) -> (5,0) -> (7,0) -> Slow path
+	 * (3,1) -> (5,0)
+	 * (5,1) -> (7,0)
+	 * (6,1) -> (7,0)
+	 */
+
+	/* Default miss for each chain: */
+	next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
+		    tc_slow_fdb(esw) :
+		    tc_end_fdb(esw);
+	list_for_each(pos, &fdb_chain->prios_list) {
+		struct fdb_prio *p = list_entry(pos, struct fdb_prio, list);
+
+		/* exit on first pos that is larger */
+		if (prio < p->key.prio || (prio == p->key.prio &&
+					   level < p->key.level)) {
+			/* Get next level 0 table */
+			next_fdb = p->key.level == 0 ? p->fdb : p->next_fdb;
+			break;
+		}
+	}
+
+	fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
+	if (IS_ERR(fdb)) {
+		err = PTR_ERR(fdb);
+		goto err_create;
+	}
+
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+		 fdb->max_fte - 2);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+		 fdb->max_fte - 1);
+	miss_group = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR(miss_group)) {
+		err = PTR_ERR(miss_group);
+		goto err_group;
+	}
+
+	/* Add miss rule to next_fdb */
+	miss_rule = mlx5_esw_chains_add_miss_rule(fdb, next_fdb);
+	if (IS_ERR(miss_rule)) {
+		err = PTR_ERR(miss_rule);
+		goto err_miss_rule;
+	}
+
+	fdb_prio->miss_group = miss_group;
+	fdb_prio->miss_rule = miss_rule;
+	fdb_prio->next_fdb = next_fdb;
+	fdb_prio->fdb_chain = fdb_chain;
+	fdb_prio->key.chain = chain;
+	fdb_prio->key.prio = prio;
+	fdb_prio->key.level = level;
+	fdb_prio->fdb = fdb;
+
+	err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node,
+				     prio_params);
+	if (err)
+		goto err_insert;
+
+	list_add(&fdb_prio->list, pos->prev);
+
+	/* Table is ready, connect it */
+	err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb);
+	if (err)
+		goto err_update;
+
+	kvfree(flow_group_in);
+	return fdb_prio;
+
+err_update:
+	list_del(&fdb_prio->list);
+	rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
+			       prio_params);
+err_insert:
+	mlx5_del_flow_rules(miss_rule);
+err_miss_rule:
+	mlx5_destroy_flow_group(miss_group);
+err_group:
+	mlx5_esw_chains_destroy_fdb_table(esw, fdb);
+err_create:
+err_alloc:
+	kvfree(fdb_prio);
+	kvfree(flow_group_in);
+	mlx5_esw_chains_put_fdb_chain(fdb_chain);
+	return ERR_PTR(err);
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw,
+				 struct fdb_prio *fdb_prio)
+{
+	struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
+
+	WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio,
+						  fdb_prio->next_fdb));
+
+	list_del(&fdb_prio->list);
+	rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
+			       prio_params);
+	mlx5_del_flow_rules(fdb_prio->miss_rule);
+	mlx5_destroy_flow_group(fdb_prio->miss_group);
+	mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb);
+	mlx5_esw_chains_put_fdb_chain(fdb_chain);
+	kvfree(fdb_prio);
+}
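The prios_list comment in mlx5_esw_chains_create_fdb_prio() is easiest to check with a small stand-alone model (userspace C, not driver code) of the rule it states: every level of a prio misses to the level-0 table of the next prio, or to the end table / slow path if there is none:

    #include <stdio.h>

    struct prio { int prio, level; };

    int main(void)
    {
        /* the example list from the comment above */
        struct prio l[] = { {3,0}, {3,1}, {5,0}, {5,1}, {6,1}, {7,0} };
        int n = sizeof(l) / sizeof(l[0]), i, j;

        for (i = 0; i < n; i++) {
            /* find the next level-0 table of a higher prio */
            for (j = i + 1; j < n; j++)
                if (l[j].prio > l[i].prio && l[j].level == 0)
                    break;
            if (j < n)
                printf("(%d,%d) -> (%d,%d)\n",
                       l[i].prio, l[i].level, l[j].prio, l[j].level);
            else
                printf("(%d,%d) -> end table / slow path\n",
                       l[i].prio, l[i].level);
        }
        return 0;
    }

Running it reproduces the pointers listed in the comment: (3,0) and (3,1) miss to (5,0); (5,0), (5,1) and (6,1) miss to (7,0); (7,0) misses to the end.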
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+			  u32 level)
+{
+	struct mlx5_flow_table *prev_fts;
+	struct fdb_prio *fdb_prio;
+	struct fdb_prio_key key;
+	int l = 0;
+
+	if ((chain > mlx5_esw_chains_get_chain_range(esw) &&
+	     chain != mlx5_esw_chains_get_ft_chain(esw)) ||
+	    prio > mlx5_esw_chains_get_prio_range(esw) ||
+	    level > mlx5_esw_chains_get_level_range(esw))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	/* create earlier levels for correct fs_core lookup when
+	 * connecting tables.
+	 */
+	for (l = 0; l < level; l++) {
+		prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l);
+		if (IS_ERR(prev_fts)) {
+			fdb_prio = ERR_CAST(prev_fts);
+			goto err_get_prevs;
+		}
+	}
+
+	key.chain = chain;
+	key.prio = prio;
+	key.level = level;
+
+	mutex_lock(&esw_chains_lock(esw));
+	fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
+					  prio_params);
+	if (!fdb_prio) {
+		fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain,
+							   prio, level);
+		if (IS_ERR(fdb_prio))
+			goto err_create_prio;
+	}
+
+	++fdb_prio->ref;
+	mutex_unlock(&esw_chains_lock(esw));
+
+	return fdb_prio->fdb;
+
+err_create_prio:
+	mutex_unlock(&esw_chains_lock(esw));
+err_get_prevs:
+	while (--l >= 0)
+		mlx5_esw_chains_put_table(esw, chain, prio, l);
+	return ERR_CAST(fdb_prio);
+}
+
+void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+			  u32 level)
+{
+	struct fdb_prio *fdb_prio;
+	struct fdb_prio_key key;
+
+	key.chain = chain;
+	key.prio = prio;
+	key.level = level;
+
+	mutex_lock(&esw_chains_lock(esw));
+	fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
+					  prio_params);
+	if (!fdb_prio)
+		goto err_get_prio;
+
+	if (--fdb_prio->ref == 0)
+		mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio);
+	mutex_unlock(&esw_chains_lock(esw));
+
+	while (level-- > 0)
+		mlx5_esw_chains_put_table(esw, chain, prio, level);
+
+	return;
+
+err_get_prio:
+	mutex_unlock(&esw_chains_lock(esw));
+	WARN_ONCE(1,
+		  "Couldn't find table: (chain: %d prio: %d level: %d)",
+		  chain, prio, level);
+}
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
+{
+	return tc_end_fdb(esw);
+}
+
+static int
+mlx5_esw_chains_init(struct mlx5_eswitch *esw)
+{
+	struct mlx5_esw_chains_priv *chains_priv;
+	struct mlx5_core_dev *dev = esw->dev;
+	u32 max_flow_counter, fdb_max;
+	int err;
+
+	chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
+	if (!chains_priv)
+		return -ENOMEM;
+	esw_chains_priv(esw) = chains_priv;
+
+	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+
+	esw_debug(dev,
+		  "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
+		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
+
+	mlx5_esw_chains_init_sz_pool(esw);
+
+	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
+	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+	} else {
+		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+		esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
+			 mlx5_esw_chains_get_chain_range(esw),
+			 mlx5_esw_chains_get_prio_range(esw));
+	}
+
+	err = rhashtable_init(&esw_chains_ht(esw), &chain_params);
+	if (err)
+		goto init_chains_ht_err;
+
+	err = rhashtable_init(&esw_prios_ht(esw), &prio_params);
+	if (err)
+		goto init_prios_ht_err;
+
+	mutex_init(&esw_chains_lock(esw));
+
+	return 0;
+
+init_prios_ht_err:
+	rhashtable_destroy(&esw_chains_ht(esw));
+init_chains_ht_err:
+	kfree(chains_priv);
+	return err;
+}
+
+static void
+mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
+{
+	mutex_destroy(&esw_chains_lock(esw));
+	rhashtable_destroy(&esw_prios_ht(esw));
+	rhashtable_destroy(&esw_chains_ht(esw));
+
+	kfree(esw_chains_priv(esw));
+}
+
+static int
+mlx5_esw_chains_open(struct mlx5_eswitch *esw)
+{
+	struct mlx5_flow_table *ft;
+	int err;
+
+	/* Create tc_end_fdb(esw) which is the always created ft chain */
+	ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw),
+				       1, 0);
+	if (IS_ERR(ft))
+		return PTR_ERR(ft);
+
+	tc_end_fdb(esw) = ft;
+
+	/* Always open the root for fast path */
+	ft = mlx5_esw_chains_get_table(esw, 0, 1, 0);
+	if (IS_ERR(ft)) {
+		err = PTR_ERR(ft);
+		goto level_0_err;
+	}
+
+	/* Open level 1 for split rules now if prios aren't supported */
+	if (!mlx5_esw_chains_prios_supported(esw)) {
+		ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
+
+		if (IS_ERR(ft)) {
+			err = PTR_ERR(ft);
+			goto level_1_err;
+		}
+	}
+
+	return 0;
+
+level_1_err:
+	mlx5_esw_chains_put_table(esw, 0, 1, 0);
+level_0_err:
+	mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
+	return err;
+}
+
+static void
+mlx5_esw_chains_close(struct mlx5_eswitch *esw)
+{
+	if (!mlx5_esw_chains_prios_supported(esw))
+		mlx5_esw_chains_put_table(esw, 0, 1, 1);
+	mlx5_esw_chains_put_table(esw, 0, 1, 0);
+	mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
+}
+
+int
+mlx5_esw_chains_create(struct mlx5_eswitch *esw)
+{
+	int err;
+
+	err = mlx5_esw_chains_init(esw);
+	if (err)
+		return err;
+
+	err = mlx5_esw_chains_open(esw);
+	if (err)
+		goto err_open;
+
+	return 0;
+
+err_open:
+	mlx5_esw_chains_cleanup(esw);
+	return err;
+}
+
+void
+mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
+{
+	mlx5_esw_chains_close(esw);
+	mlx5_esw_chains_cleanup(esw);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
new file mode 100644
index 000000000000..2e13097fe348
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_ESW_CHAINS_H__
+#define __MLX5_ESW_CHAINS_H__
+
+bool
+mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+			  u32 level);
+void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+			  u32 level);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
+
+int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
+void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
+
+#endif /* __MLX5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 366bda1bb1c3..dc08ed9339ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -50,8 +50,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
 			    struct mlx5_flow_act *flow_act)
 {
 	static const struct mlx5_flow_spec spec = {};
+	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_flow_namespace *root_ns;
-	int prio, flags;
 	int err;
 
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
@@ -63,10 +63,11 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
 	/* As this is the terminating action then the termination table is the
 	 * same prio as the slow path
 	 */
-	prio = FDB_SLOW_PATH;
-	flags = MLX5_FLOW_TABLE_TERMINATION;
-	tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1,
-							  0, flags);
+	ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
+	ft_attr.prio = FDB_SLOW_PATH;
+	ft_attr.max_fte = 1;
+	ft_attr.autogroup.max_num_groups = 1;
+	tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
 	if (IS_ERR(tt->termtbl)) {
 		esw_warn(dev, "Failed to create termination table\n");
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 3c816e81f8d9..b25465d9e030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -432,6 +432,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 	MLX5_SET(set_fte_in, in, table_type, ft->type);
 	MLX5_SET(set_fte_in, in, table_id,   ft->id);
 	MLX5_SET(set_fte_in, in, flow_index, fte->index);
+	MLX5_SET(set_fte_in, in, ignore_flow_level,
+		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+
 	if (ft->vport) {
 		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
 		MLX5_SET(set_fte_in, in, other_vport, 1);
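The fs_cmd.c hunk above simply copies FLOW_ACT_IGNORE_FLOW_LEVEL from the flow_act into the ignore_flow_level bit of the FTE, which is what lets the chains code jump "backwards" between its unmanaged level-1 tables. A minimal sketch of a forward rule using it, mirroring mlx5_esw_chains_add_miss_rule():

    #include <linux/mlx5/fs.h>

    static struct mlx5_flow_handle *
    example_fwd_any_level(struct mlx5_flow_table *from,
                          struct mlx5_flow_table *to)
    {
        static const struct mlx5_flow_spec spec = {}; /* match-all */
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act act = {};

        act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL; /* skip level-ordering check */
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = to;

        return mlx5_add_flow_rules(from, &spec, &act, &dest, 1);
    }

Note that dest_is_valid() in the fs_core.c changes below restricts this to FDB tables on both ends.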
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8c5df6c7d7b6..c7a16ae05fa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -579,7 +579,9 @@ static void del_sw_flow_group(struct fs_node *node)
 
 	rhashtable_destroy(&fg->ftes_hash);
 	ida_destroy(&fg->fte_allocator);
-	if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
+	if (ft->autogroup.active &&
+	    fg->max_ftes == ft->autogroup.group_size &&
+	    fg->start_index < ft->autogroup.max_fte)
 		ft->autogroup.num_groups--;
 	err = rhltable_remove(&ft->fgs_hash,
 			      &fg->hash,
@@ -1006,7 +1008,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 							u16 vport)
 {
 	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
-	struct mlx5_flow_table *next_ft = NULL;
+	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
+	struct mlx5_flow_table *next_ft;
 	struct fs_prio *fs_prio = NULL;
 	struct mlx5_flow_table *ft;
 	int log_table_sz;
@@ -1023,14 +1026,21 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 		err = -EINVAL;
 		goto unlock_root;
 	}
-	if (ft_attr->level >= fs_prio->num_levels) {
-		err = -ENOSPC;
-		goto unlock_root;
+	if (!unmanaged) {
+		/* The level is related to the
+		 * priority level range.
+		 */
+		if (ft_attr->level >= fs_prio->num_levels) {
+			err = -ENOSPC;
+			goto unlock_root;
+		}
+
+		ft_attr->level += fs_prio->start_level;
 	}
+
 	/* The level is related to the
 	 * priority level range.
 	 */
-	ft_attr->level += fs_prio->start_level;
 	ft = alloc_flow_table(ft_attr->level,
 			      vport,
 			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
@@ -1043,19 +1053,27 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 
 	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
 	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
-	next_ft = find_next_chained_ft(fs_prio);
+	next_ft = unmanaged ? ft_attr->next_ft :
+			      find_next_chained_ft(fs_prio);
 	ft->def_miss_action = ns->def_miss_action;
 	err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
 	if (err)
 		goto free_ft;
 
-	err = connect_flow_table(root->dev, ft, fs_prio);
-	if (err)
-		goto destroy_ft;
+	if (!unmanaged) {
+		err = connect_flow_table(root->dev, ft, fs_prio);
+		if (err)
+			goto destroy_ft;
+	}
+
 	ft->node.active = true;
 	down_write_ref_node(&fs_prio->node, false);
-	tree_add_node(&ft->node, &fs_prio->node);
-	list_add_flow_table(ft, fs_prio);
+	if (!unmanaged) {
+		tree_add_node(&ft->node, &fs_prio->node);
+		list_add_flow_table(ft, fs_prio);
+	} else {
+		ft->node.root = fs_prio->node.root;
+	}
 	fs_prio->num_ft++;
 	up_write_ref_node(&fs_prio->node, false);
 	mutex_unlock(&root->chain_lock);
@@ -1103,31 +1121,27 @@ EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
 
 struct mlx5_flow_table*
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
-				    int prio,
-				    int num_flow_table_entries,
-				    int max_num_groups,
-				    u32 level,
-				    u32 flags)
+				    struct mlx5_flow_table_attr *ft_attr)
 {
-	struct mlx5_flow_table_attr ft_attr = {};
+	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
+	int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
+	int max_num_groups = ft_attr->autogroup.max_num_groups;
 	struct mlx5_flow_table *ft;
 
-	if (max_num_groups > num_flow_table_entries)
+	if (max_num_groups > autogroups_max_fte)
+		return ERR_PTR(-EINVAL);
+	if (num_reserved_entries > ft_attr->max_fte)
 		return ERR_PTR(-EINVAL);
 
-	ft_attr.max_fte = num_flow_table_entries;
-	ft_attr.prio = prio;
-	ft_attr.level = level;
-	ft_attr.flags = flags;
-
-	ft = mlx5_create_flow_table(ns, &ft_attr);
+	ft = mlx5_create_flow_table(ns, ft_attr);
 	if (IS_ERR(ft))
 		return ft;
 
 	ft->autogroup.active = true;
 	ft->autogroup.required_groups = max_num_groups;
+	ft->autogroup.max_fte = autogroups_max_fte;
 	/* We save place for flow groups in addition to max types */
-	ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
+	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
 
 	return ft;
 }
@@ -1149,7 +1163,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
 	struct mlx5_flow_group *fg;
 	int err;
 
-	if (ft->autogroup.active)
+	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
 		return ERR_PTR(-EPERM);
 
 	down_write_ref_node(&ft->node, false);
@@ -1322,9 +1336,10 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
 						     const struct mlx5_flow_spec *spec)
 {
 	struct list_head *prev = &ft->node.children;
-	struct mlx5_flow_group *fg;
+	u32 max_fte = ft->autogroup.max_fte;
 	unsigned int candidate_index = 0;
 	unsigned int group_size = 0;
+	struct mlx5_flow_group *fg;
 
 	if (!ft->autogroup.active)
 		return ERR_PTR(-ENOENT);
@@ -1332,7 +1347,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
 	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
 		group_size = ft->autogroup.group_size;
 
-	/* ft->max_fte == ft->autogroup.max_types */
+	/* max_fte == ft->autogroup.max_types */
 	if (group_size == 0)
 		group_size = 1;
 
@@ -1345,7 +1360,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
 		prev = &fg->node.list;
 	}
 
-	if (candidate_index + group_size > ft->max_fte)
+	if (candidate_index + group_size > max_fte)
 		return ERR_PTR(-ENOSPC);
 
 	fg = alloc_insert_flow_group(ft,
@@ -1529,18 +1544,30 @@ static bool counter_is_valid(u32 action)
 }
 
 static bool dest_is_valid(struct mlx5_flow_destination *dest,
-			  u32 action,
+			  struct mlx5_flow_act *flow_act,
 			  struct mlx5_flow_table *ft)
 {
+	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
+	u32 action = flow_act->action;
+
 	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
 		return counter_is_valid(action);
 
 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
 		return true;
 
+	if (ignore_level) {
+		if (ft->type != FS_FT_FDB)
+			return false;
+
+		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+		    dest->ft->type != FS_FT_FDB)
+			return false;
+	}
+
 	if (!dest || ((dest->type ==
 	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
-	    (dest->ft->level <= ft->level)))
+	    (dest->ft->level <= ft->level && !ignore_level)))
 		return false;
 	return true;
 }
@@ -1770,7 +1797,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 		return ERR_PTR(-EINVAL);
 
 	for (i = 0; i < dest_num; i++) {
-		if (!dest_is_valid(&dest[i], flow_act->action, ft))
+		if (!dest_is_valid(&dest[i], flow_act, ft))
 			return ERR_PTR(-EINVAL);
 	}
 	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
@@ -2033,7 +2060,8 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
 	int err = 0;
 
 	mutex_lock(&root->chain_lock);
-	err = disconnect_flow_table(ft);
+	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
+		err = disconnect_flow_table(ft);
 	if (err) {
 		mutex_unlock(&root->chain_lock);
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c2621b911563..be5f5e32c1e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -164,6 +164,7 @@ struct mlx5_flow_table {
 		unsigned int		required_groups;
 		unsigned int		group_size;
 		unsigned int		num_groups;
+		unsigned int		max_fte;
 	} autogroup;
 	/* Protect fwd_rules */
 	struct mutex			lock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index a19790dee7b2..d89ff1d09119 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -131,11 +131,11 @@ static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
 				   MLX5_PCAM_REGS_5000_TO_507F);
 }
 
-static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
+static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev,
+					  enum mlx5_mcam_reg_groups group)
 {
-	return mlx5_query_mcam_reg(dev, dev->caps.mcam,
-				   MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
-				   MLX5_MCAM_REGS_FIRST_128);
+	return mlx5_query_mcam_reg(dev, dev->caps.mcam[group],
+				   MLX5_MCAM_FEATURE_ENHANCED_FEATURES, group);
 }
 
 static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
@@ -221,8 +221,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 	if (MLX5_CAP_GEN(dev, pcam_reg))
 		mlx5_get_pcam_reg(dev);
 
-	if (MLX5_CAP_GEN(dev, mcam_reg))
-		mlx5_get_mcam_reg(dev);
+	if (MLX5_CAP_GEN(dev, mcam_reg)) {
+		mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
+		mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
+		mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
+	}
 
 	if (MLX5_CAP_GEN(dev, qcam_reg))
 		mlx5_get_qcam_reg(dev);
@@ -245,6 +248,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 			return err;
 	}
 
+	if (MLX5_CAP_GEN_64(dev, general_obj_types) &
+	    MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
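With dev->caps.mcam now an array indexed by access-register group, each group is queried explicitly rather than only the first 128 registers. A sketch of the resulting pattern, using only the three group values that appear in the hunk above (the wrapper function is hypothetical):

    /* Hypothetical wrapper iterating the MCAM groups queried above. */
    static void example_query_all_mcam_groups(struct mlx5_core_dev *dev)
    {
        static const enum mlx5_mcam_reg_groups groups[] = {
            MLX5_MCAM_REGS_FIRST_128,
            MLX5_MCAM_REGS_0x9080_0x90FF,
            MLX5_MCAM_REGS_0x9100_0x917F,
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(groups); i++)
            mlx5_get_mcam_access_reg_group(dev, groups[i]);
    }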