Diffstat (limited to 'drivers/net')
61 files changed, 579 insertions, 333 deletions
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 0617d5ccd3ff..8c66d3bf61f0 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -266,6 +266,8 @@ struct sja1105_private {
 	 * the switch doesn't confuse them with one another.
 	 */
 	struct mutex mgmt_lock;
+	/* Serializes accesses to the FDB */
+	struct mutex fdb_lock;
 	/* PTP two-step TX timestamp ID, and its serialization lock */
 	spinlock_t ts_id_lock;
 	u8 ts_id;
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 7729d3f8b7f5..984c0e604e8d 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
 
 static int
 sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
-				  struct sja1105_dyn_cmd *cmd,
-				  const struct sja1105_dynamic_table_ops *ops)
+				  const struct sja1105_dynamic_table_ops *ops,
+				  void *entry, bool check_valident,
+				  bool check_errors)
 {
 	u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+	struct sja1105_dyn_cmd cmd = {};
 	int rc;
 
-	/* We don't _need_ to read the full entry, just the command area which
-	 * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
-	 * buffer that contains the full entry too. Additionally, our API
-	 * doesn't really know how many bytes into the buffer does the command
-	 * area really begin. So just read back the whole entry.
-	 */
+	/* Read back the whole entry + command structure. */
 	rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
 			      ops->packed_size);
 	if (rc)
@@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
 	/* Unpack the command structure, and return it to the caller in case it
 	 * needs to perform further checks on it (VALIDENT).
 	 */
-	memset(cmd, 0, sizeof(*cmd));
-	ops->cmd_packing(packed_buf, cmd, UNPACK);
+	ops->cmd_packing(packed_buf, &cmd, UNPACK);
 
 	/* Hardware hasn't cleared VALID => still working on it */
-	return cmd->valid ? -EAGAIN : 0;
+	if (cmd.valid)
+		return -EAGAIN;
+
+	if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+		return -ENOENT;
+
+	if (check_errors && cmd.errors)
+		return -EINVAL;
+
+	/* Don't dereference possibly NULL pointer - maybe caller
+	 * only wanted to see whether the entry existed or not.
+	 */
+	if (entry)
+		ops->entry_packing(packed_buf, entry, UNPACK);
+
+	return 0;
 }
 
 /* Poll the dynamic config entry's control area until the hardware has
@@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
  */
 static int
 sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
-				     struct sja1105_dyn_cmd *cmd,
-				     const struct sja1105_dynamic_table_ops *ops)
+				     const struct sja1105_dynamic_table_ops *ops,
+				     void *entry, bool check_valident,
+				     bool check_errors)
 {
-	int rc;
+	int err, rc;
 
-	return read_poll_timeout(sja1105_dynamic_config_poll_valid,
-				 rc, rc != -EAGAIN,
-				 SJA1105_DYNAMIC_CONFIG_SLEEP_US,
-				 SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
-				 false, priv, cmd, ops);
+	err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
+				rc, rc != -EAGAIN,
+				SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+				SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+				false, priv, ops, entry, check_valident,
+				check_errors);
+	return err < 0 ? err : rc;
 }
 
 /* Provides read access to the settings through the dynamic interface
@@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
 	mutex_lock(&priv->dynamic_config_lock);
 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
 			      ops->packed_size);
-	if (rc < 0) {
-		mutex_unlock(&priv->dynamic_config_lock);
-		return rc;
-	}
-
-	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
-	mutex_unlock(&priv->dynamic_config_lock);
 	if (rc < 0)
-		return rc;
+		goto out;
 
-	if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
-		return -ENOENT;
+	rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
+out:
+	mutex_unlock(&priv->dynamic_config_lock);
 
-	/* Don't dereference possibly NULL pointer - maybe caller
-	 * only wanted to see whether the entry existed or not.
-	 */
-	if (entry)
-		ops->entry_packing(packed_buf, entry, UNPACK);
-	return 0;
+	return rc;
 }
 
 int sja1105_dynamic_config_write(struct sja1105_private *priv,
@@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
 	mutex_lock(&priv->dynamic_config_lock);
 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
 			      ops->packed_size);
-	if (rc < 0) {
-		mutex_unlock(&priv->dynamic_config_lock);
-		return rc;
-	}
-
-	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
-	mutex_unlock(&priv->dynamic_config_lock);
 	if (rc < 0)
-		return rc;
+		goto out;
 
-	cmd = (struct sja1105_dyn_cmd) {0};
-	ops->cmd_packing(packed_buf, &cmd, UNPACK);
-	if (cmd.errors)
-		return -EINVAL;
+	rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
+out:
+	mutex_unlock(&priv->dynamic_config_lock);
 
-	return 0;
+	return rc;
 }
 
 static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
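The sja1105 change above moves all post-completion checking (VALIDENT, error bits, entry unpacking) into the polled function, and separates read_poll_timeout()'s own timeout result from the poll callback's verdict. A minimal sketch of that pattern, using hypothetical my_poll_once()/my_wait_complete() helpers that are not part of any driver:

#include <linux/iopoll.h>
#include <linux/errno.h>

/* Hypothetical poll callback: returns -EAGAIN while hardware is busy,
 * 0 or a real error code once the command has finished.
 */
static int my_poll_once(struct my_device *dev)
{
	u32 status = my_read_status(dev);	/* assumed register read */

	if (status & MY_BUSY_BIT)
		return -EAGAIN;			/* still working, keep polling */
	if (status & MY_ERROR_BIT)
		return -EINVAL;			/* command was rejected */
	return 0;
}

static int my_wait_complete(struct my_device *dev)
{
	int err, rc;

	/* Poll until the callback stops returning -EAGAIN or we time out:
	 * err reports the timeout, rc carries the callback's own verdict.
	 */
	err = read_poll_timeout(my_poll_once, rc, rc != -EAGAIN,
				10 /* sleep us */, 100000 /* timeout us */,
				false, dev);
	return err < 0 ? err : rc;
}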
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index a23d980d28f5..1a367e64bc3b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1798,6 +1798,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
 			   struct dsa_db db)
 {
 	struct sja1105_private *priv = ds->priv;
+	int rc;
 
 	if (!vid) {
 		switch (db.type) {
@@ -1812,12 +1813,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
 		}
 	}
 
-	return priv->info->fdb_add_cmd(ds, port, addr, vid);
+	mutex_lock(&priv->fdb_lock);
+	rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
+	mutex_unlock(&priv->fdb_lock);
+
+	return rc;
 }
 
-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid,
-			   struct dsa_db db)
+static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
+			     const unsigned char *addr, u16 vid,
+			     struct dsa_db db)
 {
 	struct sja1105_private *priv = ds->priv;
 
@@ -1837,6 +1842,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
 	return priv->info->fdb_del_cmd(ds, port, addr, vid);
 }
 
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+			   const unsigned char *addr, u16 vid,
+			   struct dsa_db db)
+{
+	struct sja1105_private *priv = ds->priv;
+	int rc;
+
+	mutex_lock(&priv->fdb_lock);
+	rc = __sja1105_fdb_del(ds, port, addr, vid, db);
+	mutex_unlock(&priv->fdb_lock);
+
+	return rc;
+}
+
 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
 			    dsa_fdb_dump_cb_t *cb, void *data)
 {
@@ -1868,13 +1887,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
 		if (!(l2_lookup.destports & BIT(port)))
 			continue;
 
-		/* We need to hide the FDB entry for unknown multicast */
-		if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
-		    l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
-			continue;
-
 		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
 
+		/* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
+		 * only wants to see unicast
+		 */
+		if (is_multicast_ether_addr(macaddr))
+			continue;
+
 		/* We need to hide the dsa_8021q VLANs from the user. */
 		if (vid_is_dsa_8021q(l2_lookup.vlanid))
 			l2_lookup.vlanid = 0;
@@ -1898,6 +1918,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
 	};
 	int i;
 
+	mutex_lock(&priv->fdb_lock);
+
 	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
 		struct sja1105_l2_lookup_entry l2_lookup = {0};
 		u8 macaddr[ETH_ALEN];
@@ -1911,7 +1933,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
 		if (rc) {
 			dev_err(ds->dev, "Failed to read FDB: %pe\n",
 				ERR_PTR(rc));
-			return;
+			break;
 		}
 
 		if (!(l2_lookup.destports & BIT(port)))
@@ -1923,14 +1945,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
 
 		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
 
-		rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+		rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
 		if (rc) {
 			dev_err(ds->dev,
 				"Failed to delete FDB entry %pM vid %lld: %pe\n",
 				macaddr, l2_lookup.vlanid, ERR_PTR(rc));
-			return;
+			break;
 		}
 	}
+
+	mutex_unlock(&priv->fdb_lock);
 }
 
 static int sja1105_mdb_add(struct dsa_switch *ds, int port,
@@ -2273,6 +2297,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
 	int rc, i;
 	s64 now;
 
+	mutex_lock(&priv->fdb_lock);
 	mutex_lock(&priv->mgmt_lock);
 
 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@@ -2385,6 +2410,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
 		goto out;
 out:
 	mutex_unlock(&priv->mgmt_lock);
+	mutex_unlock(&priv->fdb_lock);
 
 	return rc;
 }
@@ -2954,7 +2980,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
 {
 	struct sja1105_l2_lookup_entry *l2_lookup;
 	struct sja1105_table *table;
-	int match;
+	int match, rc;
+
+	mutex_lock(&priv->fdb_lock);
 
 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
 	l2_lookup = table->entries;
@@ -2967,7 +2995,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
 	if (match == table->entry_count) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Could not find FDB entry for unknown multicast");
-		return -ENOSPC;
+		rc = -ENOSPC;
+		goto out;
 	}
 
 	if (flags.val & BR_MCAST_FLOOD)
@@ -2975,10 +3004,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
 	else
 		l2_lookup[match].destports &= ~BIT(to);
 
-	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
-					    l2_lookup[match].index,
-					    &l2_lookup[match],
-					    true);
+	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+					  l2_lookup[match].index,
+					  &l2_lookup[match], true);
+out:
+	mutex_unlock(&priv->fdb_lock);
+
+	return rc;
 }
 
 static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
@@ -3348,6 +3380,7 @@ static int sja1105_probe(struct spi_device *spi)
 	mutex_init(&priv->ptp_data.lock);
 	mutex_init(&priv->dynamic_config_lock);
 	mutex_init(&priv->mgmt_lock);
+	mutex_init(&priv->fdb_lock);
 	spin_lock_init(&priv->ts_id_lock);
 
 	rc = sja1105_parse_dt(priv);
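The sja1105_main.c hunks above follow a common kernel shape: a locked entry point plus a double-underscore unlocked helper, so a bulk operation (here fast-age) can take the lock once around the whole loop. A generic sketch with made-up names; my_switch/my_hw_fdb_del are illustrative, not driver API:

#include <linux/mutex.h>

/* Hypothetical device structure; mirrors the sja1105 fdb_lock idea. */
struct my_switch {
	struct mutex fdb_lock;	/* serializes accesses to the FDB */
};

/* Unlocked helper, for callers that already hold fdb_lock,
 * e.g. a fast-age loop deleting many entries in one pass.
 */
static int __my_fdb_del(struct my_switch *sw, const unsigned char *addr, u16 vid)
{
	lockdep_assert_held(&sw->fdb_lock);
	return my_hw_fdb_del(sw, addr, vid);	/* assumed hardware op */
}

/* Locked wrapper, for externally visible entry points. */
static int my_fdb_del(struct my_switch *sw, const unsigned char *addr, u16 vid)
{
	int rc;

	mutex_lock(&sw->fdb_lock);
	rc = __my_fdb_del(sw, addr, vid);
	mutex_unlock(&sw->fdb_lock);

	return rc;
}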
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 1c009b485188..ca66b747b7c5 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1385,7 +1385,7 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
 		return -ENOMEM;
 
 	other_port = priv->ports[!port_priv->nr];
-	port_rules = adin1110_port_rules(port_priv, false, true);
+	port_rules = adin1110_port_rules(other_port, false, true);
 	eth_broadcast_addr(mask);
 
 	return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index ad32ca81f7ef..f955bde10cf9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1833,6 +1833,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	return work_done;
 
 error:
+	if (xdp_flags & ENA_XDP_REDIRECT)
+		xdp_do_flush();
+
 	adapter = netdev_priv(rx_ring->netdev);
 
 	if (rc == -ENOSPC) {
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index d63d321f3e7b..41a6098eb0c2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -528,13 +528,16 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
 		  ASP_RX_FILTER_BLK_CTRL);
 }
 
-void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
-				   u32 *rule_cnt)
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+				  u32 *rule_cnt)
 {
 	struct bcmasp_priv *priv = intf->parent;
 	int j = 0, i;
 
 	for (i = 0; i < NUM_NET_FILTERS; i++) {
+		if (j == *rule_cnt)
+			return -EMSGSIZE;
+
 		if (!priv->net_filters[i].claimed ||
 		    priv->net_filters[i].port != intf->port)
 			continue;
@@ -548,6 +551,8 @@ void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
 	}
 
 	*rule_cnt = j;
+
+	return 0;
 }
 
 int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
@@ -1300,6 +1305,7 @@ static int bcmasp_probe(struct platform_device *pdev)
 		if (!intf) {
 			dev_err(dev, "Cannot create eth interface %d\n", i);
 			bcmasp_remove_intfs(priv);
+			of_node_put(intf_node);
 			goto of_put_exit;
 		}
 		list_add_tail(&intf->list, &priv->intfs);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index 5b512f7f5e94..ec90add6b03e 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -577,8 +577,8 @@ void bcmasp_netfilt_release(struct bcmasp_intf *intf,
 
 int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
 
-void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
-				   u32 *rule_cnt);
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+				  u32 *rule_cnt);
 
 void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index c4f1604d5ab3..ce6a3d56fb23 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -335,7 +335,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 		err = bcmasp_flow_get(intf, cmd);
 		break;
 	case ETHTOOL_GRXCLSRLALL:
-		bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
+		err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
 		cmd->data = NUM_NET_FILTERS;
 		break;
 	default:
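The bcmasp hunks above (and the mvpp2/mtk ones further down) all add the same guard to an ETHTOOL_GRXCLSRLALL handler: never write past the rule_locs array that ethtool sized from the user-supplied count; report -EMSGSIZE instead. A hedged sketch with a hypothetical my_priv/NUM_RULES layout:

/* Bounds-checked rule dump, mirroring the bcmasp fix. */
static int my_get_all_rules(struct my_priv *priv, u32 *rule_locs, u32 *rule_cnt)
{
	int j = 0, i;

	for (i = 0; i < NUM_RULES; i++) {
		if (!priv->rules[i].in_use)
			continue;

		/* rule_locs has only *rule_cnt slots; overflowing it
		 * would corrupt user-visible memory.
		 */
		if (j == *rule_cnt)
			return -EMSGSIZE;

		rule_locs[j++] = i;
	}

	*rule_cnt = j;

	return 0;
}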
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5cc0dbe12132..7551aa8068f8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2614,6 +2614,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
 	struct rx_cmp_ext *rxcmp1;
 	u32 cp_cons, tmp_raw_cons;
 	u32 raw_cons = cpr->cp_raw_cons;
+	bool flush_xdp = false;
 	u32 rx_pkts = 0;
 	u8 event = 0;
 
@@ -2648,6 +2649,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
 				rx_pkts++;
 			else if (rc == -EBUSY)	/* partial completion */
 				break;
+			if (event & BNXT_REDIRECT_EVENT)
+				flush_xdp = true;
 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
 				    CMPL_BASE_TYPE_HWRM_DONE)) {
 			bnxt_hwrm_handler(bp, txcmp);
@@ -2667,6 +2670,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
 
 	if (event & BNXT_AGG_EVENT)
 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+	if (flush_xdp)
+		xdp_do_flush();
 
 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
 		napi_complete_done(napi, rx_pkts);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 31f664ee4d77..b940dcd3ace6 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -756,8 +756,6 @@ static void macb_mac_link_up(struct phylink_config *config,
 		if (rx_pause)
 			ctrl |= MACB_BIT(PAE);
 
-		macb_set_tx_clk(bp, speed);
-
 		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
 		 * cleared the pipeline and control registers.
 		 */
@@ -777,6 +775,9 @@ static void macb_mac_link_up(struct phylink_config *config,
 
 	spin_unlock_irqrestore(&bp->lock, flags);
 
+	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+		macb_set_tx_clk(bp, speed);
+
 	/* Enable Rx and Tx; Enable PTP unicast */
 	ctrl = macb_readl(bp, NCR);
 	if (gem_has_ptp(bp))
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 716815dad7d2..65ec1abc9442 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -300,10 +300,8 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev,
 {
 	struct tsnep_adapter *adapter = netdev_priv(netdev);
 
-	ch->max_rx = adapter->num_rx_queues;
-	ch->max_tx = adapter->num_tx_queues;
-	ch->rx_count = adapter->num_rx_queues;
-	ch->tx_count = adapter->num_tx_queues;
+	ch->max_combined = adapter->num_queues;
+	ch->combined_count = adapter->num_queues;
 }
 
 static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index f61bd89734c5..8b992dc9bb52 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -87,8 +87,11 @@ static irqreturn_t tsnep_irq(int irq, void *arg)
 
 	/* handle TX/RX queue 0 interrupt */
 	if ((active & adapter->queue[0].irq_mask) != 0) {
-		tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
-		napi_schedule(&adapter->queue[0].napi);
+		if (napi_schedule_prep(&adapter->queue[0].napi)) {
+			tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+			/* schedule after masking to avoid races */
+			__napi_schedule(&adapter->queue[0].napi);
+		}
 	}
 
 	return IRQ_HANDLED;
@@ -99,8 +102,11 @@ static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
 	struct tsnep_queue *queue = arg;
 
 	/* handle TX/RX queue interrupt */
-	tsnep_disable_irq(queue->adapter, queue->irq_mask);
-	napi_schedule(&queue->napi);
+	if (napi_schedule_prep(&queue->napi)) {
+		tsnep_disable_irq(queue->adapter, queue->irq_mask);
+		/* schedule after masking to avoid races */
+		__napi_schedule(&queue->napi);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -1728,6 +1734,10 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
 	if (queue->tx)
 		complete = tsnep_tx_poll(queue->tx, budget);
 
+	/* handle case where we are called by netpoll with a budget of 0 */
+	if (unlikely(budget <= 0))
+		return budget;
+
 	if (queue->rx) {
 		done = queue->rx->xsk_pool ?
 		       tsnep_rx_poll_zc(queue->rx, napi, budget) :
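The tsnep hunks above (and the rswitch ones near the end) use the same race-free NAPI scheduling idiom: only the context that wins napi_schedule_prep() masks the interrupt, so a concurrent poller can never be left with the IRQ permanently disabled. A sketch; my_disable_irq() and the queue layout are assumptions, not driver API:

static irqreturn_t my_irq(int irq, void *arg)
{
	struct my_queue *queue = arg;

	if (napi_schedule_prep(&queue->napi)) {
		my_disable_irq(queue);
		/* schedule after masking to avoid races */
		__napi_schedule(&queue->napi);
	}

	return IRQ_HANDLED;
}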
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b4895c7b3efd..cf50368441b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3353,6 +3353,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
 				  NETIF_F_HW_TC);
 
 	netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
+
+	/* The device_version V3 hardware can't offload the checksum for IP in
+	 * GRE packets, but can do it for NvGRE. So default to disable the
+	 * checksum and GSO offload for GRE.
+	 */
+	if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) {
+		netdev->features &= ~NETIF_F_GSO_GRE;
+		netdev->features &= ~NETIF_F_GSO_GRE_CSUM;
+	}
 }
 
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 8ca368424436..c42574e29747 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3564,9 +3564,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
 				    u32 regclr)
 {
+#define HCLGE_IMP_RESET_DELAY		5
+
 	switch (event_type) {
 	case HCLGE_VECTOR0_EVENT_PTP:
 	case HCLGE_VECTOR0_EVENT_RST:
+		if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
+			mdelay(HCLGE_IMP_RESET_DELAY);
+
 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
 		break;
 	case HCLGE_VECTOR0_EVENT_MBX:
@@ -7348,6 +7353,12 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
 				   NULL, false);
 	if (ret) {
+		/* if tcam config fail, set rule state to TO_DEL,
+		 * so the rule will be deleted when periodic
+		 * task being scheduled.
+		 */
+		hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
+		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
 		spin_unlock_bh(&hdev->fd_rule_lock);
 		return ret;
 	}
@@ -8824,7 +8835,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport,
 	if (mac_type == HCLGE_MAC_ADDR_UC) {
 		if (is_all_added)
 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
-		else
+		else if (hclge_is_umv_space_full(vport, true))
 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
 	} else {
 		if (is_all_added)
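The hns3 hunk above gates default offload features on a hardware revision check so the stack never hands the NIC packets it cannot checksum. A minimal sketch of that gating, with placeholder version constants and device pointer:

static void my_set_default_features(struct net_device *netdev,
				    struct my_ae_dev *ae_dev)
{
	netdev->features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM;

	/* Hardware that cannot checksum IP-in-GRE must not advertise
	 * GRE GSO by default; users can still toggle it via ethtool.
	 */
	if (ae_dev->dev_version > MY_DEVICE_VERSION_V2)
		netdev->features &= ~(NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
}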
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 7a2f9233d695..a4d68fb216fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1855,7 +1855,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
 	unsigned long delta = round_jiffies_relative(HZ);
 	struct hnae3_handle *handle = &hdev->nic;
 
-	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
+	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
 		return;
 
 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 9406237c461e..f81a43d2cdfc 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -456,9 +456,6 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
 	u16 out_size = sizeof(vlan_filter);
 	int err;
 
-	if (!hwdev)
-		return -EINVAL;
-
 	vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
 	vlan_filter.enable = en;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8ea1a238dcef..d3d6415553ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4475,9 +4475,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 		goto error_pvid;
 
 	i40e_vlan_stripping_enable(vsi);
-	i40e_vc_reset_vf(vf, true);
-	/* During reset the VF got a new VSI, so refresh a pointer. */
-	vsi = pf->vsi[vf->lan_vsi_idx];
+
 	/* Locked once because multiple functions below iterate list */
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 
@@ -4563,6 +4561,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 	 */
 	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
 
+	i40e_vc_reset_vf(vf, true);
+	/* During reset the VF got a new VSI, so refresh a pointer. */
+	vsi = pf->vsi[vf->lan_vsi_idx];
+
 	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
 	if (ret) {
 		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
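The hclgevf hunk above widens the early-out in a periodic service task: when reset has failed or the command queue is disabled, issuing firmware commands can only time out. The general shape, with illustrative state bits:

static void my_periodic_service_task(struct my_dev *hdev)
{
	/* Bail out while the device cannot accept commands; the task
	 * will be rescheduled once recovery re-enables the queue.
	 */
	if (test_bit(MY_STATE_RST_FAIL, &hdev->state) ||
	    test_bit(MY_STATE_CMD_DISABLE, &hdev->state))
		return;

	my_update_link_status(hdev);	/* assumed command-queue users */
	my_update_stats(hdev);
}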
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 85fba85fbb23..e110ba346185 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -521,7 +521,7 @@ void iavf_down(struct iavf_adapter *adapter);
 int iavf_process_config(struct iavf_adapter *adapter);
 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
 void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
-void iavf_schedule_request_stats(struct iavf_adapter *adapter);
+void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags);
 void iavf_schedule_finish_config(struct iavf_adapter *adapter);
 void iavf_reset(struct iavf_adapter *adapter);
 void iavf_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index a34303ad057d..90397293525f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -362,7 +362,7 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
 	unsigned int i;
 
 	/* Explicitly request stats refresh */
-	iavf_schedule_request_stats(adapter);
+	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
 
 	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 7b300c86ceda..6a2e6d64bc3a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -314,15 +314,13 @@ void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
 }
 
 /**
- * iavf_schedule_request_stats - Set the flags and schedule statistics request
+ * iavf_schedule_aq_request - Set the flags and schedule aq request
 * @adapter: board private structure
- *
- * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
- * request and refresh ethtool stats
+ * @flags: requested aq flags
 **/
-void iavf_schedule_request_stats(struct iavf_adapter *adapter)
+void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
 {
-	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
+	adapter->aq_required |= flags;
 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
@@ -823,7 +821,7 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
 		list_add_tail(&f->list, &adapter->vlan_filter_list);
 		f->state = IAVF_VLAN_ADD;
 		adapter->num_vlan_filters++;
-		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
 	}
 
 clearout:
@@ -845,7 +843,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
 	f = iavf_find_vlan(adapter, vlan);
 	if (f) {
 		f->state = IAVF_VLAN_REMOVE;
-		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
 	}
 
 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -1421,7 +1419,8 @@ void iavf_down(struct iavf_adapter *adapter)
 	iavf_clear_fdir_filters(adapter);
 	iavf_clear_adv_rss_conf(adapter);
 
-	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
+	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
+	    !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
 		/* cancel any current operation */
 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
 		/* Schedule operations to close down the HW. Don't wait
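The iavf refactor above folds "set an admin-queue request flag, then kick the watchdog" into one helper, so callers both record the request and get it serviced immediately instead of waiting for the next periodic watchdog run. A generic sketch with made-up names:

#include <linux/workqueue.h>

static void my_schedule_aq_request(struct my_adapter *adapter, u64 flags)
{
	/* record which admin-queue operations are pending ... */
	adapter->aq_required |= flags;
	/* ... and run the watchdog now rather than on its next tick */
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}

/* Callers then become one-liners, e.g.
 *	my_schedule_aq_request(adapter, MY_FLAG_AQ_ADD_VLAN_FILTER);
 */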
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 13ba9c74bd84..76b34cee1da3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3827,8 +3827,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
 	}
 
 	/* only call pci_enable_sriov() if no VFs are allocated already */
-	if (!old_vfs)
+	if (!old_vfs) {
 		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+		if (err)
+			goto err_out;
+	}
 
 	goto out;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 93bce729be76..7ab6dd58e400 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -868,6 +868,18 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
 	spin_unlock(&adapter->stats64_lock);
 }
 
+static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter)
+{
+	return (adapter->rx_itr_setting <= 3) ?
+		adapter->rx_itr_setting : adapter->rx_itr_setting >> 2;
+}
+
+static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter)
+{
+	return (adapter->tx_itr_setting <= 3) ?
+		adapter->tx_itr_setting : adapter->tx_itr_setting >> 2;
+}
+
 static int igc_ethtool_get_coalesce(struct net_device *netdev,
 				    struct ethtool_coalesce *ec,
 				    struct kernel_ethtool_coalesce *kernel_coal,
@@ -875,17 +887,8 @@ static int igc_ethtool_get_coalesce(struct net_device *netdev,
 {
 	struct igc_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->rx_itr_setting <= 3)
-		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
-	else
-		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
-
-	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) {
-		if (adapter->tx_itr_setting <= 3)
-			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
-		else
-			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
-	}
+	ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter);
+	ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter);
 
 	return 0;
 }
@@ -910,8 +913,12 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
 	    ec->tx_coalesce_usecs == 2)
 		return -EINVAL;
 
-	if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+	if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) &&
+	    ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs");
 		return -EINVAL;
+	}
 
 	/* If ITR is disabled, disable DMAC */
 	if (ec->rx_coalesce_usecs == 0) {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 293b45717683..98de34d0ce07 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6491,7 +6491,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 	struct igc_ring *ring;
 	int i, drops;
 
-	if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
+	if (unlikely(!netif_carrier_ok(dev)))
 		return -ENETDOWN;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 0310af851086..9339edbd9082 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -979,6 +979,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
 	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
 	u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+	u32 aflags = adapter->flags;
 	bool is_l2 = false;
 	u32 regval;
 
@@ -996,20 +997,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 	case HWTSTAMP_FILTER_NONE:
 		tsync_rx_ctl = 0;
 		tsync_rx_mtrl = 0;
-		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+			    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1023,8 +1024,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
 		is_l2 = true;
 		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 	case HWTSTAMP_FILTER_NTP_ALL:
@@ -1035,7 +1036,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 		if (hw->mac.type >= ixgbe_mac_X550) {
 			tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
 			config->rx_filter = HWTSTAMP_FILTER_ALL;
-			adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+			aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
 			break;
 		}
 		fallthrough;
@@ -1046,8 +1047,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 		 * Delay_Req messages and hardware does not support
 		 * timestamping all packets => return error
 		 */
-		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		config->rx_filter = HWTSTAMP_FILTER_NONE;
 		return -ERANGE;
 	}
@@ -1079,8 +1078,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 			       IXGBE_TSYNCRXCTL_TYPE_ALL |
 			       IXGBE_TSYNCRXCTL_TSIP_UT_EN;
 		config->rx_filter = HWTSTAMP_FILTER_ALL;
-		adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
-		adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+		aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+		aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
 		is_l2 = true;
 		break;
 	default:
@@ -1113,6 +1112,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 
 	IXGBE_WRITE_FLUSH(hw);
 
+	/* configure adapter flags only when HW is actually configured */
+	adapter->flags = aflags;
+
 	/* clear TX/RX time stamp registers, just to be sure */
 	ixgbe_ptp_clear_tx_timestamp(adapter);
 	IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
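The ixgbe_ptp change above stages flag updates in a local copy and commits them only after the hardware accepted the configuration, so an early -ERANGE exit can no longer leave software state ahead of hardware state. The pattern in miniature, with hypothetical names:

static int my_set_timestamp_mode(struct my_adapter *adapter, int filter)
{
	u32 aflags = adapter->flags;	/* work on a scratch copy */

	switch (filter) {
	case MY_FILTER_NONE:
		aflags &= ~MY_FLAG_RX_HWTSTAMP_ENABLED;
		break;
	case MY_FILTER_ALL:
		aflags |= MY_FLAG_RX_HWTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;		/* adapter->flags untouched */
	}

	my_write_hw_config(adapter, aflags);	/* assumed register writes */

	/* commit flags only once HW is actually configured */
	adapter->flags = aflags;
	return 0;
}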
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index eb74ccddb440..21c3f9b015c8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5586,6 +5586,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
 		break;
 	case ETHTOOL_GRXCLSRLALL:
 		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+			if (loc == info->rule_cnt) {
+				ret = -EMSGSIZE;
+				break;
+			}
+
 			if (port->rfs_rules[i])
 				rules[loc++] = i;
 		}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 4424de2ffd70..dbc518ff8276 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -734,13 +734,13 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
 dma_map_sg_err:
 	if (si > 0) {
 		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
-				 sglist[0].len[0], DMA_TO_DEVICE);
-		sglist[0].len[0] = 0;
+				 sglist[0].len[3], DMA_TO_DEVICE);
+		sglist[0].len[3] = 0;
 	}
 	while (si > 1) {
 		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
-			       sglist[si >> 2].len[si & 3], DMA_TO_DEVICE);
-		sglist[si >> 2].len[si & 3] = 0;
+			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+		sglist[si >> 2].len[3 - (si & 3)] = 0;
 		si--;
 	}
 	tx_buffer->gather = 0;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index 5a520d37bea0..d0adb82d65c3 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -69,12 +69,12 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
 		compl_sg++;
 
 		dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
-				 tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
+				 tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
 
 		i = 1; /* entry 0 is main skb, unmapped above */
 		while (frags--) {
 			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
-				       tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
 			i++;
 		}
 
@@ -131,13 +131,13 @@ static void octep_iq_free_pending(struct octep_iq *iq)
 
 		dma_unmap_single(iq->dev,
 				 tx_buffer->sglist[0].dma_ptr[0],
-				 tx_buffer->sglist[0].len[0],
+				 tx_buffer->sglist[0].len[3],
 				 DMA_TO_DEVICE);
 
 		i = 1; /* entry 0 is main skb, unmapped above */
 		while (frags--) {
 			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
-				       tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
 			i++;
 		}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 2ef57980eb47..21e75ff9f5e7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -17,7 +17,21 @@
 #define TX_BUFTYPE_NET_SG        2
 #define NUM_TX_BUFTYPES          3
 
-/* Hardware format for Scatter/Gather list */
+/* Hardware format for Scatter/Gather list
+ *
+ * 63      48|47     32|31     16|15       0
+ * -----------------------------------------
+ * |  Len 0  |  Len 1  |  Len 2  |  Len 3  |
+ * -----------------------------------------
+ * |                Ptr 0                  |
+ * -----------------------------------------
+ * |                Ptr 1                  |
+ * -----------------------------------------
+ * |                Ptr 2                  |
+ * -----------------------------------------
+ * |                Ptr 3                  |
+ * -----------------------------------------
+ */
 struct octep_tx_sglist_desc {
 	u16 len[4];
 	dma_addr_t dma_ptr[4];
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 826f691de259..a4a258da8dd5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
 }
 
 #define NPA_MAX_BURST 16
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 {
 	struct otx2_nic *pfvf = dev;
+	int cnt = cq->pool_ptrs;
 	u64 ptrs[NPA_MAX_BURST];
-	int num_ptrs = 1;
 	dma_addr_t bufptr;
+	int num_ptrs = 1;
 
 	/* Refill pool with new buffers */
 	while (cq->pool_ptrs) {
@@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 			num_ptrs = 1;
 		}
 	}
+	return cnt - cq->pool_ptrs;
 }
 
 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
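The octep hunks above all change the index used to read 16-bit length words: the hardware packs four lengths big-endian within a 64-bit word (see the layout comment added to octep_tx.h), so on a little-endian host entry i lives at len[3 - (i & 3)] while the 64-bit pointers keep their natural index. A sketch of the lookup this implies; my_sg_len() is a hypothetical wrapper, not driver API:

struct my_sglist_desc {
	u16 len[4];		/* Len 0 occupies bits 63:48 of the word */
	dma_addr_t dma_ptr[4];
};

static u16 my_sg_len(const struct my_sglist_desc *sg, unsigned int i)
{
	/* i >> 2 selects the descriptor, i & 3 the entry within it;
	 * the 3 - ... subtraction undoes the big-endian packing.
	 */
	return sg[i >> 2].len[3 - (i & 3)];
}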
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index 8ae96815865e..c1861f7de254 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
 	return weight;
 }
 
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
 int cn10k_lmtst_init(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 8511906cb4e2..997fedac3a98 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -574,20 +574,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
 int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
 		      dma_addr_t *dma)
 {
-	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
-		struct refill_work *work;
-		struct delayed_work *dwork;
-
-		work = &pfvf->refill_wrk[cq->cq_idx];
-		dwork = &work->pool_refill_work;
-		/* Schedule a task if no other task is running */
-		if (!cq->refill_task_sched) {
-			cq->refill_task_sched = true;
-			schedule_delayed_work(dwork,
-					      msecs_to_jiffies(100));
-		}
+	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -1082,39 +1070,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
 static void otx2_pool_refill_task(struct work_struct *work)
 {
 	struct otx2_cq_queue *cq;
-	struct otx2_pool *rbpool;
 	struct refill_work *wrk;
-	int qidx, free_ptrs = 0;
 	struct otx2_nic *pfvf;
-	dma_addr_t bufptr;
+	int qidx;
 
 	wrk = container_of(work, struct refill_work, pool_refill_work.work);
 	pfvf = wrk->pf;
 	qidx = wrk - pfvf->refill_wrk;
 	cq = &pfvf->qset.cq[qidx];
-	rbpool = cq->rbpool;
-	free_ptrs = cq->pool_ptrs;
 
-	while (cq->pool_ptrs) {
-		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
-			/* Schedule a WQ if we fails to free atleast half of the
-			 * pointers else enable napi for this RQ.
-			 */
-			if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
-				struct delayed_work *dwork;
-
-				dwork = &wrk->pool_refill_work;
-				schedule_delayed_work(dwork,
-						      msecs_to_jiffies(100));
-			} else {
-				cq->refill_task_sched = false;
-			}
-			return;
-		}
-		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
-		cq->pool_ptrs--;
-	}
 	cq->refill_task_sched = false;
+
+	local_bh_disable();
+	napi_schedule(wrk->napi);
+	local_bh_enable();
 }
 
 int otx2_config_nix_queues(struct otx2_nic *pfvf)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 4c6032ee7800..c04a8ee53a82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -302,6 +302,7 @@ struct flr_work {
 struct refill_work {
 	struct delayed_work pool_refill_work;
 	struct otx2_nic *pf;
+	struct napi_struct *napi;
 };
 
 /* PTPv2 originTimestamp structure */
@@ -370,7 +371,7 @@ struct dev_hw_ops {
 	int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
 	void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
 			  int size, int qidx);
-	void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+	int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
 	void (*aura_freeptr)(void *dev, int aura, u64 buf);
 };
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 70b9065f7d10..6daf4d58c25d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1943,6 +1943,10 @@ int otx2_stop(struct net_device *netdev)
 
 	netif_tx_disable(netdev);
 
+	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+	devm_kfree(pf->dev, pf->refill_wrk);
+
 	otx2_free_hw_resources(pf);
 	otx2_free_cints(pf, pf->hw.cint_cnt);
 	otx2_disable_napi(pf);
@@ -1950,9 +1954,6 @@ int otx2_stop(struct net_device *netdev)
 	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
 		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
 
-	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
-		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
-	devm_kfree(pf->dev, pf->refill_wrk);
 
 	kfree(qset->sq);
 	kfree(qset->cq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index e369baf11530..53b2a4ef5298 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -29,7 +29,8 @@
 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 				     struct bpf_prog *prog,
 				     struct nix_cqe_rx_s *cqe,
-				     struct otx2_cq_queue *cq);
+				     struct otx2_cq_queue *cq,
+				     bool *need_xdp_flush);
 
 static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
 				 struct otx2_cq_queue *cq)
@@ -337,7 +338,7 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
 static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
 				 struct napi_struct *napi,
 				 struct otx2_cq_queue *cq,
-				 struct nix_cqe_rx_s *cqe)
+				 struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
 {
 	struct nix_rx_parse_s *parse = &cqe->parse;
 	struct nix_rx_sg_s *sg = &cqe->sg;
@@ -353,7 +354,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
 	}
 
 	if (pfvf->xdp_prog)
		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
 			return;
 
 	skb = napi_get_frags(napi);
@@ -388,6 +389,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 				struct napi_struct *napi,
 				struct otx2_cq_queue *cq, int budget)
 {
+	bool need_xdp_flush = false;
 	struct nix_cqe_rx_s *cqe;
 	int processed_cqe = 0;
 
@@ -409,13 +411,15 @@ process_cqe:
 		cq->cq_head++;
 		cq->cq_head &= (cq->cqe_cnt - 1);
 
-		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
+		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
 
 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
 		cqe->sg.seg_addr = 0x00;
 		processed_cqe++;
 		cq->pend_cqe--;
 	}
+	if (need_xdp_flush)
+		xdp_do_flush();
 
 	/* Free CQEs to HW */
 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
@@ -424,9 +428,10 @@ process_cqe:
 	return processed_cqe;
 }
 
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 {
 	struct otx2_nic *pfvf = dev;
+	int cnt = cq->pool_ptrs;
 	dma_addr_t bufptr;
 
 	while (cq->pool_ptrs) {
@@ -435,6 +440,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
 		cq->pool_ptrs--;
 	}
+
+	return cnt - cq->pool_ptrs;
 }
 
 static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
@@ -521,6 +528,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
 	struct otx2_cq_queue *cq;
 	struct otx2_qset *qset;
 	struct otx2_nic *pfvf;
+	int filled_cnt = -1;
 
 	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
 	pfvf = (struct otx2_nic *)cq_poll->dev;
@@ -541,7 +549,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
 	}
 
 	if (rx_cq && rx_cq->pool_ptrs)
-		pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+		filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
 
 	/* Clear the IRQ */
 	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -561,9 +569,25 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
 				otx2_config_irq_coalescing(pfvf, i);
 		}
 
-		/* Re-enable interrupts */
-		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
-			     BIT_ULL(0));
+		if (unlikely(!filled_cnt)) {
+			struct refill_work *work;
+			struct delayed_work *dwork;
+
+			work = &pfvf->refill_wrk[cq->cq_idx];
+			dwork = &work->pool_refill_work;
+			/* Schedule a task if no other task is running */
+			if (!cq->refill_task_sched) {
+				work->napi = napi;
+				cq->refill_task_sched = true;
+				schedule_delayed_work(dwork,
+						      msecs_to_jiffies(100));
+			}
+		} else {
+			/* Re-enable interrupts */
+			otx2_write64(pfvf,
+				     NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+				     BIT_ULL(0));
+		}
 	}
 	return workdone;
 }
@@ -1334,7 +1358,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 				     struct bpf_prog *prog,
 				     struct nix_cqe_rx_s *cqe,
-				     struct otx2_cq_queue *cq)
+				     struct otx2_cq_queue *cq,
+				     bool *need_xdp_flush)
 {
 	unsigned char *hard_start, *data;
 	int qidx = cq->cq_idx;
@@ -1371,8 +1396,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 
 		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
 				    DMA_FROM_DEVICE);
-		if (!err)
+		if (!err) {
+			*need_xdp_flush = true;
 			return true;
+		}
 
 		put_page(page);
 		break;
	default:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 9e3bfbe5c480..a82ffca8ce1b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -170,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
 void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
 		    int size, int qidx);
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
 #endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6ad42e3b488f..3cffd1bd3067 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2005,11 +2005,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 	u8 *data, *new_data;
 	struct mtk_rx_dma_v2 *rxd, trxd;
 	int done = 0, bytes = 0;
+	dma_addr_t dma_addr = DMA_MAPPING_ERROR;
 
 	while (done < budget) {
 		unsigned int pktlen, *rxdcsum;
 		struct net_device *netdev;
-		dma_addr_t dma_addr;
 		u32 hash, reason;
 		int mac = 0;
 
@@ -2186,7 +2186,8 @@ release_desc:
 		else
 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
 
-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
+		    likely(dma_addr != DMA_MAPPING_ERROR))
 			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
 
 		ring->calc_idx = idx;
@@ -2994,6 +2995,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
 	int i;
 
 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+		if (cnt == cmd->rule_cnt)
+			return -EMSGSIZE;
+
 		if (mac->hwlro_ip[i]) {
 			rule_locs[cnt] = i;
 			cnt++;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index a70a5417c173..a4efbeb16208 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -214,9 +214,11 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 	dsa_port = mtk_flow_get_dsa_port(&dev);
 
 	if (dev == eth->netdev[0])
-		pse_port = 1;
+		pse_port = PSE_GDM1_PORT;
 	else if (dev == eth->netdev[1])
-		pse_port = 2;
+		pse_port = PSE_GDM2_PORT;
+	else if (dev == eth->netdev[2])
+		pse_port = PSE_GDM3_PORT;
 	else
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index 300fe1a93dce..ef980e4e5bc2 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -1021,18 +1021,32 @@ static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
 	list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
 		newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
 		if (!newckf)
-			return ERR_PTR(-ENOMEM);
+			goto err;
 		list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
 	}
 
 	list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
 		newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
 		if (!newcaf)
-			return ERR_PTR(-ENOMEM);
+			goto err;
 		list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
 	}
 
 	return duprule;
+
+err:
+	list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) {
+		list_del(&ckf->ctrl.list);
+		kfree(ckf);
+	}
+
+	list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) {
+		list_del(&caf->ctrl.list);
+		kfree(caf);
+	}
+
+	kfree(duprule);
+	return ERR_PTR(-ENOMEM);
 }
 
 static void vcap_apply_width(u8 *dst, int width, int bytes)
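The vcap_dup_rule() fix above replaces early returns with a goto label that unwinds everything duplicated so far. The general shape uses list_for_each_entry_safe() because entries are freed while walking. A self-contained sketch with placeholder types:

#include <linux/list.h>
#include <linux/slab.h>

struct my_field {
	struct list_head list;
};

static int my_dup_fields(struct list_head *src, struct list_head *dst)
{
	struct my_field *f, *tmp;

	list_for_each_entry(f, src, list) {
		struct my_field *copy = kmemdup(f, sizeof(*copy), GFP_KERNEL);

		if (!copy)
			goto err;	/* unwind partial duplication */
		list_add_tail(&copy->list, dst);
	}
	return 0;

err:
	list_for_each_entry_safe(f, tmp, dst, list) {
		list_del(&f->list);
		kfree(f);
	}
	return -ENOMEM;
}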
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index c07f25e791c7..fe4e166de8a0 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -243,10 +243,9 @@ static void vcap_test_api_init(struct vcap_admin *admin)
 }
 
 /* Helper function to create a rule of a specific size */
-static struct vcap_rule *
-test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
-			  u16 priority,
-			  int id, int size, int expected_addr)
+static void test_vcap_xn_rule_creator(struct kunit *test, int cid,
+				      enum vcap_user user, u16 priority,
+				      int id, int size, int expected_addr)
 {
 	struct vcap_rule *rule;
 	struct vcap_rule_internal *ri;
@@ -311,7 +310,7 @@ test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
 	ret = vcap_add_rule(rule);
 	KUNIT_EXPECT_EQ(test, 0, ret);
 	KUNIT_EXPECT_EQ(test, expected_addr, ri->addr);
-	return rule;
+	vcap_free_rule(rule);
 }
 
 /* Prepare testing rule deletion */
@@ -995,6 +994,16 @@ static void vcap_api_encode_rule_actionset_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]);
 }
 
+static void vcap_free_ckf(struct vcap_rule *rule)
+{
+	struct vcap_client_keyfield *ckf, *next_ckf;
+
+	list_for_each_entry_safe(ckf, next_ckf, &rule->keyfields, ctrl.list) {
+		list_del(&ckf->ctrl.list);
+		kfree(ckf);
+	}
+}
+
 static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
 {
 	struct vcap_admin admin = {
@@ -1027,6 +1036,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
 	KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
 	KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+	vcap_free_ckf(rule);
 
 	INIT_LIST_HEAD(&rule->keyfields);
 	ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
@@ -1039,6 +1049,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
 	KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.value);
 	KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+	vcap_free_ckf(rule);
 
 	INIT_LIST_HEAD(&rule->keyfields);
 	ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
@@ -1052,6 +1063,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
 	KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
 	KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.mask);
+	vcap_free_ckf(rule);
 
 	INIT_LIST_HEAD(&rule->keyfields);
 	ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, 0x98765432, 0xff00ffab);
@@ -1064,6 +1076,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, kf->ctrl.type);
 	KUNIT_EXPECT_EQ(test, 0x98765432, kf->data.u32.value);
 	KUNIT_EXPECT_EQ(test, 0xff00ffab, kf->data.u32.mask);
+	vcap_free_ckf(rule);
 
 	INIT_LIST_HEAD(&rule->keyfields);
 	ret = vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_SIP, &dip);
@@ -1078,6 +1091,18 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
 		KUNIT_EXPECT_EQ(test, dip.value[idx], kf->data.u128.value[idx]);
 	for (idx = 0; idx < ARRAY_SIZE(dip.mask); ++idx)
 		KUNIT_EXPECT_EQ(test, dip.mask[idx], kf->data.u128.mask[idx]);
+	vcap_free_ckf(rule);
+}
+
+static void vcap_free_caf(struct vcap_rule *rule)
+{
+	struct vcap_client_actionfield *caf, *next_caf;
+
+	list_for_each_entry_safe(caf, next_caf,
+				 &rule->actionfields, ctrl.list) {
+		list_del(&caf->ctrl.list);
+		kfree(caf);
+	}
 }
 
 static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
@@ -1105,6 +1130,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
 	KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+	vcap_free_caf(rule);
 
 	INIT_LIST_HEAD(&rule->actionfields);
 	ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
@@ -1116,6 +1142,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
 	KUNIT_EXPECT_EQ(test, 0x1, af->data.u1.value);
+	vcap_free_caf(rule);
 
 	INIT_LIST_HEAD(&rule->actionfields);
 	ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_ANY);
@@ -1127,6 +1154,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
 	KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+	vcap_free_caf(rule);
 
 	INIT_LIST_HEAD(&rule->actionfields);
 	ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE, 0x98765432);
@@ -1138,6 +1166,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, VCAP_AF_TYPE, af->ctrl.action);
 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
 	KUNIT_EXPECT_EQ(test, 0x98765432, af->data.u32.value);
+	vcap_free_caf(rule);
 
 	INIT_LIST_HEAD(&rule->actionfields);
 	ret = vcap_rule_add_action_u32(rule, VCAP_AF_MASK_MODE, 0xaabbccdd);
@@ -1149,6 +1178,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, VCAP_AF_MASK_MODE, af->ctrl.action);
 	KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
 	KUNIT_EXPECT_EQ(test, 0xaabbccdd, af->data.u32.value);
+	vcap_free_caf(rule);
 }
 
 static void vcap_api_rule_find_keyset_basic_test(struct kunit *test)
@@ -1408,6 +1438,10 @@ static void vcap_api_encode_rule_test(struct kunit *test)
 	ret = list_empty(&is2_admin.rules);
 	KUNIT_EXPECT_EQ(test, false, ret);
 	KUNIT_EXPECT_EQ(test, 0, ret);
+
+	vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
+			    rule->cookie, false);
+
 	vcap_free_rule(rule);
 
 	/* Check that the rule has been freed: tricky to access since this
@@ -1418,6 +1452,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, true, ret);
 	ret = list_empty(&rule->actionfields);
 	KUNIT_EXPECT_EQ(test, true, ret);
+
+	vcap_del_rule(&test_vctrl, &test_netdev, id);
 }
 
 static void vcap_api_set_rule_counter_test(struct kunit *test)
@@ -1561,6 +1597,11 @@ static void vcap_api_rule_insert_in_order_test(struct kunit *test)
 	test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
 	test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
 	test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+
+	vcap_del_rule(&test_vctrl, &test_netdev, 200);
+	vcap_del_rule(&test_vctrl, &test_netdev, 300);
+	vcap_del_rule(&test_vctrl, &test_netdev, 400);
+	vcap_del_rule(&test_vctrl, &test_netdev, 500);
 }
 
 static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
@@ -1619,6 +1660,11 @@ static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
 		++idx;
 	}
 	KUNIT_EXPECT_EQ(test, 768, admin.last_used_addr);
+
+	vcap_del_rule(&test_vctrl, &test_netdev, 500);
+	vcap_del_rule(&test_vctrl, &test_netdev, 400);
+	vcap_del_rule(&test_vctrl, &test_netdev, 300);
+	vcap_del_rule(&test_vctrl, &test_netdev, 200);
 }
 
 static void vcap_api_rule_remove_at_end_test(struct kunit *test)
@@ -1819,6 +1865,9 @@ static void vcap_api_rule_remove_in_front_test(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, 786, test_init_start);
 	KUNIT_EXPECT_EQ(test, 8, test_init_count);
 	KUNIT_EXPECT_EQ(test, 794, admin.last_used_addr);
+
+	vcap_del_rule(&test_vctrl, &test_netdev, 200);
+	vcap_del_rule(&test_vctrl, &test_netdev, 300);
 }
 
 static struct kunit_case vcap_api_rule_remove_test_cases[] = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 6aac98bcb9f4..aae4131f146a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -187,6 +187,7 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
			      struct ionic_desc_info *desc_info,
			      struct ionic_cq_info *cq_info, void *cb_arg);
 
+#define IONIC_MAX_BUF_LEN			((u16)-1)
 #define IONIC_PAGE_SIZE				PAGE_SIZE
 #define IONIC_PAGE_SPLIT_SZ			(PAGE_SIZE / 2)
 #define IONIC_PAGE_GFP_MASK			(GFP_ATOMIC | __GFP_NOWARN |\
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 26798fc635db..44466e8c5d77 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -207,7 +207,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
			return NULL;
		}
 
-		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
+						 IONIC_PAGE_SIZE - buf_info->page_offset));
		len -= frag_len;
 
		dma_sync_single_for_cpu(dev,
@@ -452,7 +453,8 @@ void ionic_rx_fill(struct ionic_queue *q)
 
		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
-		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
+						 IONIC_PAGE_SIZE - buf_info->page_offset));
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
@@ -471,7 +473,9 @@ void ionic_rx_fill(struct ionic_queue *q)
			}
 
			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
-			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
+			frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
+								IONIC_PAGE_SIZE -
+								buf_info->page_offset));
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
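The ionic change above clamps fragment lengths before they are narrowed to 16 bits: with large pages, IONIC_PAGE_SIZE - page_offset can reach 65536, which would truncate to 0 in a u16 descriptor field. A minimal, hedged illustration of the hazard and the clamp (my_frag_len is hypothetical):

#define MY_MAX_BUF_LEN	((u16)-1)	/* 65535 */

static u16 my_frag_len(u32 wanted, u32 page_room)
{
	/* min_t(u16, wanted, page_room) would truncate page_room first
	 * and could yield 0; clamp in u32, then narrow safely.
	 */
	return min_t(u16, wanted, min_t(u32, MY_MAX_BUF_LEN, page_room));
}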
rswitch_device *rdev = netdev_priv(ndev); + unsigned long flags; phy_start(ndev->phydev); napi_enable(&rdev->napi); netif_start_queue(ndev); + spin_lock_irqsave(&rdev->priv->lock, flags); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); + spin_unlock_irqrestore(&rdev->priv->lock, flags); if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); @@ -1461,6 +1469,7 @@ static int rswitch_stop(struct net_device *ndev) { struct rswitch_device *rdev = netdev_priv(ndev); struct rswitch_gwca_ts_info *ts_info, *ts_info2; + unsigned long flags; netif_tx_stop_all_queues(ndev); bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); @@ -1476,8 +1485,10 @@ static int rswitch_stop(struct net_device *ndev) kfree(ts_info); } + spin_lock_irqsave(&rdev->priv->lock, flags); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); + spin_unlock_irqrestore(&rdev->priv->lock, flags); phy_stop(ndev->phydev); napi_disable(&rdev->napi); @@ -1887,6 +1898,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + spin_lock_init(&priv->lock); attr = soc_device_match(rswitch_soc_no_speed_change); if (attr) diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index 54f397effbc6..f0c16a37ea55 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -1011,6 +1011,8 @@ struct rswitch_private { struct rswitch_etha etha[RSWITCH_NUM_PORTS]; struct rswitch_mfwd mfwd; + spinlock_t lock; /* lock interrupt registers' control */ + bool etha_no_runtime_change; bool gwca_halt; }; diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 047322b04d4f..834f000ba1c4 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -136,6 +136,8 @@ static struct efx_tc_mac_pedit_action *efx_tc_flower_get_mac(struct efx_nic *efx if (old) { /* don't need our new entry */ kfree(ped); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); if (!refcount_inc_not_zero(&old->ref)) return ERR_PTR(-EAGAIN); /* existing entry found, ref taken */ @@ -602,6 +604,8 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx, kfree(encap); if (pseudo) /* don't need our new pseudo either */ efx_tc_flower_release_encap_match(efx, pseudo); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return PTR_ERR(old); /* check old and new em_types are compatible */ switch (old->type) { case EFX_TC_EM_DIRECT: @@ -700,6 +704,8 @@ static struct efx_tc_recirc_id *efx_tc_get_recirc_id(struct efx_nic *efx, if (old) { /* don't need our new entry */ kfree(rid); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); if (!refcount_inc_not_zero(&old->ref)) return ERR_PTR(-EAGAIN); /* existing entry found */ @@ -1482,7 +1488,10 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht, &rule->linkage, efx_tc_match_action_ht_params); - if (old) { + if (IS_ERR(old)) { + rc = PTR_ERR(old); + goto release; + } else if (old) { netif_dbg(efx, drv, efx->net_dev, "Ignoring already-offloaded rule (cookie %lx)\n", tc->cookie); @@ -1697,7 +1706,10 @@ static int efx_tc_flower_replace_lhs(struct efx_nic *efx, old = 
rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht, &rule->linkage, efx_tc_lhs_rule_ht_params); - if (old) { + if (IS_ERR(old)) { + rc = PTR_ERR(old); + goto release; + } else if (old) { netif_dbg(efx, drv, efx->net_dev, "Already offloaded rule (cookie %lx)\n", tc->cookie); rc = -EEXIST; @@ -1858,7 +1870,10 @@ static int efx_tc_flower_replace(struct efx_nic *efx, old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht, &rule->linkage, efx_tc_match_action_ht_params); - if (old) { + if (IS_ERR(old)) { + rc = PTR_ERR(old); + goto release; + } else if (old) { netif_dbg(efx, drv, efx->net_dev, "Already offloaded rule (cookie %lx)\n", tc->cookie); NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c index 8e06bfbcbea1..44bb57670340 100644 --- a/drivers/net/ethernet/sfc/tc_conntrack.c +++ b/drivers/net/ethernet/sfc/tc_conntrack.c @@ -298,7 +298,10 @@ static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone, old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht, &conn->linkage, efx_tc_ct_ht_params); - if (old) { + if (IS_ERR(old)) { + rc = PTR_ERR(old); + goto release; + } else if (old) { netif_dbg(efx, drv, efx->net_dev, "Already offloaded conntrack (cookie %lx)\n", tc->cookie); rc = -EEXIST; @@ -482,6 +485,8 @@ struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone, if (old) { /* don't need our new entry */ kfree(ct_zone); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); if (!refcount_inc_not_zero(&old->ref)) return ERR_PTR(-EAGAIN); /* existing entry found */ diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c index 0fafb47ea082..c44088424323 100644 --- a/drivers/net/ethernet/sfc/tc_counters.c +++ b/drivers/net/ethernet/sfc/tc_counters.c @@ -236,6 +236,8 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index( if (old) { /* don't need our new entry */ kfree(ctr); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); if (!refcount_inc_not_zero(&old->ref)) return ERR_PTR(-EAGAIN); /* existing entry found */ diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c index 7e8bcdb222ad..87443f9dfd22 100644 --- a/drivers/net/ethernet/sfc/tc_encap_actions.c +++ b/drivers/net/ethernet/sfc/tc_encap_actions.c @@ -132,6 +132,8 @@ static int efx_bind_neigh(struct efx_nic *efx, /* don't need our new entry */ put_net_track(neigh->net, &neigh->ns_tracker); kfree(neigh); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return PTR_ERR(old); if (!refcount_inc_not_zero(&old->ref)) return -EAGAIN; /* existing entry found, ref taken */ @@ -640,6 +642,8 @@ struct efx_tc_encap_action *efx_tc_flower_create_encap_md( if (old) { /* don't need our new entry */ kfree(encap); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); if (!refcount_inc_not_zero(&old->ref)) return ERR_PTR(-EAGAIN); /* existing entry found, ref taken */ diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 403cb397d4d3..1e996c29043d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -70,7 +70,7 @@ struct stmmac_txq_stats { u64 tx_tso_frames; u64 tx_tso_nfrags; struct u64_stats_sync syncp; -}; +} ____cacheline_aligned_in_smp; struct stmmac_rxq_stats { u64 rx_bytes; @@ -79,7 +79,7 @@ struct stmmac_rxq_stats { 
u64 rx_normal_irq_n; u64 napi_poll; struct u64_stats_sync syncp; -}; +} ____cacheline_aligned_in_smp; /* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { @@ -202,6 +202,9 @@ struct stmmac_extra_stats { unsigned long mtl_est_hlbf; unsigned long mtl_est_btre; unsigned long mtl_est_btrlm; + /* per queue statistics */ + struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; + struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; unsigned long rx_dropped; unsigned long rx_errors; unsigned long tx_dropped; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 01e77368eef1..465ff1fd4785 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -441,8 +441,8 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; int ret = 0; u32 v; @@ -455,9 +455,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_TX_INT) { ret |= handle_tx; - u64_stats_update_begin(&tx_q->txq_stats.syncp); - tx_q->txq_stats.tx_normal_irq_n++; - u64_stats_update_end(&tx_q->txq_stats.syncp); + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->tx_normal_irq_n++; + u64_stats_update_end(&txq_stats->syncp); } if (v & EMAC_TX_DMA_STOP_INT) @@ -479,9 +479,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_RX_INT) { ret |= handle_rx; - u64_stats_update_begin(&rx_q->rxq_stats.syncp); - rx_q->rxq_stats.rx_normal_irq_n++; - u64_stats_update_end(&rx_q->rxq_stats.syncp); + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->rx_normal_irq_n++; + u64_stats_update_end(&rxq_stats->syncp); } if (v & EMAC_RX_BUF_UA_INT) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 980e5f8a37ec..9470d3fd2ded 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -171,8 +171,8 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs; u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan)); u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan)); - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; int ret = 0; if (dir == DMA_DIR_RX) @@ -201,15 +201,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, } /* TX/RX NORMAL interrupts */ if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - u64_stats_update_begin(&rx_q->rxq_stats.syncp); - rx_q->rxq_stats.rx_normal_irq_n++; - u64_stats_update_end(&rx_q->rxq_stats.syncp); + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->rx_normal_irq_n++; + u64_stats_update_end(&rxq_stats->syncp); ret |= handle_rx; } if (likely(intr_status & DMA_CHAN_STATUS_TI)) { - u64_stats_update_begin(&tx_q->txq_stats.syncp); - tx_q->txq_stats.tx_normal_irq_n++; - 
u64_stats_update_end(&tx_q->txq_stats.syncp); + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->tx_normal_irq_n++; + u64_stats_update_end(&txq_stats->syncp); ret |= handle_tx; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index aaa09b16b016..7907d62d3437 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -162,8 +162,8 @@ static void show_rx_process_state(unsigned int status) int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; int ret = 0; /* read the status register (CSR5) */ u32 intr_status = readl(ioaddr + DMA_STATUS); @@ -215,16 +215,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, u32 value = readl(ioaddr + DMA_INTR_ENA); /* to schedule NAPI on real RIE event. */ if (likely(value & DMA_INTR_ENA_RIE)) { - u64_stats_update_begin(&rx_q->rxq_stats.syncp); - rx_q->rxq_stats.rx_normal_irq_n++; - u64_stats_update_end(&rx_q->rxq_stats.syncp); + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->rx_normal_irq_n++; + u64_stats_update_end(&rxq_stats->syncp); ret |= handle_rx; } } if (likely(intr_status & DMA_STATUS_TI)) { - u64_stats_update_begin(&tx_q->txq_stats.syncp); - tx_q->txq_stats.tx_normal_irq_n++; - u64_stats_update_end(&tx_q->txq_stats.syncp); + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->tx_normal_irq_n++; + u64_stats_update_end(&txq_stats->syncp); ret |= handle_tx; } if (unlikely(intr_status & DMA_STATUS_ERI)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index fa69d64a8694..3cde695fec91 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -337,8 +337,8 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; @@ -367,15 +367,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, /* TX/RX NORMAL interrupts */ if (likely(intr_status & XGMAC_NIS)) { if (likely(intr_status & XGMAC_RI)) { - u64_stats_update_begin(&rx_q->rxq_stats.syncp); - rx_q->rxq_stats.rx_normal_irq_n++; - u64_stats_update_end(&rx_q->rxq_stats.syncp); + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->rx_normal_irq_n++; + u64_stats_update_end(&rxq_stats->syncp); ret |= handle_rx; } if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - u64_stats_update_begin(&tx_q->txq_stats.syncp); - tx_q->txq_stats.tx_normal_irq_n++; - u64_stats_update_end(&tx_q->txq_stats.syncp); + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->tx_normal_irq_n++; + u64_stats_update_end(&txq_stats->syncp); ret |= handle_tx; } } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h 
b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 3401e888a9f6..cd7a9768de5f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -78,7 +78,6 @@ struct stmmac_tx_queue { dma_addr_t dma_tx_phy; dma_addr_t tx_tail_addr; u32 mss; - struct stmmac_txq_stats txq_stats; }; struct stmmac_rx_buffer { @@ -123,7 +122,6 @@ struct stmmac_rx_queue { unsigned int len; unsigned int error; } state; - struct stmmac_rxq_stats rxq_stats; }; struct stmmac_channel { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index b7ac7abecdd3..6aa5c0556d22 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -548,14 +548,14 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) pos = data; for (q = 0; q < tx_cnt; q++) { - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[q]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; struct stmmac_txq_stats snapshot; data = pos; do { - start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp); - snapshot = tx_q->txq_stats; - } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->syncp); + snapshot = *txq_stats; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n); for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { @@ -566,14 +566,14 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) pos = data; for (q = 0; q < rx_cnt; q++) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[q]; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; struct stmmac_rxq_stats snapshot; data = pos; do { - start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp); - snapshot = rx_q->rxq_stats; - } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->syncp); + snapshot = *rxq_stats; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n); for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { @@ -637,14 +637,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, pos = j; for (i = 0; i < rx_queues_count; i++) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[i]; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i]; struct stmmac_rxq_stats snapshot; j = pos; do { - start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp); - snapshot = rx_q->rxq_stats; - } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->syncp); + snapshot = *rxq_stats; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); data[j++] += snapshot.rx_pkt_n; data[j++] += snapshot.rx_normal_irq_n; @@ -654,14 +654,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, pos = j; for (i = 0; i < tx_queues_count; i++) { - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[i]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i]; struct stmmac_txq_stats snapshot; j = pos; do { - start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp); - snapshot = tx_q->txq_stats; - } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->syncp); + snapshot = *txq_stats; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); data[j++] += snapshot.tx_pkt_n; data[j++] 
+= snapshot.tx_normal_irq_n; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9a3182b9e767..83c567a89a46 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2426,6 +2426,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) { struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; struct xsk_buff_pool *pool = tx_q->xsk_pool; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc = NULL; @@ -2505,9 +2506,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; } - flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp); - tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit; - u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags); + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_set_ic_bit += tx_set_ic_bit; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); if (tx_desc) { stmmac_flush_tx_descriptors(priv, queue); @@ -2547,6 +2548,7 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; unsigned int entry, xmits = 0, count = 0; u32 tx_packets = 0, tx_errors = 0; @@ -2704,15 +2706,13 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) - hrtimer_start(&tx_q->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), - HRTIMER_MODE_REL); + stmmac_tx_timer_arm(priv, queue); - flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp); - tx_q->txq_stats.tx_packets += tx_packets; - tx_q->txq_stats.tx_pkt_n += tx_packets; - tx_q->txq_stats.tx_clean++; - u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags); + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_packets += tx_packets; + txq_stats->tx_pkt_n += tx_packets; + txq_stats->tx_clean++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); priv->xstats.tx_errors += tx_errors; @@ -2995,9 +2995,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + u32 tx_coal_timer = priv->tx_coal_timer[queue]; + + if (!tx_coal_timer) + return; hrtimer_start(&tx_q->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), + STMMAC_COAL_TIMER(tx_coal_timer), HRTIMER_MODE_REL); } @@ -4112,6 +4116,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) int nfrags = skb_shinfo(skb)->nr_frags; u32 queue = skb_get_queue_mapping(skb); unsigned int first_entry, tx_packets; + struct stmmac_txq_stats *txq_stats; int tmp_pay_len = 0, first_tx; struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; @@ -4122,6 +4127,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) int i; tx_q = &priv->dma_conf.tx_queue[queue]; + txq_stats = 
&priv->xstats.txq_stats[queue]; first_tx = tx_q->cur_tx; /* Compute header lengths */ @@ -4280,13 +4286,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp); - tx_q->txq_stats.tx_bytes += skb->len; - tx_q->txq_stats.tx_tso_frames++; - tx_q->txq_stats.tx_tso_nfrags += nfrags; + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_bytes += skb->len; + txq_stats->tx_tso_frames++; + txq_stats->tx_tso_nfrags += nfrags; if (set_ic) - tx_q->txq_stats.tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags); + txq_stats->tx_set_ic_bit++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4357,6 +4363,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) u32 queue = skb_get_queue_mapping(skb); int nfrags = skb_shinfo(skb)->nr_frags; int gso = skb_shinfo(skb)->gso_type; + struct stmmac_txq_stats *txq_stats; struct dma_edesc *tbs_desc = NULL; struct dma_desc *desc, *first; struct stmmac_tx_queue *tx_q; @@ -4366,6 +4373,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dma_addr_t des; tx_q = &priv->dma_conf.tx_queue[queue]; + txq_stats = &priv->xstats.txq_stats[queue]; first_tx = tx_q->cur_tx; if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) @@ -4517,11 +4525,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp); - tx_q->txq_stats.tx_bytes += skb->len; + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_bytes += skb->len; if (set_ic) - tx_q->txq_stats.tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags); + txq_stats->tx_set_ic_bit++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4728,6 +4736,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, struct xdp_frame *xdpf, bool dma_map) { + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc; @@ -4787,9 +4796,9 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, unsigned long flags; tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, tx_desc); - flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp); - tx_q->txq_stats.tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags); + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_set_ic_bit++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); } stmmac_enable_dma_transmission(priv, priv->ioaddr); @@ -4934,7 +4943,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, struct dma_desc *p, struct dma_desc *np, struct xdp_buff *xdp) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned int len = xdp->data_end - xdp->data; enum pkt_hash_types hash_type; @@ -4964,10 +4973,10 @@ static void 
stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, skb_record_rx_queue(skb, queue); napi_gro_receive(&ch->rxtx_napi, skb); - flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp); - rx_q->rxq_stats.rx_pkt_n++; - rx_q->rxq_stats.rx_bytes += len; - u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags); + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->rx_pkt_n++; + rxq_stats->rx_bytes += len; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); } static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) @@ -5040,6 +5049,7 @@ static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp) static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) { + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; unsigned int count = 0, error = 0, len = 0; int dirty = stmmac_rx_dirty(priv, queue); @@ -5203,9 +5213,9 @@ read_again: stmmac_finalize_xdp_rx(priv, xdp_status); - flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp); - rx_q->rxq_stats.rx_pkt_n += count; - u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags); + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->rx_pkt_n += count; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5233,6 +5243,7 @@ read_again: static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned int count = 0, error = 0, len = 0; @@ -5494,11 +5505,11 @@ drain_data: stmmac_rx_refill(priv, queue); - flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp); - rx_q->rxq_stats.rx_packets += rx_packets; - rx_q->rxq_stats.rx_bytes += rx_bytes; - rx_q->rxq_stats.rx_pkt_n += count; - u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags); + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->rx_packets += rx_packets; + rxq_stats->rx_bytes += rx_bytes; + rxq_stats->rx_pkt_n += count; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5511,15 +5522,15 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, rx_napi); struct stmmac_priv *priv = ch->priv_data; - struct stmmac_rx_queue *rx_q; + struct stmmac_rxq_stats *rxq_stats; u32 chan = ch->index; unsigned long flags; int work_done; - rx_q = &priv->dma_conf.rx_queue[chan]; - flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp); - rx_q->rxq_stats.napi_poll++; - u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags); + rxq_stats = &priv->xstats.rxq_stats[chan]; + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->napi_poll++; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); work_done = stmmac_rx(priv, budget, chan); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -5538,15 +5549,15 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, tx_napi); struct stmmac_priv *priv = ch->priv_data; - struct 
stmmac_tx_queue *tx_q; + struct stmmac_txq_stats *txq_stats; u32 chan = ch->index; unsigned long flags; int work_done; - tx_q = &priv->dma_conf.tx_queue[chan]; - flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp); - tx_q->txq_stats.napi_poll++; - u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags); + txq_stats = &priv->xstats.txq_stats[chan]; + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->napi_poll++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); work_done = stmmac_tx_clean(priv, budget, chan); work_done = min(work_done, budget); @@ -5568,20 +5579,20 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) container_of(napi, struct stmmac_channel, rxtx_napi); struct stmmac_priv *priv = ch->priv_data; int rx_done, tx_done, rxtx_done; - struct stmmac_rx_queue *rx_q; - struct stmmac_tx_queue *tx_q; + struct stmmac_rxq_stats *rxq_stats; + struct stmmac_txq_stats *txq_stats; u32 chan = ch->index; unsigned long flags; - rx_q = &priv->dma_conf.rx_queue[chan]; - flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp); - rx_q->rxq_stats.napi_poll++; - u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags); + rxq_stats = &priv->xstats.rxq_stats[chan]; + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->napi_poll++; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); - tx_q = &priv->dma_conf.tx_queue[chan]; - flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp); - tx_q->txq_stats.napi_poll++; - u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags); + txq_stats = &priv->xstats.txq_stats[chan]; + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->napi_poll++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); tx_done = stmmac_tx_clean(priv, budget, chan); tx_done = min(tx_done, budget); @@ -6924,7 +6935,7 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 int q; for (q = 0; q < tx_cnt; q++) { - struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; u64 tx_packets; u64 tx_bytes; @@ -6939,7 +6950,7 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 } for (q = 0; q < rx_cnt; q++) { - struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; u64 rx_packets; u64 rx_bytes; @@ -7340,9 +7351,9 @@ int stmmac_dvr_probe(struct device *device, priv->dev = ndev; for (i = 0; i < MTL_MAX_RX_QUEUES; i++) - u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp); + u64_stats_init(&priv->xstats.rxq_stats[i].syncp); for (i = 0; i < MTL_MAX_TX_QUEUES; i++) - u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp); + u64_stats_init(&priv->xstats.txq_stats[i].syncp); stmmac_set_ethtool_ops(ndev); priv->pause = pause; diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 88b5b1b47779..0a3346650e03 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -199,6 +199,7 @@ config TI_ICSSG_PRUETH config TI_ICSS_IEP tristate "TI PRU ICSS IEP driver" + depends on PTP_1588_CLOCK_OPTIONAL depends on TI_PRUSS default TI_PRUSS help diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index e8b94580194e..508d9a392ab1 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2115,7 +2115,12 @@ static const struct ethtool_ops 
team_ethtool_ops = { static void team_setup_by_port(struct net_device *dev, struct net_device *port_dev) { - dev->header_ops = port_dev->header_ops; + struct team *team = netdev_priv(dev); + + if (port_dev->type == ARPHRD_ETHER) + dev->header_ops = team->header_ops_cache; + else + dev->header_ops = port_dev->header_ops; dev->type = port_dev->type; dev->hard_header_len = port_dev->hard_header_len; dev->needed_headroom = port_dev->needed_headroom; @@ -2162,8 +2167,11 @@ static int team_dev_type_check_change(struct net_device *dev, static void team_setup(struct net_device *dev) { + struct team *team = netdev_priv(dev); + ether_setup(dev); dev->max_mtu = ETH_MAX_MTU; + team->header_ops_cache = dev->header_ops; dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c index 0c1e8970ee58..0a53ec293d04 100644 --- a/drivers/net/thunderbolt/main.c +++ b/drivers/net/thunderbolt/main.c @@ -1049,12 +1049,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, *tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, ip_hdr(skb)->protocol, 0); - } else if (skb_is_gso_v6(skb)) { + } else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) { tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data); *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); - return false; } else if (protocol == htons(ETH_P_IPV6)) { tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset; *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 332c853ca99b..0c13d9950cd8 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2636,6 +2636,9 @@ static int r8152_poll(struct napi_struct *napi, int budget) struct r8152 *tp = container_of(napi, struct r8152, napi); int work_done; + if (!budget) + return 0; + work_done = rx_bottom(tp, budget); if (work_done < budget) { diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 9c6f4f83f22b..0deefd1573cf 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1446,6 +1446,8 @@ static int veth_open(struct net_device *dev) netif_carrier_on(peer); } + veth_set_xdp_features(dev); + return 0; } diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index e463f59e95c2..5b5597073b00 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -4331,6 +4331,10 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LOCALBYPASS */ + nla_total_size(0) + /* IFLA_VXLAN_GBP */ + nla_total_size(0) + /* IFLA_VXLAN_GPE */ + nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */ + nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */ 0; }
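/*
 * Illustrative sketch, not from any patch above: the two NAPI rules that
 * the rswitch and r8152 hunks rely on. A poll handler may be invoked with
 * a zero budget (e.g. by netpoll) and must then do no RX work; and
 * napi_complete_done() returns false when the NAPI instance was
 * rescheduled in the meantime, in which case device interrupts must stay
 * masked. Re-enabling them is done under the same spinlock the hard-irq
 * path takes, because the enable bits live in a register shared between
 * queues. All foo_* names here are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {
	struct napi_struct napi;
	spinlock_t lock;	/* protects the shared irq-enable register */
};

int foo_process_rx(struct foo_priv *priv, int budget);		/* driver-provided */
void foo_irq_enable(struct foo_priv *priv, bool enable);	/* driver-provided */

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	unsigned long flags;
	int work_done;

	if (!budget)		/* netpoll: no RX processing allowed */
		return 0;

	work_done = foo_process_rx(priv, budget);

	/* Only unmask interrupts if this NAPI round really completed */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		spin_lock_irqsave(&priv->lock, flags);
		foo_irq_enable(priv, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return work_done;
}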
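/*
 * Illustrative sketch, not from any patch above: the sfc hunks all add the
 * same missing check. rhashtable_lookup_get_insert_fast() has three
 * outcomes: NULL (the new entry was inserted), a pointer to an existing
 * entry with the same key, or an ERR_PTR() when insertion failed
 * internally; the bug was treating the error case like a found entry.
 * A hypothetical foo_entry shows the corrected pattern:
 */
#include <linux/err.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct foo_entry {
	struct rhash_head linkage;
	u32 key;
	refcount_t ref;
};

static const struct rhashtable_params foo_ht_params = {
	.key_len = sizeof(u32),
	.key_offset = offsetof(struct foo_entry, key),
	.head_offset = offsetof(struct foo_entry, linkage),
};

static struct foo_entry *foo_get_entry(struct rhashtable *ht, u32 key)
{
	struct foo_entry *entry, *old;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->key = key;
	refcount_set(&entry->ref, 1);

	old = rhashtable_lookup_get_insert_fast(ht, &entry->linkage,
						foo_ht_params);
	if (old) {
		kfree(entry);		/* don't need our new entry */
		if (IS_ERR(old))	/* insertion itself failed */
			return ERR_CAST(old);
		if (!refcount_inc_not_zero(&old->ref))
			return ERR_PTR(-EAGAIN);
		return old;		/* existing entry found, ref taken */
	}
	return entry;			/* new entry inserted */
}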
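/*
 * Illustrative sketch, not from any patch above: the stmmac refactor moves
 * the per-queue counters out of the RX/TX queue structures, which are
 * freed and reallocated on reconfiguration (e.g. an MTU change), into the
 * persistent xstats area, so the counters are no longer lost. The counters
 * keep using the u64_stats_sync seqcount pattern, which makes 64-bit
 * updates appear atomic to readers on 32-bit SMP; a generic sketch with
 * hypothetical foo_* names:
 */
#include <linux/cache.h>
#include <linux/u64_stats_sync.h>

struct foo_queue_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;	/* u64_stats_init() once at probe */
} ____cacheline_aligned_in_smp;		/* one writer per queue; avoids false sharing */

/* Writer side, e.g. the completion path (single writer per queue) */
static void foo_stats_account(struct foo_queue_stats *qs, unsigned int len)
{
	u64_stats_update_begin(&qs->syncp);
	qs->packets++;
	qs->bytes += len;
	u64_stats_update_end(&qs->syncp);
}

/* Reader side, e.g. ethtool: retry while a writer is in progress */
static void foo_stats_read(struct foo_queue_stats *qs, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&qs->syncp);
		*packets = qs->packets;
		*bytes = qs->bytes;
	} while (u64_stats_fetch_retry(&qs->syncp, start));
}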
