Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 4
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_flower.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 6
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 11
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 75
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 6
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_cgu_regs.h | 116
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 854
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 374
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 2794
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 345
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 7
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_i225.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 88
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 165
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 189
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 124
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 5
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/Makefile | 3
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c | 244
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_mac.c | 342
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 113
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 66
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_regs.h | 135
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c | 536
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c | 312
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 121
-rw-r--r--  drivers/net/phy/micrel.c | 67
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/reg.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 16
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c | 1
-rw-r--r--  drivers/net/wireless/intersil/p54/txrx.c | 6
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/init.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/main.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/testmode.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw89/core.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtw89/debug.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw89/fw.c | 2
83 files changed, 7048 insertions(+), 467 deletions(-)
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 110d6c403bdd..4ffd303c64ea 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1745,7 +1745,7 @@ static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index)
static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry,
struct felix_stream_gate *sgi)
{
- sgi->index = entry->gate.index;
+ sgi->index = entry->hw_index;
sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1;
sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0;
sgi->basetime = entry->gate.basetime;
@@ -1947,7 +1947,7 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
kfree(sgi);
break;
case FLOW_ACTION_POLICE:
- index = a->police.index + VSC9959_PSFP_POLICER_BASE;
+ index = a->hw_index + VSC9959_PSFP_POLICER_BASE;
if (index > VSC9959_PSFP_POLICER_MAX) {
ret = -EINVAL;
goto err;
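
Review note: this hunk is part of the tree-wide move from action-specific index
fields (entry->gate.index, entry->police.index) to the shared entry->hw_index
on struct flow_action_entry. A minimal sketch of the pattern the drivers
converge on; the callback name is illustrative, not from any driver in this
series:

    #include <linux/errno.h>
    #include <net/flow_offload.h>

    static int example_resource_index(const struct flow_action_entry *entry)
    {
            switch (entry->id) {
            case FLOW_ACTION_GATE:
            case FLOW_ACTION_POLICE:
                    /* one shared field instead of gate.index / police.index */
                    return entry->hw_index;
            default:
                    return -EOPNOTSUPP;
            }
    }
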
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 72b9b39b0989..7dcdd784aea4 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -379,7 +379,7 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
vl_rule = true;
rc = sja1105_vl_gate(priv, port, extack, cookie,
- &key, act->gate.index,
+ &key, act->hw_index,
act->gate.prio,
act->gate.basetime,
act->gate.cycletime,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 533b8519ec35..466273b22f0a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -898,6 +898,8 @@
#define PCS_V2_WINDOW_SELECT 0x9064
#define PCS_V2_RV_WINDOW_DEF 0x1060
#define PCS_V2_RV_WINDOW_SELECT 0x1064
+#define PCS_V2_YC_WINDOW_DEF 0x18060
+#define PCS_V2_YC_WINDOW_SELECT 0x18064
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
@@ -1030,8 +1032,8 @@
#define XP_PROP_0_PORT_ID_WIDTH 8
#define XP_PROP_0_PORT_MODE_INDEX 8
#define XP_PROP_0_PORT_MODE_WIDTH 4
-#define XP_PROP_0_PORT_SPEEDS_INDEX 23
-#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 22
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 5
#define XP_PROP_1_MAX_RX_DMA_INDEX 24
#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 90cb55eb5466..efdcf484a510 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -278,6 +278,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
+ (rdev->device == 0x14b5)) {
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+ /* Yellow Carp devices do not need cdr workaround */
+ pdata->vdata->an_cdr_workaround = 0;
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -460,7 +467,7 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
-static const struct xgbe_version_data xgbe_v2a = {
+static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
@@ -475,7 +482,7 @@ static const struct xgbe_version_data xgbe_v2a = {
.an_cdr_workaround = 1,
};
-static const struct xgbe_version_data xgbe_v2b = {
+static struct xgbe_version_data xgbe_v2b = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 213769054391..2156600641b6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -124,10 +124,10 @@
#include "xgbe.h"
#include "xgbe-common.h"
-#define XGBE_PHY_PORT_SPEED_100 BIT(0)
-#define XGBE_PHY_PORT_SPEED_1000 BIT(1)
-#define XGBE_PHY_PORT_SPEED_2500 BIT(2)
-#define XGBE_PHY_PORT_SPEED_10000 BIT(3)
+#define XGBE_PHY_PORT_SPEED_100 BIT(1)
+#define XGBE_PHY_PORT_SPEED_1000 BIT(2)
+#define XGBE_PHY_PORT_SPEED_2500 BIT(3)
+#define XGBE_PHY_PORT_SPEED_10000 BIT(4)
#define XGBE_MUTEX_RELEASE 0x80000000
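
Review note: the companion xgbe-common.h hunk widens the PORT_SPEEDS property
field from index 23/width 4 to index 22/width 5, which is why every speed bit
above shifts up by one; presumably bit 0 is freed for an additional lower
speed. A hedged sketch of how such an INDEX/WIDTH pair is decoded — the helper
name is illustrative, not necessarily the driver's exact XP_* macros:

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Decode an INDEX/WIDTH property pair such as XP_PROP_0_PORT_SPEEDS. */
    static inline u32 xp_prop_get(u32 prop, unsigned int index,
                                  unsigned int width)
    {
            return (prop >> index) & (BIT(width) - 1);
    }

    /*
     * Before: xp_prop_get(prop0, 23, 4) - bits 0..3 were 100M/1G/2.5G/10G.
     * After:  xp_prop_get(prop0, 22, 5) - the same speeds sit at bits 1..4,
     *         leaving bit 0 free for an additional (lower) speed.
     */
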
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index b07cb9bc5f2d..4a2622b05ee1 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -635,7 +635,6 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
struct bcm4908_enet_dma_ring_bd *buf_desc;
struct bcm4908_enet_dma_ring_slot *slot;
struct device *dev = enet->dev;
- unsigned int bytes = 0;
int handled = 0;
while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
@@ -646,7 +645,6 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
dev_kfree_skb(slot->skb);
- bytes += slot->len;
if (++tx_ring->read_idx == tx_ring->length)
tx_ring->read_idx = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2b06d78baa08..a19dd6797070 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1850,6 +1850,14 @@ struct bnx2x {
/* Vxlan/Geneve related information */
u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
+
+#define FW_CAP_INVALIDATE_VF_FP_HSI BIT(0)
+ u32 fw_cap;
+
+ u32 fw_major;
+ u32 fw_minor;
+ u32 fw_rev;
+ u32 fw_eng;
};
/* Tx queues may be less or equal to Rx queues */
@@ -2525,5 +2533,6 @@ void bnx2x_register_phc(struct bnx2x *bp);
* Meant for implicit re-load flows.
*/
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-
+int bnx2x_init_firmware(struct bnx2x *bp);
+void bnx2x_release_firmware(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 54a2334dee56..8d36ebbf08e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2365,10 +2365,8 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
/* build my FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
+ u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) +
+ (bp->fw_rev << 16) + (bp->fw_eng << 24);
/* read loaded FW from chip */
u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
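
Review note: the version word compared against the value read from
XSEM_REG_PRAM packs one byte per component, major in the low byte. A small
sketch of that layout; the helper name is illustrative:

    #include <linux/types.h>

    /* One byte per component, major in the low byte. */
    static u32 bnx2x_fw_ver_dword(u8 major, u8 minor, u8 rev, u8 eng)
    {
            return major | (minor << 8) | (rev << 16) | (eng << 24);
    }

    /* Example: 7.13.21.0 packs to 0x00150d07. */
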
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 3f8435208bf4..a84d015da5df 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -241,6 +241,8 @@
IRO[221].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
+#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \
+ (IRO[386].base + ((fid) * IRO[386].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
/* eth hsi version */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 622fadc50316..611efee75834 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 15
+#define BCM_5710_FW_REVISION_VERSION 21
+#define BCM_5710_FW_REVISION_VERSION_V15 15
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 651bc1d7a57a..4953f5e1e390 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,9 +74,19 @@
__stringify(BCM_5710_FW_MINOR_VERSION) "." \
__stringify(BCM_5710_FW_REVISION_VERSION) "." \
__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
+#define FW_FILE_VERSION_V15 \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -747,9 +757,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
CHIP_IS_E1(bp) ? "everest1" :
CHIP_IS_E1H(bp) ? "everest1h" :
CHIP_IS_E2(bp) ? "everest2" : "everest3",
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION);
+ bp->fw_major, bp->fw_minor, bp->fw_rev);
return rc;
}
@@ -12308,6 +12316,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bnx2x_read_fwinfo(bp);
+ if (IS_PF(bp)) {
+ rc = bnx2x_init_firmware(bp);
+
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
+ }
+
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
@@ -12320,6 +12337,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
rc = bnx2x_prev_unload(bp);
if (rc) {
+ bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
return rc;
}
@@ -13317,16 +13335,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Check FW version */
offset = be32_to_cpu(fw_hdr->fw_version.offset);
fw_ver = firmware->data + offset;
- if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
- (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
- (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
- (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
+ fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
- fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION,
- BCM_5710_FW_ENGINEERING_VERSION);
+ fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+ bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
return -EINVAL;
}
@@ -13404,34 +13417,51 @@ do { \
(u8 *)bp->arr, len); \
} while (0)
-static int bnx2x_init_firmware(struct bnx2x *bp)
+int bnx2x_init_firmware(struct bnx2x *bp)
{
- const char *fw_file_name;
+ const char *fw_file_name, *fw_file_name_v15;
struct bnx2x_fw_file_hdr *fw_hdr;
int rc;
if (bp->firmware)
return 0;
- if (CHIP_IS_E1(bp))
+ if (CHIP_IS_E1(bp)) {
fw_file_name = FW_FILE_NAME_E1;
- else if (CHIP_IS_E1H(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1_V15;
+ } else if (CHIP_IS_E1H(bp)) {
fw_file_name = FW_FILE_NAME_E1H;
- else if (!CHIP_IS_E1x(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1H_V15;
+ } else if (!CHIP_IS_E1x(bp)) {
fw_file_name = FW_FILE_NAME_E2;
- else {
+ fw_file_name_v15 = FW_FILE_NAME_E2_V15;
+ } else {
BNX2X_ERR("Unsupported chip revision\n");
return -EINVAL;
}
+
BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
if (rc) {
- BNX2X_ERR("Can't load firmware file %s\n",
- fw_file_name);
- goto request_firmware_exit;
+ BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15);
+
+ /* try to load prev version */
+ rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
+
+ if (rc)
+ goto request_firmware_exit;
+
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
+ } else {
+ bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
}
+ bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
+ bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
+ bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
+
rc = bnx2x_check_firmware(bp);
if (rc) {
BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
@@ -13487,7 +13517,7 @@ request_firmware_exit:
return rc;
}
-static void bnx2x_release_firmware(struct bnx2x *bp)
+void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
@@ -14004,6 +14034,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_freemem:
+ bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
init_one_exit:
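
Review note: the new bnx2x_init_firmware() flow prefers the 7.13.21 blob,
falls back to 7.13.15, and sets FW_CAP_INVALIDATE_VF_FP_HSI only when the
newer firmware loads. A condensed, hedged sketch of that fallback — the
function name and the E2-only file choice are illustrative:

    #include <linux/bits.h>
    #include <linux/device.h>
    #include <linux/firmware.h>

    static int example_load_fw(struct device *dev, const struct firmware **fw,
                               u32 *fw_cap, u32 *fw_rev)
    {
            /* Prefer the current blob... */
            int rc = request_firmware(fw, "bnx2x/bnx2x-e2-7.13.21.0.fw", dev);

            if (!rc) {
                    *fw_cap |= BIT(0);      /* FW_CAP_INVALIDATE_VF_FP_HSI */
                    *fw_rev = 21;
                    return 0;
            }

            /* ...fall back to the previous release. */
            rc = request_firmware(fw, "bnx2x/bnx2x-e2-7.13.15.0.fw", dev);
            if (rc)
                    return rc;              /* neither version available */

            *fw_rev = 15;   /* old FW: capability bit stays clear */
            return 0;
    }
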
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 74a8931ce1d1..11d15cd03600 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -758,9 +758,18 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
+ u16 abs_fid;
+
+ abs_fid = FW_VF_HANDLE(abs_vfid);
+
/* set the VF-PF association in the FW */
- storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
- storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+ storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp));
+ storm_memset_func_en(bp, abs_fid, 1);
+
+ /* Invalidate fp_hsi version for vfs */
+ if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI)
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0);
/* clear vf errors*/
bnx2x_vf_semi_clear_err(bp, abs_vfid);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 1471b6130a2b..d8afcf8d6b30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1962,7 +1962,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, v
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
- if (!bnxt_is_netdev_indr_offload(netdev))
+ if (!netdev || !bnxt_is_netdev_indr_offload(netdev))
return -EOPNOTSUPP;
switch (type) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 0536d2c76fbc..3555c12edb45 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -1182,7 +1182,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
/* parsing gate action */
- if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) {
+ if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
err = -ENOSPC;
goto free_filter;
@@ -1202,7 +1202,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
refcount_set(&sgi->refcount, 1);
- sgi->index = entryg->gate.index;
+ sgi->index = entryg->hw_index;
sgi->init_ipv = entryg->gate.prio;
sgi->basetime = entryg->gate.basetime;
sgi->cycletime = entryg->gate.cycletime;
@@ -1244,7 +1244,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
refcount_set(&fmi->refcount, 1);
fmi->cir = entryp->police.rate_bytes_ps;
fmi->cbs = entryp->police.burst;
- fmi->index = entryp->police.index;
+ fmi->index = entryp->hw_index;
filter->flags |= ENETC_PSFP_FLAGS_FMI;
filter->fmi_index = fmi->index;
sfi->meter_id = fmi->index;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0b274d8fa45b..3facb55b7161 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -327,6 +327,16 @@ config ICE_SWITCHDEV
If unsure, say N.
+config ICE_HWTS
+ bool "Support HW cross-timestamp on platforms with PTM support"
+ default y
+ depends on ICE && X86
+ help
+ Say Y to enable hardware supported cross-timestamping on platforms
+ with PCIe PTM support. The cross-timestamp is available through
+ the PTP clock driver precise cross-timestamp ioctl
+ (PTP_SYS_OFFSET_PRECISE).
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
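
Review note: the cross-timestamp that ICE_HWTS enables reaches user space
through the standard PTP character device. A minimal user-space sketch of the
PTP_SYS_OFFSET_PRECISE ioctl the help text mentions; /dev/ptp0 is an
assumption — use the clock index that belongs to the ice PF:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int main(void)
    {
            struct ptp_sys_offset_precise p = { 0 };
            int fd = open("/dev/ptp0", O_RDWR);

            if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &p) < 0)
                    return 1;

            /* device and system time sampled at the same instant */
            printf("device %lld.%09u  system %lld.%09u\n",
                   (long long)p.device.sec, p.device.nsec,
                   (long long)p.sys_realtime.sec, p.sys_realtime.nsec);
            return 0;
    }
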
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 21eff0895a7a..f6d56867f857 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -143,7 +143,7 @@ s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id,
* @vlan: location of buffer to store VLAN
*
* This function pulls the MAC address back out of the attribute and will
- * place it in the array pointed by by mac_addr. It will return success
+ * place it in the array pointed by mac_addr. It will return success
* if provided with a valid pointers.
**/
s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan)
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
new file mode 100644
index 000000000000..57abd52386d0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_CGU_REGS_H_
+#define _ICE_CGU_REGS_H_
+
+#define NAC_CGU_DWORD9 0x24
+union nac_cgu_dword9 {
+ struct {
+ u32 time_ref_freq_sel : 3;
+ u32 clk_eref1_en : 1;
+ u32 clk_eref0_en : 1;
+ u32 time_ref_en : 1;
+ u32 time_sync_en : 1;
+ u32 one_pps_out_en : 1;
+ u32 clk_ref_synce_en : 1;
+ u32 clk_synce1_en : 1;
+ u32 clk_synce0_en : 1;
+ u32 net_clk_ref1_en : 1;
+ u32 net_clk_ref0_en : 1;
+ u32 clk_synce1_amp : 2;
+ u32 misc6 : 1;
+ u32 clk_synce0_amp : 2;
+ u32 one_pps_out_amp : 2;
+ u32 misc24 : 12;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD19 0x4c
+union nac_cgu_dword19 {
+ struct {
+ u32 tspll_fbdiv_intgr : 8;
+ u32 fdpll_ulck_thr : 5;
+ u32 misc15 : 3;
+ u32 tspll_ndivratio : 4;
+ u32 tspll_iref_ndivratio : 3;
+ u32 misc19 : 1;
+ u32 japll_ndivratio : 4;
+ u32 japll_iref_ndivratio : 3;
+ u32 misc27 : 1;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD22 0x58
+union nac_cgu_dword22 {
+ struct {
+ u32 fdpll_frac_div_out_nc : 2;
+ u32 fdpll_lock_int_for : 1;
+ u32 synce_hdov_int_for : 1;
+ u32 synce_lock_int_for : 1;
+ u32 fdpll_phlead_slip_nc : 1;
+ u32 fdpll_acc1_ovfl_nc : 1;
+ u32 fdpll_acc2_ovfl_nc : 1;
+ u32 synce_status_nc : 6;
+ u32 fdpll_acc1f_ovfl : 1;
+ u32 misc18 : 1;
+ u32 fdpllclk_div : 4;
+ u32 time1588clk_div : 4;
+ u32 synceclk_div : 4;
+ u32 synceclk_sel_div2 : 1;
+ u32 fdpllclk_sel_div2 : 1;
+ u32 time1588clk_sel_div2 : 1;
+ u32 misc3 : 1;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD24 0x60
+union nac_cgu_dword24 {
+ struct {
+ u32 tspll_fbdiv_frac : 22;
+ u32 misc20 : 2;
+ u32 ts_pll_enable : 1;
+ u32 time_sync_tspll_align_sel : 1;
+ u32 ext_synce_sel : 1;
+ u32 ref1588_ck_div : 4;
+ u32 time_ref_sel : 1;
+ } field;
+ u32 val;
+};
+
+#define TSPLL_CNTR_BIST_SETTINGS 0x344
+union tspll_cntr_bist_settings {
+ struct {
+ u32 i_irefgen_settling_time_cntr_7_0 : 8;
+ u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
+ u32 reserved195 : 5;
+ u32 i_plllock_sel_0 : 1;
+ u32 i_plllock_sel_1 : 1;
+ u32 i_plllock_cnt_6_0 : 7;
+ u32 i_plllock_cnt_10_7 : 4;
+ u32 reserved200 : 4;
+ } field;
+ u32 val;
+};
+
+#define TSPLL_RO_BWM_LF 0x370
+union tspll_ro_bwm_lf {
+ struct {
+ u32 bw_freqov_high_cri_7_0 : 8;
+ u32 bw_freqov_high_cri_9_8 : 2;
+ u32 biascaldone_cri : 1;
+ u32 plllock_gain_tran_cri : 1;
+ u32 plllock_true_lock_cri : 1;
+ u32 pllunlock_flag_cri : 1;
+ u32 afcerr_cri : 1;
+ u32 afcdone_cri : 1;
+ u32 feedfwrdgain_cal_cri_7_0 : 8;
+ u32 m2fbdivmod_cri_7_0 : 8;
+ } field;
+ u32 val;
+};
+
+#endif /* _ICE_CGU_REGS_H_ */
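
Review note: these unions overlay a named bit-field view on the raw 32-bit
register value so callers can read-modify-write individual fields. A hedged
usage sketch — direct rd32()/wr32() access is an assumption for illustration;
the driver may reach the CGU through a sideband interface instead:

    static void example_enable_ts_pll(struct ice_hw *hw)
    {
            union nac_cgu_dword24 dw24;

            dw24.val = rd32(hw, NAC_CGU_DWORD24);   /* read the raw dword */
            dw24.field.ts_pll_enable = 1;           /* flip one named field */
            wr32(hw, NAC_CGU_DWORD24, dw24.val);    /* write it back */
    }
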
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 157add1268d9..2a1ee60e85f4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2189,6 +2189,18 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
+ info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ } else {
+ /* Unknown clock frequency, so assume a (probably incorrect)
+ * default to avoid out-of-bounds look ups of frequency
+ * related information.
+ */
+ ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
+ info->clk_freq);
+ info->time_ref = ICE_TIME_REF_FREQ_25_000;
+ }
+
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
func_p->common_cap.ieee_1588);
ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index a49082485642..d16738a3d3a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -100,6 +100,7 @@
#define PF_SB_ATQT 0x0022FE00
#define PF_SB_ATQT_ATQT_S 0
#define PF_SB_ATQT_ATQT_M ICE_M(0x3FF, 0)
+#define PF_SB_REM_DEV_CTL 0x002300F0
#define PRTDCB_GENC 0x00083000
#define PRTDCB_GENC_PFCLDA_S 16
#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
@@ -440,6 +441,10 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PRTRPB_RDPC 0x000AC260
+#define GLHH_ART_CTL 0x000A41D4
+#define GLHH_ART_CTL_ACTIVE_M BIT(0)
+#define GLHH_ART_TIME_H 0x000A41D8
+#define GLHH_ART_TIME_L 0x000A41DC
#define GLTSYN_AUX_IN_0(_i) (0x000889D8 + ((_i) * 4))
#define GLTSYN_AUX_IN_0_INT_ENA_M BIT(4)
#define GLTSYN_AUX_OUT_0(_i) (0x00088998 + ((_i) * 4))
@@ -452,6 +457,8 @@
#define GLTSYN_ENA_TSYN_ENA_M BIT(0)
#define GLTSYN_EVNT_H_0(_i) (0x00088970 + ((_i) * 4))
#define GLTSYN_EVNT_L_0(_i) (0x00088968 + ((_i) * 4))
+#define GLTSYN_HHTIME_H(_i) (0x00088900 + ((_i) * 4))
+#define GLTSYN_HHTIME_L(_i) (0x000888F8 + ((_i) * 4))
#define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4))
#define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4))
#define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4))
@@ -468,6 +475,8 @@
#define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4))
#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4))
#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4))
+#define PFHH_SEM 0x000A4200 /* Reset Source: PFR */
+#define PFHH_SEM_BUSY_M BIT(0)
#define PFTSYN_SEM 0x00088880
#define PFTSYN_SEM_BUSY_M BIT(0)
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 865f2231bb24..476533f63e09 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -539,7 +539,7 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_release(pf);
+ ice_ptp_prepare_for_reset(pf);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -1063,6 +1063,9 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
+ if (!ice_is_e810(&pf->hw))
+ ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
@@ -4123,13 +4126,14 @@ static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
break;
case ICE_DDP_PKG_LOAD_ERROR:
dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
- /* poll for reset to complete */
- if (ice_check_reset(hw))
- dev_err(dev, "Error resetting device. Please reload the driver\n");
+ /* poll for reset to complete */
+ if (ice_check_reset(hw))
+ dev_err(dev, "Error resetting device. Please reload the driver\n");
break;
case ICE_DDP_PKG_ERR:
default:
dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
+ break;
}
}
@@ -5839,6 +5843,8 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
+ if (!ice_is_e810(&pf->hw))
+ ice_ptp_link_change(pf, pf->hw.pf_id, true);
}
/* clear this now, and the first stats read will be used as baseline */
@@ -6239,6 +6245,8 @@ int ice_down(struct ice_vsi *vsi)
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (!ice_is_e810(&vsi->back->hw))
+ ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
@@ -6685,7 +6693,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_init(pf);
+ ice_ptp_reset(pf);
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
@@ -6694,6 +6702,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
+ /* configure PTP timestamping after VSI rebuild */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_cfg_timestamp(pf, false);
+
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 0014a1002ed3..d3f65e061a62 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -6,6 +6,8 @@
#define E810_OUT_PROP_DELAY_NS 1
+#define UNKNOWN_INCVAL_E822 0x100000000ULL
+
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
{ "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
@@ -281,6 +283,8 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
else
val &= ~PFINT_OICR_TSYN_TX_M;
wr32(&pf->hw, PFINT_OICR_ENA, val);
+
+ pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}
/**
@@ -303,6 +307,9 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
continue;
vsi->rx_rings[i]->ptp_rx = on;
}
+
+ pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
}
/**
@@ -313,18 +320,10 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
* This function will configure timestamping during PTP initialization
* and deinitialization
*/
-static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
ice_set_tx_tstamp(pf, ena);
ice_set_rx_tstamp(pf, ena);
-
- if (ena) {
- pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
- pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
- } else {
- pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
- pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
- }
}
/**
@@ -682,6 +681,406 @@ static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
}
/**
+ * ice_base_incval - Get base timer increment value
+ * @pf: Board private structure
+ *
+ * Look up the base timer increment value for this device. The base increment
+ * value is used to define the nominal clock tick rate. This increment value
+ * is programmed during device initialization. It is also used as the basis
+ * for calculating adjustments using scaled_ppm.
+ */
+static u64 ice_base_incval(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u64 incval;
+
+ if (ice_is_e810(hw))
+ incval = ICE_PTP_NOMINAL_INCVAL_E810;
+ else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
+ incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
+ else
+ incval = UNKNOWN_INCVAL_E822;
+
+ dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
+ incval);
+
+ return incval;
+}
+
+/**
+ * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad
+ * @pf: The PF private data structure
+ * @quad: The quad (0-4)
+ */
+static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
+}
+
+/**
+ * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
+ * @port: PTP port for which Tx FIFO is checked
+ */
+static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
+{
+ int quad = port->port_num / ICE_PORTS_PER_QUAD;
+ int offs = port->port_num % ICE_PORTS_PER_QUAD;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u32 val, phy_sts;
+ int err;
+
+ pf = ptp_port_to_pf(port);
+ hw = &pf->hw;
+
+ if (port->tx_fifo_busy_cnt == FIFO_OK)
+ return 0;
+
+ /* need to read FIFO state */
+ if (offs == 0 || offs == 1)
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
+ &val);
+ else
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
+ &val);
+
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
+ port->port_num, err);
+ return err;
+ }
+
+ if (offs & 0x1)
+ phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
+ else
+ phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;
+
+ if (phy_sts & FIFO_EMPTY) {
+ port->tx_fifo_busy_cnt = FIFO_OK;
+ return 0;
+ }
+
+ port->tx_fifo_busy_cnt++;
+
+ dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
+ port->tx_fifo_busy_cnt, port->port_num);
+
+ if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "Port %d Tx FIFO still not empty; resetting quad %d\n",
+ port->port_num, quad);
+ ice_ptp_reset_ts_memory_quad(pf, quad);
+ port->tx_fifo_busy_cnt = FIFO_OK;
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+
+/**
+ * ice_ptp_check_tx_offset_valid - Check if the Tx PHY offset is valid
+ * @port: the PTP port to check
+ *
+ * Checks whether the Tx offset for the PHY associated with this port is
+ * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
+ * not.
+ */
+static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(port);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int err;
+
+ err = ice_ptp_check_tx_fifo(port);
+ if (err)
+ return err;
+
+ err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS,
+ &val);
+ if (err) {
+ dev_err(dev, "Failed to read TX_OV_STATUS for port %d, err %d\n",
+ port->port_num, err);
+ return -EAGAIN;
+ }
+
+ if (!(val & P_REG_TX_OV_STATUS_OV_M))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_check_rx_offset_valid - Check if the Rx PHY offset is valid
+ * @port: the PTP port to check
+ *
+ * Checks whether the Rx offset for the PHY associated with this port is
+ * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
+ * not.
+ */
+static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(port);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+ u32 val;
+
+ err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS,
+ &val);
+ if (err) {
+ dev_err(dev, "Failed to read RX_OV_STATUS for port %d, err %d\n",
+ port->port_num, err);
+ return err;
+ }
+
+ if (!(val & P_REG_RX_OV_STATUS_OV_M))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_check_offset_valid - Check port offset valid bit
+ * @port: Port for which offset valid bit is checked
+ *
+ * Returns 0 if both Tx and Rx offsets are valid, and -EAGAIN if one of the
+ * offsets is not ready.
+ */
+static int ice_ptp_check_offset_valid(struct ice_ptp_port *port)
+{
+ int tx_err, rx_err;
+
+ /* always check both Tx and Rx offset validity */
+ tx_err = ice_ptp_check_tx_offset_valid(port);
+ rx_err = ice_ptp_check_rx_offset_valid(port);
+
+ if (tx_err || rx_err)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets
+ * @work: Pointer to the kthread_work structure for this task
+ *
+ * Check whether both the Tx and Rx offsets are valid for enabling the vernier
+ * calibration.
+ *
+ * Once we have valid offsets from hardware, update the total Tx and Rx
+ * offsets, and exit bypass mode. This enables more precise timestamps using
+ * the extra data measured during the vernier calibration process.
+ */
+static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
+{
+ struct ice_ptp_port *port;
+ int err;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+
+ port = container_of(work, struct ice_ptp_port, ov_work.work);
+ pf = ptp_port_to_pf(port);
+ hw = &pf->hw;
+ dev = ice_pf_to_dev(pf);
+
+ if (ice_ptp_check_offset_valid(port)) {
+ /* Offsets not ready yet, try again later */
+ kthread_queue_delayed_work(pf->ptp.kworker,
+ &port->ov_work,
+ msecs_to_jiffies(100));
+ return;
+ }
+
+ /* Offsets are valid, so it is safe to exit bypass mode */
+ err = ice_phy_exit_bypass_e822(hw, port->port_num);
+ if (err) {
+ dev_warn(dev, "Failed to exit bypass mode for PHY port %u, err %d\n",
+ port->port_num, err);
+ return;
+ }
+}
+
+/**
+ * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
+ * @ptp_port: PTP port to stop
+ */
+static int
+ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(ptp_port);
+ u8 port = ptp_port->port_num;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (ice_is_e810(hw))
+ return 0;
+
+ mutex_lock(&ptp_port->ps_lock);
+
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+
+ err = ice_stop_phy_timer_e822(hw, port, true);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
+ port, err);
+
+ mutex_unlock(&ptp_port->ps_lock);
+
+ return err;
+}
+
+/**
+ * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
+ * @ptp_port: PTP port for which the PHY start is set
+ *
+ * Start the PHY timestamping block, and initiate Vernier timestamping
+ * calibration. If timestamping cannot be calibrated (such as if link is down)
+ * then disable the timestamping block instead.
+ */
+static int
+ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(ptp_port);
+ u8 port = ptp_port->port_num;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (ice_is_e810(hw))
+ return 0;
+
+ if (!ptp_port->link_up)
+ return ice_ptp_port_phy_stop(ptp_port);
+
+ mutex_lock(&ptp_port->ps_lock);
+
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+
+ /* temporarily disable Tx timestamps while calibrating PHY offset */
+ ptp_port->tx.calibrating = true;
+ ptp_port->tx_fifo_busy_cnt = 0;
+
+ /* Start the PHY timer in bypass mode */
+ err = ice_start_phy_timer_e822(hw, port, true);
+ if (err)
+ goto out_unlock;
+
+ /* Enable Tx timestamps right away */
+ ptp_port->tx.calibrating = false;
+
+ kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
+
+out_unlock:
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
+ port, err);
+
+ mutex_unlock(&ptp_port->ps_lock);
+
+ return err;
+}
+
+/**
+ * ice_ptp_link_change - Set or clear port registers for timestamping
+ * @pf: Board private structure
+ * @port: Port for which the PHY start is set
+ * @linkup: Link is up or down
+ */
+int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+{
+ struct ice_ptp_port *ptp_port;
+
+ if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ return 0;
+
+ if (port >= ICE_NUM_EXTERNAL_PORTS)
+ return -EINVAL;
+
+ ptp_port = &pf->ptp.port;
+ if (ptp_port->port_num != port)
+ return -EINVAL;
+
+ /* Update cached link err for this port immediately */
+ ptp_port->link_up = linkup;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ /* PTP is not setup */
+ return -EAGAIN;
+
+ return ice_ptp_port_phy_restart(ptp_port);
+}
+
+/**
+ * ice_ptp_reset_ts_memory - Reset timestamp memory for all quads
+ * @pf: The PF private data structure
+ */
+static void ice_ptp_reset_ts_memory(struct ice_pf *pf)
+{
+ int quad;
+
+ quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD;
+ ice_ptp_reset_ts_memory_quad(pf, quad);
+}
+
+/**
+ * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
+ * @pf: PF private structure
+ * @ena: bool value to enable or disable interrupt
+ * @threshold: Minimum number of packets at which intr is triggered
+ *
+ * Utility function to enable or disable Tx timestamp interrupt and threshold
+ */
+static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
+{
+ struct ice_hw *hw = &pf->hw;
+ int err = 0;
+ int quad;
+ u32 val;
+
+ ice_ptp_reset_ts_memory(pf);
+
+ for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ &val);
+ if (err)
+ break;
+
+ if (ena) {
+ val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
+ val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
+ Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
+ } else {
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ }
+
+ err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ val);
+ if (err)
+ break;
+ }
+
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
+ err);
+ return err;
+}
+
+/**
+ * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
+ * @pf: Board private structure
+ */
+static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
+{
+ ice_ptp_port_phy_restart(&pf->ptp.port);
+}
+
+/**
* ice_ptp_adjfine - Adjust clock increment rate
* @info: the driver's PTP info structure
* @scaled_ppm: Parts per million with 16-bit fractional field
@@ -698,7 +1097,7 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
int neg_adj = 0;
int err;
- incval = ICE_PTP_NOMINAL_INCVAL_E810;
+ incval = ice_base_incval(pf);
if (scaled_ppm < 0) {
neg_adj = 1;
@@ -905,7 +1304,10 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
NSEC_PER_SEC) * NSEC_PER_SEC + phase;
- start_time -= E810_OUT_PROP_DELAY_NS;
+ if (ice_is_e810(hw))
+ start_time -= E810_OUT_PROP_DELAY_NS;
+ else
+ start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));
/* 2. Write TARGET time */
wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
@@ -1088,6 +1490,12 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
struct ice_hw *hw = &pf->hw;
int err;
+ /* For Vernier mode, we need to recalibrate after new settime
+ * Start with disabling timestamp block
+ */
+ if (pf->ptp.port.link_up)
+ ice_ptp_port_phy_stop(&pf->ptp.port);
+
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
goto exit;
@@ -1104,6 +1512,10 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
+
+ /* Recalibrate and re-enable timestamp block */
+ if (pf->ptp.port.link_up)
+ ice_ptp_port_phy_restart(&pf->ptp.port);
exit:
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
@@ -1177,6 +1589,101 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+#ifdef CONFIG_ICE_HWTS
+/**
+ * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * @device: Current device time
+ * @system: System counter value read synchronously with device time
+ * @ctx: Context provided by timekeeping code
+ *
+ * Read device and system (ART) clock simultaneously and return the corrected
+ * clock values in ns.
+ */
+static int
+ice_ptp_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct ice_pf *pf = (struct ice_pf *)ctx;
+ struct ice_hw *hw = &pf->hw;
+ u32 hh_lock, hh_art_ctl;
+ int i;
+
+ /* Get the HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ if (hh_lock & PFHH_SEM_BUSY_M) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ return -EFAULT;
+ }
+
+ /* Start the ART and device clock sync sequence */
+ hh_art_ctl = rd32(hw, GLHH_ART_CTL);
+ hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
+ wr32(hw, GLHH_ART_CTL, hh_art_ctl);
+
+#define MAX_HH_LOCK_TRIES 100
+
+ for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
+ /* Wait for sync to complete */
+ hh_art_ctl = rd32(hw, GLHH_ART_CTL);
+ if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
+ udelay(1);
+ continue;
+ } else {
+ u32 hh_ts_lo, hh_ts_hi, tmr_idx;
+ u64 hh_ts;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ /* Read ART time */
+ hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
+ hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
+ hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+ *system = convert_art_ns_to_tsc(hh_ts);
+ /* Read Device source clock time */
+ hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
+ hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
+ hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+ *device = ns_to_ktime(hh_ts);
+ break;
+ }
+ }
+ /* Release HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
+ wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
+
+ if (i == MAX_HH_LOCK_TRIES)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
+ * @info: the driver's PTP info structure
+ * @cts: The memory to fill the cross timestamp info
+ *
+ * Capture a cross timestamp between the ART and the device PTP hardware
+ * clock. Fill the cross timestamp information and report it back to the
+ * caller.
+ *
+ * This is only valid for E822 devices which have support for generating the
+ * cross timestamp via PCIe PTM.
+ *
+ * In order to correctly correlate the ART timestamp back to the TSC time, the
+ * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ */
+static int
+ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+
+ return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
+ pf, NULL, cts);
+}
+#endif /* CONFIG_ICE_HWTS */
+
/**
* ice_ptp_get_ts_config - ioctl interface to read the timestamping config
* @pf: Board private structure
@@ -1234,7 +1741,6 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- config->rx_filter = HWTSTAMP_FILTER_ALL;
ice_set_rx_tstamp(pf, true);
break;
default:
@@ -1266,8 +1772,8 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
if (err)
return err;
- /* Save these settings for future reference */
- pf->ptp.tstamp_config = config;
+ /* Return the actual configuration set */
+ config = pf->ptp.tstamp_config;
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
@@ -1399,6 +1905,26 @@ static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
}
/**
+ * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabilities structure for E822 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E822
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (boot_cpu_has(X86_FEATURE_ART) &&
+ boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
+ info->getcrosststamp = ice_ptp_getcrosststamp_e822;
+#endif /* CONFIG_ICE_HWTS */
+}
+
+/**
* ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
* @pf: Board private structure
* @info: PTP info to fill
@@ -1437,7 +1963,10 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->gettimex64 = ice_ptp_gettimex64;
info->settime64 = ice_ptp_settime64;
- ice_ptp_set_funcs_e810(pf, info);
+ if (ice_is_e810(&pf->hw))
+ ice_ptp_set_funcs_e810(pf, info);
+ else
+ ice_ptp_set_funcs_e822(pf, info);
}
/**
@@ -1584,7 +2113,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
u8 idx;
/* Check if this tracker is initialized */
- if (!tx->init)
+ if (!tx->init || tx->calibrating)
return -1;
spin_lock(&tx->lock);
@@ -1707,6 +2236,27 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
+ * the timestamp block is shared for all ports in the same quad. To avoid
+ * ports using the same timestamp index, logically break the block of
+ * registers into chunks based on the port number.
+ */
+static int
+ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+{
+ tx->quad = port / ICE_PORTS_PER_QUAD;
+ tx->quad_offset = tx->quad * INDEX_PER_PORT;
+ tx->len = INDEX_PER_PORT;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
* ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -1777,6 +2327,130 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_reset - Initialize PTP hardware clock support after reset
+ * @pf: Board private structure
+ */
+void ice_ptp_reset(struct ice_pf *pf)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
+ struct timespec64 ts;
+ int err, itr = 1;
+ u64 time_diff;
+
+ if (test_bit(ICE_PFR_REQ, pf->state))
+ goto pfr;
+
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ goto reset_ts;
+
+ err = ice_ptp_init_phc(hw);
+ if (err)
+ goto err;
+
+ /* Acquire the global hardware lock */
+ if (!ice_ptp_lock(hw)) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ /* Write the increment time value to PHY and LAN */
+ err = ice_ptp_write_incval(hw, ice_base_incval(pf));
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err;
+ }
+
+ /* Write the initial Time value to PHY and LAN using the cached PHC
+ * time before the reset and time difference between stopping and
+ * starting the clock.
+ */
+ if (ptp->cached_phc_time) {
+ time_diff = ktime_get_real_ns() - ptp->reset_time;
+ ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
+ } else {
+ ts = ktime_to_timespec64(ktime_get_real());
+ }
+ err = ice_ptp_write_init(pf, &ts);
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err;
+ }
+
+ /* Release the global hardware lock */
+ ice_ptp_unlock(hw);
+
+ if (!ice_is_e810(hw)) {
+ /* Enable quad interrupts */
+ err = ice_ptp_tx_ena_intr(pf, true, itr);
+ if (err)
+ goto err;
+ }
+
+reset_ts:
+ /* Restart the PHY timestamping block */
+ ice_ptp_reset_phy_timestamping(pf);
+
+pfr:
+ /* Init Tx structures */
+ if (ice_is_e810(&pf->hw)) {
+ err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
+ } else {
+ kthread_init_delayed_work(&ptp->port.ov_work,
+ ice_ptp_wait_for_offset_valid);
+ err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
+ ptp->port.port_num);
+ }
+ if (err)
+ goto err;
+
+ set_bit(ICE_FLAG_PTP, pf->flags);
+
+ /* Start periodic work going */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
+
+ dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
+ return;
+
+err:
+ dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
+}
+
+/**
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
+ * @pf: Board private structure
+ */
+void ice_ptp_prepare_for_reset(struct ice_pf *pf)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ u8 src_tmr;
+
+ clear_bit(ICE_FLAG_PTP, pf->flags);
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_cfg_timestamp(pf, false);
+
+ kthread_cancel_delayed_work_sync(&ptp->work);
+ kthread_cancel_work_sync(&ptp->extts_work);
+
+ if (test_bit(ICE_PFR_REQ, pf->state))
+ return;
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ /* Disable periodic outputs */
+ ice_ptp_disable_all_clkout(pf);
+
+ src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
+
+ /* Disable source clock */
+ wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Acquire PHC and system timer to restore after reset */
+ ptp->reset_time = ktime_get_real_ns();
+}
+
+/**
* ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
* @pf: Board private structure
*
@@ -1786,27 +2460,16 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
*/
static int ice_ptp_init_owner(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- u8 src_idx;
- int err;
-
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
-
- /* Clear some HW residue and enable source clock */
- src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ int err, itr = 1;
- /* Enable source clocks */
- wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
-
- /* Enable PHY time sync */
- err = ice_ptp_init_phy_e810(hw);
- if (err)
- goto err_exit;
-
- /* Clear event status indications for auxiliary pins */
- (void)rd32(hw, GLTSYN_STAT(src_idx));
+ err = ice_ptp_init_phc(hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
+ err);
+ return err;
+ }
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
@@ -1815,7 +2478,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
}
/* Write the increment time value to PHY and LAN */
- err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+ err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
goto err_exit;
@@ -1832,6 +2495,13 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
+ if (!ice_is_e810(hw)) {
+ /* Enable quad interrupts */
+ err = ice_ptp_tx_ena_intr(pf, true, itr);
+ if (err)
+ goto err_exit;
+ }
+
/* Ensure we have a clock device */
err = ice_ptp_create_clock(pf);
if (err)
@@ -1845,72 +2515,106 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
err_clk:
pf->ptp.clock = NULL;
err_exit:
- dev_err(dev, "PTP failed to register clock, err %d\n", err);
-
return err;
}
/**
- * ice_ptp_init - Initialize the PTP support after device probe or reset
+ * ice_ptp_init_work - Initialize PTP work threads
+ * @pf: Board private structure
+ * @ptp: PF PTP structure
+ */
+static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
+{
+ struct kthread_worker *kworker;
+
+ /* Initialize work functions */
+ kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
+ kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);
+
+ /* Allocate a kworker for handling work required for the ports
+ * connected to the PTP hardware clock.
+ */
+ kworker = kthread_create_worker(0, "ice-ptp-%s",
+ dev_name(ice_pf_to_dev(pf)));
+ if (IS_ERR(kworker))
+ return PTR_ERR(kworker);
+
+ ptp->kworker = kworker;
+
+ /* Start periodic work going */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_port - Initialize PTP port structure
+ * @pf: Board private structure
+ * @ptp_port: PTP port structure
+ */
+static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
+{
+ mutex_init(&ptp_port->ps_lock);
+
+ if (ice_is_e810(&pf->hw))
+ return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
+
+ kthread_init_delayed_work(&ptp_port->ov_work,
+ ice_ptp_wait_for_offset_valid);
+ return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
+}
+
+/**
+ * ice_ptp_init - Initialize PTP hardware clock support
* @pf: Board private structure
*
- * This function sets device up for PTP support. The first time it is run, it
- * will create a clock device. It does not create a clock device if one
- * already exists. It also reconfigures the device after a reset.
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions, both the function that owns the clock hardware, and the
+ * functions connected to the clock hardware.
+ *
+ * The clock owner will allocate and register a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
+ * items used for asynchronous work such as Tx timestamps and periodic work.
*/
void ice_ptp_init(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct kthread_worker *kworker;
+ struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
int err;
- /* PTP is currently only supported on E810 devices */
- if (!ice_is_e810(hw))
- return;
-
- /* Check if this PF owns the source timer */
+ /* If this function owns the clock hardware, it must allocate and
+ * configure the PTP clock device to represent it.
+ */
if (hw->func_caps.ts_func_info.src_tmr_owned) {
err = ice_ptp_init_owner(pf);
if (err)
- return;
+ goto err;
}
- /* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
-
- /* Initialize the PTP port Tx timestamp tracker */
- ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx);
-
- /* Initialize work functions */
- kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
- kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);
+ ptp->port.port_num = hw->pf_id;
+ err = ice_ptp_init_port(pf, &ptp->port);
+ if (err)
+ goto err;
- /* Allocate a kworker for handling work required for the ports
- * connected to the PTP hardware clock.
- */
- kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev));
- if (IS_ERR(kworker)) {
- err = PTR_ERR(kworker);
- goto err_kworker;
- }
- pf->ptp.kworker = kworker;
+ /* Start the PHY timestamping block */
+ ice_ptp_reset_phy_timestamping(pf);
set_bit(ICE_FLAG_PTP, pf->flags);
+ err = ice_ptp_init_work(pf, ptp);
+ if (err)
+ goto err;
- /* Start periodic work going */
- kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
-
- dev_info(dev, "PTP init successful\n");
+ dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
-err_kworker:
+err:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
- ptp_clock_unregister(pf->ptp.clock);
+ ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- dev_err(dev, "PTP failed %d\n", err);
+ clear_bit(ICE_FLAG_PTP, pf->flags);
+ dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
/**
@@ -1934,6 +2638,8 @@ void ice_ptp_release(struct ice_pf *pf)
kthread_cancel_delayed_work_sync(&pf->ptp.work);
+ ice_ptp_port_phy_stop(&pf->ptp.port);
+ mutex_destroy(&pf->ptp.port.ps_lock);
if (pf->ptp.kworker) {
kthread_destroy_worker(pf->ptp.kworker);
pf->ptp.kworker = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 53c15fc9d996..afd048d69959 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -82,6 +82,8 @@ struct ice_tx_tstamp {
* @quad_offset: offset into timestamp block of the quad to get the real index
* @len: length of the tstamps and in_use fields.
* @init: if true, the tracker is initialized;
+ * @calibrating: if true, the PHY is calibrating the Tx offset. During this
+ * window, timestamps are temporarily disabled.
*/
struct ice_ptp_tx {
struct kthread_work work;
@@ -92,6 +94,7 @@ struct ice_ptp_tx {
u8 quad_offset;
u8 len;
u8 init;
+ u8 calibrating;
};
/* Quad and port information for initializing timestamp blocks */
@@ -101,15 +104,24 @@ struct ice_ptp_tx {
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
*
- * This structure contains PTP data related to the external ports. Currently
- * it is used for tracking the Tx timestamps of a port. In the future this
- * structure will also hold information for the E822 port initialization
- * logic.
+ * This structure contains data indicating whether a single external port is
+ * ready for PTP functionality. It is used to track the port initialization
+ * and determine when the port's PHY offset is valid.
*
* @tx: Tx timestamp tracking for this port
+ * @ov_work: delayed work task for tracking when PHY offset is valid
+ * @ps_lock: mutex used to protect the overall PTP PHY start procedure
+ * @link_up: indicates whether the link is up
+ * @tx_fifo_busy_cnt: number of times the Tx FIFO was busy
+ * @port_num: the port number this structure represents
*/
struct ice_ptp_port {
struct ice_ptp_tx tx;
+ struct kthread_delayed_work ov_work;
+ struct mutex ps_lock; /* protects overall PTP PHY start procedure */
+ bool link_up;
+ u8 tx_fifo_busy_cnt;
+ u8 port_num;
};
#define GLTSYN_TGT_H_IDX_MAX 4
@@ -127,6 +139,7 @@ struct ice_ptp_port {
* @info: structure defining PTP hardware capabilities
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
+ * @reset_time: kernel time after clock stop on reset
*/
struct ice_ptp {
struct ice_ptp_port port;
@@ -140,6 +153,7 @@ struct ice_ptp {
struct ptp_clock_info info;
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
+ u64 reset_time;
};
#define __ptp_port_to_ptp(p) \
@@ -152,9 +166,15 @@ struct ice_ptp {
#define ptp_info_to_pf(i) \
container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp)
+#define PFTSYN_SEM_BYTES 4
#define PTP_SHARED_CLK_IDX_VALID BIT(31)
+#define TS_CMD_MASK 0xF
+#define SYNC_EXEC_CMD 0x3
#define ICE_PTP_TS_VALID BIT(0)
+#define FIFO_EMPTY BIT(2)
+#define FIFO_OK 0xFF
+#define ICE_PTP_FIFO_NUM_CHECKS 5
/* Per-channel register definitions */
#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
@@ -175,11 +195,13 @@ struct ice_ptp {
#define N_PER_OUT_E810T 3
#define N_PER_OUT_E810T_NO_SMA 2
#define N_EXT_TS_E810_NO_SMA 2
+#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
@@ -188,8 +210,11 @@ void ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+void ice_ptp_reset(struct ice_pf *pf);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
+int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -201,6 +226,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
return -EOPNOTSUPP;
}
+static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
{
return -1;
@@ -216,7 +242,11 @@ static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+static inline void ice_ptp_reset(struct ice_pf *pf) { }
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
+static inline int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+{ return 0; }
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
new file mode 100644
index 000000000000..4109aa3b2fcd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_PTP_CONSTS_H_
+#define _ICE_PTP_CONSTS_H_
+
+/* Constant definitions related to the hardware clock used for PTP 1588
+ * features and functionality.
+ */
+
+/* struct ice_time_ref_info_e822
+ *
+ * E822 hardware can use different sources as the reference for the PTP
+ * hardware clock. Each clock has different characteristics such as a slightly
+ * different frequency, etc.
+ *
+ * This lookup table defines several constants that depend on the current time
+ * reference. See the struct ice_time_ref_info_e822 for information about the
+ * meaning of each constant.
+ */
+const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* pll_freq */
+ 823437500, /* 823.4375 MHz PLL */
+ /* nominal_incval */
+ 0x136e44fabULL,
+ /* pps_delay */
+ 11,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* pll_freq */
+ 783360000, /* 783.36 MHz */
+ /* nominal_incval */
+ 0x146cc2177ULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* pll_freq */
+ 796875000, /* 796.875 MHz */
+ /* nominal_incval */
+ 0x141414141ULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* pll_freq */
+ 816000000, /* 816 MHz */
+ /* nominal_incval */
+ 0x139b9b9baULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* pll_freq */
+ 830078125, /* 830.78125 MHz */
+ /* nominal_incval */
+ 0x134679aceULL,
+ /* pps_delay */
+ 11,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* pll_freq */
+ 783360000, /* 783.36 MHz */
+ /* nominal_incval */
+ 0x146cc2177ULL,
+ /* pps_delay */
+ 12,
+ },
+};
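+
+/* Illustrative derivation (not used by the driver): because the clock
+ * counts time units of 2^-32 ns, each nominal_incval above follows from
+ * its PLL frequency as
+ *
+ * nominal_incval = (1000000000ULL << 32) / pll_freq
+ *
+ * rounded to the nearest integer. For the 25 MHz TIME_REF entry this is
+ * (1e9 << 32) / 823437500 ~= 5215899562.5, which rounds to 0x136e44fab.
+ */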
+
+const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* refclk_pre_div */
+ 1,
+ /* feedback_div */
+ 197,
+ /* frac_n_div */
+ 2621440,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 159,
+ /* frac_n_div */
+ 1572864,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 159,
+ /* frac_n_div */
+ 1572864,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* refclk_pre_div */
+ 10,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+};
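+
+/* Illustrative cross-check (assuming frac_n_div is a 22-bit fractional
+ * feedback value, which matches all six entries): the PLL frequencies in
+ * e822_time_ref follow from these parameters as
+ *
+ * pll_freq = (time_ref / refclk_pre_div) *
+ *            (feedback_div + frac_n_div / 2^22) / post_pll_div
+ *
+ * e.g. for 156.25 MHz: (156.25 / 5) * (159 + 1572864 / 4194304) / 6
+ * = 31.25 MHz * 159.375 / 6 = 830.078125 MHz.
+ */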
+
+/* struct ice_vernier_info_e822
+ *
+ * E822 hardware calibrates the delay of the timestamp indication relative
+ * to the actual packet transmission or reception during PHY
+ * initialization. To do this, the hardware uses conversions between the
+ * various clocks within the PHY block. This table defines constants used
+ * to calculate the correct conversion ratios in the PHY registers.
+ *
+ * Many of the values relate to the PAR/PCS clock conversion registers. For
+ * these registers, a value of 0 means that the associated register is not
+ * used by this link speed, and that the register should be cleared by writing
+ * 0. Other values specify the clock frequency in Hz.
+ */
+const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+ /* ICE_PTP_LNK_SPD_1G */
+ {
+ /* tx_par_clk */
+ 31250000, /* 31.25 MHz */
+ /* rx_par_clk */
+ 31250000, /* 31.25 MHz */
+ /* tx_pcs_clk */
+ 125000000, /* 125 MHz */
+ /* rx_pcs_clk */
+ 125000000, /* 125 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 25140,
+ /* pmd_adj_divisor */
+ 10000000,
+ /* rx_fixed_delay */
+ 17372,
+ },
+ /* ICE_PTP_LNK_SPD_10G */
+ {
+ /* tx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* rx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* tx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* rx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 6938,
+ /* pmd_adj_divisor */
+ 82500000,
+ /* rx_fixed_delay */
+ 6212,
+ },
+ /* ICE_PTP_LNK_SPD_25G */
+ {
+ /* tx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* rx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 2778,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 2491,
+ },
+ /* ICE_PTP_LNK_SPD_25G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 0, /* unused */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 0, /* unused */
+ /* tx_desk_rsgb_par */
+ 161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 97656250, /* 97.65625 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 97656250, /* 97.65625 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 3928,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 29535,
+ },
+ /* ICE_PTP_LNK_SPD_40G */
+ {
+ /* tx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* rx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* tx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* rx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 156250000, /* 156.25 MHz deskew clock */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 156250000, /* 156.25 MHz deskew clock */
+ /* tx_fixed_delay */
+ 5666,
+ /* pmd_adj_divisor */
+ 82500000,
+ /* rx_fixed_delay */
+ 4244,
+ },
+ /* ICE_PTP_LNK_SPD_50G */
+ {
+ /* tx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* rx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 195312500, /* 195.3125 MHz deskew clock */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 195312500, /* 195.3125 MHz deskew clock */
+ /* tx_fixed_delay */
+ 2778,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 2868,
+ },
+ /* ICE_PTP_LNK_SPD_50G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_desk_rsgb_par */
+ 322265625, /* 322.265625 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 322265625, /* 322.265625 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 2095,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 14524,
+ },
+ /* ICE_PTP_LNK_SPD_100G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_desk_rsgb_par */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 1620,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 7775,
+ },
+};
+
+#endif /* _ICE_PTP_CONSTS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 29f947c0cd2e..ec8450f034e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -3,6 +3,8 @@
#include "ice_common.h"
#include "ice_ptp_hw.h"
+#include "ice_ptp_consts.h"
+#include "ice_cgu_regs.h"
/* Low level functions for interacting with and managing the device clock used
* for the Precision Time Protocol.
@@ -29,6 +31,15 @@
*
* For E810 devices, the increment frequency is 812.5 MHz
*
+ * For E822 devices, the clock can be derived from different sources, and the
+ * increment has an effective frequency of one of the following:
+ * - 823.4375 MHz
+ * - 783.36 MHz
+ * - 796.875 MHz
+ * - 816 MHz
+ * - 830.078125 MHz
+ * - 783.36 MHz
+ *
* The hardware captures timestamps in the PHY for incoming packets, and for
* outgoing packets on request. To support this, the PHY maintains a timer
* that matches the lower 64 bits of the global source timer.
@@ -37,6 +48,24 @@
* shadow registers are used to prepare the desired initial values. A special
* sync command is issued to trigger copying from the shadow registers into
* the appropriate source and PHY registers simultaneously.
+ *
+ * The driver supports devices which have different PHYs with subtly different
+ * mechanisms to program and control the timers. We divide the devices into
+ * families named after the first major device, E810 and similar devices, and
+ * E822 and similar devices.
+ *
+ * - E822 based devices have additional support for fine grained Vernier
+ * calibration which requires significant setup
+ * - The layout of timestamp data in the PHY register blocks is different
+ * - The way timer synchronization commands are issued is different.
+ *
+ * To support this, very low level functions have an e810 or e822 suffix
+ * indicating what type of device they work on. Higher level abstractions for
+ * tasks that can be done on both devices do not have the suffix and will
+ * correctly look up the appropriate low level function when running.
+ *
+ * Functions which only make sense on a single device family may not have
+ * a suitable generic implementation.
*/
/**
@@ -51,6 +80,2447 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
return hw->func_caps.ts_func_info.tmr_index_assoc;
}
+/**
+ * ice_ptp_read_src_incval - Read source timer increment value
+ * @hw: pointer to HW struct
+ *
+ * Read the increment value of the source timer and return it.
+ */
+static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
+{
+ u32 lo, hi;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+
+ return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= GLTSYN_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= GLTSYN_CMD_READ_TIME;
+ break;
+ }
+
+ wr32(hw, GLTSYN_CMD, cmd_val);
+}
+
+/**
+ * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
+ * @hw: pointer to HW struct
+ *
+ * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
+ * write immediately. This triggers the hardware to begin executing all of the
+ * source and PHY timer commands synchronously.
+ */
+static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
+{
+ wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+ ice_flush(hw);
+}
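+
+/* A sketch of the intended flow (illustration only): a timer command is
+ * first prepared on the source timer and on each PHY port, then triggered
+ * by the synchronous execute, e.g. for an atomic adjustment:
+ *
+ * ice_ptp_src_cmd(hw, ADJ_TIME);
+ * ice_ptp_port_cmd_e822(hw, ADJ_TIME); (E822 helper, defined below)
+ * ice_ptp_exec_tmr_cmd(hw);
+ */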
+
+/* E822 family functions
+ *
+ * The following functions operate on the E822 family of devices.
+ */
+
+/**
+ * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
+ * @msg: the PHY message buffer to fill in
+ * @port: the port to access
+ * @offset: the register offset
+ */
+static void
+ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+{
+ int phy_port, phy, quadtype;
+
+ phy_port = port % ICE_PORTS_PER_PHY;
+ phy = port / ICE_PORTS_PER_PHY;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
+
+ if (quadtype == 0) {
+ msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
+ msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
+ } else {
+ msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
+ msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
+ }
+
+ if (phy == 0)
+ msg->dest_dev = rmn_0;
+ else if (phy == 1)
+ msg->dest_dev = rmn_1;
+ else
+ msg->dest_dev = rmn_2;
+}
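+
+/* Worked example (illustrative, assuming 8 ports per PHY and 4 ports per
+ * quad): for port 13, phy_port = 13 % 8 = 5, phy = 13 / 8 = 1, and
+ * quadtype = (13 / 4) % 2 = 1, so the message uses the P_Q1 address
+ * macros and is sent to dest_dev rmn_1.
+ */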
+
+/**
+ * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 64bit register
+ *
+ * Checks if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case P_REG_PAR_PCS_TX_OFFSET_L:
+ *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
+ return true;
+ case P_REG_PAR_PCS_RX_OFFSET_L:
+ *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
+ return true;
+ case P_REG_PAR_TX_TIME_L:
+ *high_addr = P_REG_PAR_TX_TIME_U;
+ return true;
+ case P_REG_PAR_RX_TIME_L:
+ *high_addr = P_REG_PAR_RX_TIME_U;
+ return true;
+ case P_REG_TOTAL_TX_OFFSET_L:
+ *high_addr = P_REG_TOTAL_TX_OFFSET_U;
+ return true;
+ case P_REG_TOTAL_RX_OFFSET_L:
+ *high_addr = P_REG_TOTAL_RX_OFFSET_U;
+ return true;
+ case P_REG_UIX66_10G_40G_L:
+ *high_addr = P_REG_UIX66_10G_40G_U;
+ return true;
+ case P_REG_UIX66_25G_100G_L:
+ *high_addr = P_REG_UIX66_25G_100G_U;
+ return true;
+ case P_REG_TX_CAPTURE_L:
+ *high_addr = P_REG_TX_CAPTURE_U;
+ return true;
+ case P_REG_RX_CAPTURE_L:
+ *high_addr = P_REG_RX_CAPTURE_U;
+ return true;
+ case P_REG_TX_TIMER_INC_PRE_L:
+ *high_addr = P_REG_TX_TIMER_INC_PRE_U;
+ return true;
+ case P_REG_RX_TIMER_INC_PRE_L:
+ *high_addr = P_REG_RX_TIMER_INC_PRE_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 40bit value
+ *
+ * Checks if the provided low address is one of the known 40bit PHY values
+ * split into two registers with the lower 8 bits in the low register and the
+ * upper 32 bits in the high register. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case P_REG_TIMETUS_L:
+ *high_addr = P_REG_TIMETUS_U;
+ return true;
+ case P_REG_PAR_RX_TUS_L:
+ *high_addr = P_REG_PAR_RX_TUS_U;
+ return true;
+ case P_REG_PAR_TX_TUS_L:
+ *high_addr = P_REG_PAR_TX_TUS_U;
+ return true;
+ case P_REG_PCS_RX_TUS_L:
+ *high_addr = P_REG_PCS_RX_TUS_U;
+ return true;
+ case P_REG_PCS_TX_TUS_L:
+ *high_addr = P_REG_PCS_TX_TUS_U;
+ return true;
+ case P_REG_DESK_PAR_RX_TUS_L:
+ *high_addr = P_REG_DESK_PAR_RX_TUS_U;
+ return true;
+ case P_REG_DESK_PAR_TX_TUS_L:
+ *high_addr = P_REG_DESK_PAR_TX_TUS_U;
+ return true;
+ case P_REG_DESK_PCS_RX_TUS_L:
+ *high_addr = P_REG_DESK_PCS_RX_TUS_U;
+ return true;
+ case P_REG_DESK_PCS_TX_TUS_L:
+ *high_addr = P_REG_DESK_PCS_TX_TUS_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_read_phy_reg_e822 - Read a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @offset: PHY register offset to read
+ * @val: on return, the contents read from the PHY
+ *
+ * Read a PHY register for the given port over the device sideband queue.
+ */
+int
+ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ ice_fill_phy_msg_e822(&msg, port, offset);
+ msg.opcode = ice_sbq_msg_rd;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Reads the two registers associated with a 64bit value and returns it in the
+ * val pointer. The offset always specifies the lower register offset to use.
+ * The high offset is looked up. This function only operates on registers
+ * known to be two parts of a 64bit value.
+ */
+static int
+ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ *val = (u64)high << 32 | low;
+
+ return 0;
+}
+
+/**
+ * ice_write_phy_reg_e822 - Write a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to write to
+ * @offset: PHY register offset to write
+ * @val: The value to write to the register
+ *
+ * Write a PHY register for the given port over the device sideband queue.
+ */
+int
+ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ ice_fill_phy_msg_e822(&msg, port, offset);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Write the provided 40b value to the two associated registers by splitting
+ * it up into two chunks, the lower 8 bits and the upper 32 bits.
+ */
+static int
+ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into a lower 8 bit
+ * register and an upper 32 bit register.
+ */
+ if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ low = (u32)(val & P_REG_40B_LOW_M);
+ high = (u32)(val >> P_REG_40B_HIGH_S);
+
+ err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
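+
+/* Worked example (illustrative, assuming P_REG_40B_LOW_M is 0xFF and
+ * P_REG_40B_HIGH_S is 8): writing the 40b value 0x136e44fab programs
+ * low = 0xab into the low register and high = 0x136e44fa into the high
+ * register.
+ */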
+
+/**
+ * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to write to
+ * @low_addr: offset of the lower register to write to
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Write the 64bit value to the two associated 32bit PHY registers. The offset
+ * is always specified as the lower register, and the high address is looked
+ * up. This function only operates on registers known to be two parts of
+ * a 64bit value.
+ */
+static int
+ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ low = lower_32_bits(val);
+ high = upper_32_bits(val);
+
+ err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * @msg: the PHY message buffer to fill in
+ * @quad: the quad to access
+ * @offset: the register offset
+ *
+ * Fill a message buffer for accessing a register in a quad shared between
+ * multiple PHYs.
+ */
+static void
+ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+{
+ u32 addr;
+
+ msg->dest_dev = rmn_0;
+
+ if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+ addr = Q_0_BASE + offset;
+ else
+ addr = Q_1_BASE + offset;
+
+ msg->msg_addr_low = lower_16_bits(addr);
+ msg->msg_addr_high = upper_16_bits(addr);
+}
+
+/**
+ * ice_read_quad_reg_e822 - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to read from
+ * @offset: quad register offset to read
+ * @val: on return, the contents read from the quad
+ *
+ * Read a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+int
+ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_rd;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_quad_reg_e822 - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to write to
+ * @offset: quad register offset to write
+ * @val: The value to write to the register
+ *
+ * Write a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+int
+ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated registers in the
+ * quad memory block that is shared between the internal PHYs of the E822
+ * family of devices.
+ */
+static int
+ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+{
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
+ int err;
+
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+ err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ /* For E822 based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+
+ return 0;
+}
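+
+/* Worked example (illustrative, assuming TS_PHY_LOW_M is 0xFF and
+ * TS_PHY_HIGH_S is 8): a read returning hi = 0x12345678 and lo = 0xab
+ * reassembles into the 40bit timestamp 0x12345678ab.
+ */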
+
+/**
+ * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to clear the timestamp from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
+ * shared between the internal PHYs on the E822 devices.
+ */
+static int
+ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
+{
+ u16 lo_addr, hi_addr;
+ int err;
+
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+ err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_cgu_reg_e822 - Read a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to read
+ * @val: storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static int
+ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ int err;
+
+ cgu_msg.opcode = ice_sbq_msg_rd;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_cgu_reg_e822 - Write a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to write
+ * @val: value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static int
+ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ int err;
+
+ cgu_msg.opcode = ice_sbq_msg_wr;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+ cgu_msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Convert the specified TIME_REF clock frequency to a string.
+ */
+static const char *ice_clk_freq_str(u8 clk_freq)
+{
+ switch ((enum ice_time_ref_freq)clk_freq) {
+ case ICE_TIME_REF_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TIME_REF_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TIME_REF_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TIME_REF_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TIME_REF_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TIME_REF_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Convert the specified clock source to its string name.
+ */
+static const char *ice_clk_src_str(u8 clk_src)
+{
+ switch ((enum ice_clk_src)clk_src) {
+ case ICE_CLK_SRC_TCX0:
+ return "TCX0";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCX0)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ */
+static int
+ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_bwm_lf bwm_lf;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCX0 &&
+ clk_freq != ICE_TIME_REF_FREQ_25_000) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCX0 only supports 25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw24.field.ts_pll_enable) {
+ dw24.field.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.field.time_ref_freq_sel = clk_freq;
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
+ dw19.field.tspll_ndivratio = 1;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
+ dw22.field.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
+ dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
+ dw24.field.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw24.field.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Give the PLL time to stabilize, then verify that it locked */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ if (!bwm_lf.field.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E822 device.
+ */
+static int ice_init_cgu_e822(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ union tspll_cntr_bist_settings cntr_bist;
+ int err;
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ &cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Disable sticky lock detection so the reported lock error is accurate */
+ cntr_bist.field.i_plllock_sel_0 = 0;
+ cntr_bist.field.i_plllock_sel_1 = 0;
+
+ err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Configure the CGU PLL using the parameters from the function
+ * capabilities.
+ */
+ err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
+ * @hw: pointer to the HW struct
+ *
+ * Set the window length used for the vernier port calibration process.
+ */
+static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
+ PTP_VERNIER_WL);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform PHC initialization steps specific to E822 devices.
+ */
+static int ice_ptp_init_phc_e822(struct ice_hw *hw)
+{
+ u32 regval;
+ int err;
+
+ /* Enable reading switch and PHY registers over the sideband queue */
+#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
+#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
+ regval = rd32(hw, PF_SB_REM_DEV_CTL);
+ regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
+ PF_SB_REM_DEV_CTL_PHY0);
+ wr32(hw, PF_SB_REM_DEV_CTL, regval);
+
+ /* Initialize the Clock Generation Unit */
+ err = ice_init_cgu_e822(hw);
+ if (err)
+ return err;
+
+ /* Set window length for all the ports */
+ return ice_ptp_set_vernier_wl(hw);
+}
+
+/**
+ * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ */
+static int
+ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
+{
+ u64 phy_time;
+ u8 port;
+ int err;
+
+ /* The time represents the upper 32 bits of the PHY timer, so we need
+ * to shift to account for this when programming.
+ */
+ phy_time = (u64)time << 32;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ /* Tx case */
+ err = ice_write_64b_phy_reg_e822(hw, port,
+ P_REG_TX_TIMER_INC_PRE_L,
+ phy_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_64b_phy_reg_e822(hw, port,
+ P_REG_RX_TIMER_INC_PRE_L,
+ phy_time);
+ if (err)
+ goto exit_err;
+ }
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
+ port, err);
+
+ return err;
+}
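+
+/* Worked example (illustrative): to initialize the port clocks to one
+ * second, the caller passes time = 1000000000 (0x3b9aca00), and each port
+ * is programmed with phy_time = 0x3b9aca00 << 32 = 0x3b9aca0000000000,
+ * placing the nanosecond count in the upper 32 bits of the PHY timer.
+ */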
+
+/**
+ * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port Tx and Rx clocks
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using two's complement arithmetic.
+ */
+int
+ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
+{
+ u32 l_time, u_time;
+ int err;
+
+ l_time = lower_32_bits(time);
+ u_time = upper_32_bits(time);
+
+ /* Tx case */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
+ port, err);
+ return err;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
+ */
+static int
+ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
+{
+ s64 cycles;
+ u8 port;
+
+ /* The port clock supports adjustment of the sub-nanosecond portion of
+ * the clock. We shift the provided adjustment in nanoseconds to
+ * calculate the appropriate adjustment to program into the PHY ports.
+ */
+ if (adj > 0)
+ cycles = (s64)adj << 32;
+ else
+ cycles = -(((s64)-adj) << 32);
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
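+
+/* Worked example (illustrative): an adjustment of adj = -25 ns becomes
+ * cycles = -((s64)25 << 32) = 0xffffffe700000000 in two's complement,
+ * which ice_ptp_prep_port_adj_e822() splits into u_time = 0xffffffe7 and
+ * l_time = 0x00000000.
+ */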
+
+/**
+ * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an INIT_INCVAL command.
+ */
+static int
+ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
+{
+ int err;
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
+ incval);
+ if (err)
+ goto exit_err;
+ }
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
+ port, err);
+
+ return err;
+}
+
+/**
+ * ice_ptp_read_port_capture - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ *
+ * Note this has no equivalent for the E810 devices.
+ */
+static int
+ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
+ (unsigned long long)*tx_ts);
+
+ /* Rx case */
+ err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
+ (unsigned long long)*rx_ts);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to send the timer command to
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ *
+ * Note there is no equivalent of this operation on E810, as that device
+ * always handles all external PHYs internally.
+ */
+static int
+ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, val;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_PHY_SRC;
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= PHY_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= PHY_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= PHY_CMD_READ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
+ break;
+ }
+
+ /* Tx case: read, modify, write */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Rx case: read, modify, write */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+ * @hw: pointer to the HW struct
+ * @cmd: timer command to prepare
+ *
+ * Prepare all ports connected to this device for an upcoming timer sync
+ * command.
+ */
+static int
+ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_ptp_one_port_cmd(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* E822 Vernier calibration functions
+ *
+ * The following functions are used as part of the vernier calibration of
+ * a port. This calibration increases the precision of the timestamps on the
+ * port.
+ */
+
+/**
+ * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * @hw: pointer to HW struct
+ * @port: the port to read from
+ * @link_out: if non-NULL, holds link speed on success
+ * @fec_out: if non-NULL, holds FEC algorithm on success
+ *
+ * Read the serdes data for the PHY port and extract the link speed and FEC
+ * algorithm.
+ */
+static int
+ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+ enum ice_ptp_link_spd *link_out,
+ enum ice_ptp_fec_mode *fec_out)
+{
+ enum ice_ptp_link_spd link;
+ enum ice_ptp_fec_mode fec;
+ u32 serdes;
+ int err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
+ return err;
+ }
+
+ /* Determine the FEC algorithm */
+ fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
+
+ serdes &= P_REG_LINK_SPEED_SERDES_M;
+
+ /* Determine the link speed */
+ if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
+ switch (serdes) {
+ case ICE_PTP_SERDES_25G:
+ link = ICE_PTP_LNK_SPD_25G_RS;
+ break;
+ case ICE_PTP_SERDES_50G:
+ link = ICE_PTP_LNK_SPD_50G_RS;
+ break;
+ case ICE_PTP_SERDES_100G:
+ link = ICE_PTP_LNK_SPD_100G_RS;
+ break;
+ default:
+ return -EIO;
+ }
+ } else {
+ switch (serdes) {
+ case ICE_PTP_SERDES_1G:
+ link = ICE_PTP_LNK_SPD_1G;
+ break;
+ case ICE_PTP_SERDES_10G:
+ link = ICE_PTP_LNK_SPD_10G;
+ break;
+ case ICE_PTP_SERDES_25G:
+ link = ICE_PTP_LNK_SPD_25G;
+ break;
+ case ICE_PTP_SERDES_40G:
+ link = ICE_PTP_LNK_SPD_40G;
+ break;
+ case ICE_PTP_SERDES_50G:
+ link = ICE_PTP_LNK_SPD_50G;
+ break;
+ default:
+ return -EIO;
+ }
+ }
+
+ if (link_out)
+ *link_out = link;
+ if (fec_out)
+ *fec_out = fec;
+
+ return 0;
+}
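+
+/* Worked example (illustrative): a serdes register reporting the 25G
+ * serdes rate with RS-FEC decodes to ICE_PTP_LNK_SPD_25G_RS, while the
+ * same serdes rate without RS-FEC decodes to ICE_PTP_LNK_SPD_25G.
+ */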
+
+/**
+ * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * @hw: pointer to HW struct
+ * @port: the port to configure the quad for
+ */
+static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ int err;
+ u32 val;
+ u8 quad;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
+ err);
+ return;
+ }
+
+ quad = port / ICE_PORTS_PER_QUAD;
+
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
+ err);
+ return;
+ }
+
+ if (link_spd >= ICE_PTP_LNK_SPD_40G)
+ val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+ else
+ val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+
+ err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
+ err);
+ return;
+ }
+}
+
+/**
+ * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
+ * @hw: pointer to the HW structure
+ * @port: the port to configure
+ *
+ * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
+ * hardware clock time units (TUs). That is, determine the number of TUs per
+ * serdes unit interval, and program the UIX registers with this conversion.
+ *
+ * This conversion is used as part of the calibration process when determining
+ * the additional error of a timestamp vs the real time of transmission or
+ * receipt of the packet.
+ *
+ * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
+ * for the two main serdes clock rates: the 10G/40G and 25G/100G serdes clocks.
+ *
+ * To calculate the conversion ratio, we use the following facts:
+ *
+ * a) the clock frequency in Hz (cycles per second)
+ * b) the number of TUs per cycle (the increment value of the clock)
+ * c) 1 second per 1 billion nanoseconds
+ * d) the duration of 66 UIs in nanoseconds
+ *
+ * Given these facts, we can use the following table to work out what ratios
+ * to multiply in order to get the number of TUs per 66 UIs:
+ *
+ * cycles | 1 second | incval (TUs) | nanoseconds
+ * -------+--------------+--------------+-------------
+ * second | 1 billion ns | cycle | 66 UIs
+ *
+ * To perform the multiplication using integers without too much loss of
+ * precision, we can use the following equation:
+ *
+ * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
+ *
+ * We scale up to using 6600 UI instead of 66 in order to avoid fractional
+ * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
+ *
+ * The increment value has a maximum expected range of about 34 bits, while
+ * the frequency value is about 29 bits. Multiplying these values shouldn't
+ * overflow the 64 bits. However, we must then further multiply them again by
+ * the Serdes unit interval duration. To avoid overflow here, we split the
+ * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
+ * a divide by 390,625,000. This does lose some precision, but avoids
+ * miscalculation due to arithmetic overflow.
+ */
+static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, uix;
+ int err;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second divided by 256 */
+ tu_per_sec = (cur_freq * clk_incval) >> 8;
+
+#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
+#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
+
+ /* Program the 10Gb/40Gb conversion ratio */
+ uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
+
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
+ uix);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Program the 25Gb/100Gb conversion ratio */
+ uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
+
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
+ uix);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
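+
+/* Worked example (illustrative): with the 25 MHz TIME_REF and its nominal
+ * incval, tu_per_sec (TUs per second divided by 256) is approximately
+ * (1e9 << 32) >> 8, so the 10G/40G ratio works out to
+ *
+ * ((1e9 << 32) >> 8) * 640 / 390625000 ~= 6.4 * 2^32 ~= 0x666666666
+ *
+ * i.e. 6.4 ns worth of TUs per 66 UIs, matching the 10G/40G line rate.
+ */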
+
+/**
+ * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ *
+ * Configure the number of TUs for the PAR and PCS clocks used as part of the
+ * timestamp calibration process. This depends on the link speed, as the PHY
+ * uses different markers depending on the speed.
+ *
+ * 1Gb/10Gb/25Gb:
+ * - Tx/Rx PAR/PCS markers
+ *
+ * 25Gb RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ *
+ * 40Gb/50Gb:
+ * - Tx/Rx PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ *
+ * 50G RS and 100GB RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ * - Tx PAR/PCS markers
+ *
+ * To calculate the conversion, we use the PHC clock frequency (cycles per
+ * second), the increment value (TUs per cycle), and the related PHY clock
+ * frequency to calculate the TUs per unit of the PHY link clock. The
+ * following table shows how the units convert:
+ *
+ * cycles | TUs | second
+ * -------+-------+--------
+ * second | cycle | cycles
+ *
+ * For each conversion register, look up the appropriate frequency from the
+ * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
+ * this to the appropriate register, preparing hardware to perform timestamp
+ * calibration to calculate the total Tx or Rx offset to adjust the timestamp
+ * in order to calibrate for the internal PHY delays.
+ *
+ * Note that the increment value ranges up to ~34 bits, and the clock
+ * frequency is ~29 bits, so multiplying them together fits within 64-bit
+ * arithmetic.
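+ *
+ * For example, assuming a 156.25 MHz PCS clock (a 6.4 ns cycle) and the
+ * nominal increment value (1e9 * 2^32 TUs per second), the register value
+ * is 1e9 * 2^32 / 156,250,000 = 6.4 * 2^32 TUs per PCS clock cycle.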
+ */
+static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per cycle of the PHC clock */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* For each PHY conversion register, look up the appropriate link
+ * speed frequency and determine the TUs per that clock's cycle time.
+ * Split this into a high and low value and then program the
+ * appropriate register. If that link speed does not use the
+ * associated register, write zeros to clear it instead.
+ */
+
+ /* P_REG_PAR_TX_TUS */
+ if (e822_vernier[link_spd].tx_par_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_par_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PAR_RX_TUS */
+ if (e822_vernier[link_spd].rx_par_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_par_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PCS_TX_TUS */
+ if (e822_vernier[link_spd].tx_pcs_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_pcs_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PCS_RX_TUS */
+ if (e822_vernier[link_spd].rx_pcs_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_pcs_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PAR_TX_TUS */
+ if (e822_vernier[link_spd].tx_desk_rsgb_par)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_par);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PAR_RX_TUS */
+ if (e822_vernier[link_spd].rx_desk_rsgb_par)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_par);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PCS_TX_TUS */
+ if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_pcs);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PCS_RX_TUS */
+ if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_pcs);
+ else
+ phy_tus = 0;
+
+ return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
+ phy_tus);
+}
+
+/**
+ * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
+ * @hw: pointer to the HW struct
+ * @link_spd: the Link speed to calculate for
+ *
+ * Calculate the fixed offset due to known static latency data.
+ */
+static u64
+ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* Calculate number of TUs to add for the fixed Tx latency. Since the
+ * latency measurement is in 1/100th of a nanosecond, we need to
+ * multiply by tu_per_sec and then divide by 1e11. This calculation
+ * overflows 64 bit integer arithmetic, so break it up into two
+ * divisions, first by 1e4 and then by 1e7.
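+ *
+ * For example, a tx_fixed_delay of 100 (i.e. 1 ns) at the nominal
+ * increment value works out to (tu_per_sec / 1e4) * 100 / 1e7 =
+ * tu_per_sec / 1e9 ~= 2^32 TUs.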
+ */
+ fixed_offset = div_u64(tu_per_sec, 10000);
+ fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
+ fixed_offset = div_u64(fixed_offset, 10000000);
+
+ return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
+ * adjust Tx timestamps by. This is calculated by combining some known static
+ * latency along with the Vernier offset computations done by hardware.
+ *
+ * This function must be called only after the offset registers are valid,
+ * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
+ * has measured the offset.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ */
+static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset, val;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+
+ /* Read the first Vernier offset from the PHY register and add it to
+ * the total offset.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_1G ||
+ link_spd == ICE_PTP_LNK_SPD_10G ||
+ link_spd == ICE_PTP_LNK_SPD_25G ||
+ link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_PCS_TX_OFFSET_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* For Tx, we only need to use the second Vernier offset for
+ * multi-lane link speeds with RS-FEC. The lanes will always be
+ * aligned.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_TX_TIME_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Tx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_fixed_tx_offset_e822 - Configure Tx offset for bypass mode
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Calculate and program the fixed Tx offset, and indicate that the offset is
+ * ready. This can be used when operating in bypass mode.
+ */
+static int
+ice_phy_cfg_fixed_tx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+
+ /* Program the fixed Tx offset into the P_REG_TOTAL_TX_OFFSET_L
+ * register, then indicate that the Tx offset is ready. After this,
+ * timestamps will be enabled.
+ *
+ * Note that this skips including the more precise offsets generated
+ * by the Vernier calibration.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to adjust for
+ * @link_spd: the current link speed of the PHY
+ * @fec_mode: the current FEC mode of the PHY
+ * @pmd_adj: on return, the amount to adjust the Rx total offset by
+ *
+ * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
+ * This varies by link speed and FEC mode. The value calculated accounts for
+ * various delays caused when receiving a packet.
+ */
+static int
+ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
+ enum ice_ptp_link_spd link_spd,
+ enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
+ u8 pmd_align;
+ u32 val;
+ int err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
+ err);
+ return err;
+ }
+
+ pmd_align = (u8)val;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* The PMD alignment adjustment measurement depends on the link speed,
+ * and whether FEC is enabled. For each link speed, the alignment
+ * adjustment is calculated by dividing a value by the length of
+ * a Time Unit in nanoseconds.
+ *
+ * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
+ * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
+ * 10G w/FEC: align * 0.1 * 32/33
+ * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
+ * 25G w/FEC: align * 0.4 * 32/33
+ * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
+ * 40G w/FEC: align * 0.1 * 32/33
+ * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
+ * 50G w/FEC: align * 0.8 * 32/33
+ *
+ * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
+ *
+ * To allow for calculating this value using integer arithmetic, we
+ * instead start with the number of TUs per second, (inverse of the
+ * length of a Time Unit in nanoseconds), multiply by a value based
+ * on the PMD alignment register, and then divide by the right value
+ * calculated based on the table above. To avoid integer overflow this
+ * division is broken up into a step of dividing by 125 first.
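+ *
+ * For example, at 10G with a PMD alignment of 33 the adjustment is
+ * 33 * 0.1 * 32/33 = 3.2 ns (3.2 * 2^32 TUs at the nominal increment
+ * value).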
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_1G) {
+ if (pmd_align == 4)
+ mult = 10;
+ else
+ mult = (pmd_align + 6) % 10;
+ } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
+ link_spd == ICE_PTP_LNK_SPD_25G ||
+ link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G) {
+ /* If Clause 74 FEC, always calculate PMD adjust */
+ if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
+ mult = pmd_align;
+ else
+ mult = 0;
+ } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ if (pmd_align < 17)
+ mult = pmd_align + 40;
+ else
+ mult = pmd_align;
+ } else {
+ ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
+ link_spd);
+ mult = 0;
+ }
+
+ /* In some cases, there's no need to adjust for the PMD alignment */
+ if (!mult) {
+ *pmd_adj = 0;
+ return 0;
+ }
+
+ /* Calculate the adjustment by multiplying TUs per second by the
+ * appropriate multiplier and divisor. To avoid overflow, we first
+ * divide by 125, and then handle remaining divisor based on the link
+ * speed pmd_adj_divisor value.
+ */
+ adj = div_u64(tu_per_sec, 125);
+ adj *= mult;
+ adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
+ * cycle count is necessary.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
+ u64 cycle_adj;
+ u8 rx_cycle;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
+ &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
+ err);
+ return err;
+ }
+
+ rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
+ if (rx_cycle) {
+ mult = (4 - rx_cycle) * 40;
+
+ cycle_adj = div_u64(tu_per_sec, 125);
+ cycle_adj *= mult;
+ cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ adj += cycle_adj;
+ }
+ } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
+ u64 cycle_adj;
+ u8 rx_cycle;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
+ &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
+ err);
+ return err;
+ }
+
+ rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
+ if (rx_cycle) {
+ mult = rx_cycle * 40;
+
+ cycle_adj = div_u64(tu_per_sec, 125);
+ cycle_adj *= mult;
+ cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ adj += cycle_adj;
+ }
+ }
+
+ /* Return the calculated adjustment */
+ *pmd_adj = adj;
+
+ return 0;
+}
+
+/**
+ * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
+ * @hw: pointer to HW struct
+ * @link_spd: The Link speed to calculate for
+ *
+ * Determine the fixed Rx latency for a given link speed.
+ */
+static u64
+ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* Calculate number of TUs to add for the fixed Rx latency. Since the
+ * latency measurement is in 1/100th of a nanosecond, we need to
+ * multiply by tu_per_sec and then divide by 1e11. This calculation
+ * overflows 64 bit integer arithmetic, so break it up into two
+ * divisions, first by 1e4 and then by 1e7.
+ */
+ fixed_offset = div_u64(tu_per_sec, 10000);
+ fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
+ fixed_offset = div_u64(fixed_offset, 10000000);
+
+ return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
+ * adjust Rx timestamps by. This combines calculations from the Vernier offset
+ * measurements taken in hardware with some data about known fixed delay as
+ * well as adjusting for multi-lane alignment delay.
+ *
+ * This function must be called only after the offset registers are valid,
+ * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
+ * has measured the offset.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ */
+static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset, pmd, val;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+
+ /* Read the first Vernier offset from the PHY register and add it to
+ * the total offset.
+ */
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_PCS_RX_OFFSET_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+
+ /* For Rx, all multi-lane link speeds include a second Vernier
+ * calibration, because the lanes might not be aligned.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G ||
+ link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_RX_TIME_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* In addition, Rx must account for the PMD alignment */
+ err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
+ if (err)
+ return err;
+
+ /* For RS-FEC, this adjustment adds delay, but for other modes, it
+ * subtracts delay.
+ */
+ if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
+ total_offset += pmd;
+ else
+ total_offset -= pmd;
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Rx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_fixed_rx_offset_e822 - Configure fixed Rx offset for bypass mode
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Calculate and program the fixed Rx offset, and indicate that the offset is
+ * ready. This can be used when operating in bypass mode.
+ */
+static int
+ice_phy_cfg_fixed_rx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+
+ /* Program the fixed Rx offset into the P_REG_TOTAL_RX_OFFSET_L
+ * register, then indicate that the Rx offset is ready. After this,
+ * timestamps will be enabled.
+ *
+ * Note that this skips including the more precise offsets generated
+ * by Vernier calibration.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @phy_time: on return, the 64bit PHY timer value
+ * @phc_time: on return, the lower 64bits of PHC time
+ *
+ * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
+ * timer values.
+ */
+static int
+ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
+ u64 *phc_time)
+{
+ u64 tx_time, rx_time;
+ u32 zo, lo;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ /* Prepare the PHC timer for a READ_TIME capture command */
+ ice_ptp_src_cmd(hw, READ_TIME);
+
+ /* Prepare the PHY timer for a READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
+ if (err)
+ return err;
+
+ /* Issue the sync to start the READ_TIME capture */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Read the captured PHC time from the shadow time registers */
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ *phc_time = (u64)lo << 32 | zo;
+
+ /* Read the captured PHY time from the PHY shadow registers */
+ err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
+ if (err)
+ return err;
+
+ /* If the PHY Tx and Rx timers don't match, log a warning message.
+ * Note that this should not happen in normal circumstances since the
+ * driver always programs them together.
+ */
+ if (tx_time != rx_time)
+ dev_warn(ice_hw_to_dev(hw),
+ "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
+ port, (unsigned long long)tx_time,
+ (unsigned long long)rx_time);
+
+ *phy_time = tx_time;
+
+ return 0;
+}
+
+/**
+ * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to synchronize
+ *
+ * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
+ * This is done by issuing a READ_TIME command which triggers a simultaneous
+ * read of the PHY timer and PHC timer. Then we use the difference to
+ * calculate an appropriate two's complement addition to add to the PHY timer
+ * in order to ensure it reads the same value as the primary PHC timer.
+ */
+static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+{
+ u64 phc_time, phy_time, difference;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ /* Calculate the amount required to add to the port time in order for
+ * it to match the PHC time.
+ *
+ * Note that the port adjustment is done using two's complement
+ * arithmetic. This is convenient since it means that we can simply
+ * calculate the difference between the PHC time and the port time,
+ * and it will be interpreted correctly.
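+ *
+ * For example, if the port timer is 5 TUs ahead of the PHC,
+ * phc_time - phy_time wraps to 0xFFFFFFFFFFFFFFFB, which as an s64 is -5
+ * and steps the port timer back by 5 TUs.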
+ */
+ difference = phc_time - phy_time;
+
+ err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
+ if (err)
+ goto err_unlock;
+
+ err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
+ if (err)
+ goto err_unlock;
+
+ /* Issue the sync to activate the time adjustment */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Re-capture the timer values to flush the command registers and
+ * verify that the time was properly adjusted.
+ */
+ err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ dev_info(ice_hw_to_dev(hw),
+ "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
+ port, (unsigned long long)phy_time,
+ (unsigned long long)phc_time);
+
+ ice_ptp_unlock(hw);
+
+ return 0;
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
+}
+
+/**
+ * ice_stop_phy_timer_e822 - Stop the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to stop
+ * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
+ *
+ * Stop the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ */
+int
+ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
+{
+ int err;
+ u32 val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
+ if (err)
+ return err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_START_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_ENA_CLK_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ if (soft_reset) {
+ val |= P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_start_phy_timer_e822 - Start the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to start
+ * @bypass: if true, start the PHY in bypass mode
+ *
+ * Start the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Bypass mode enables timestamps immediately without waiting for Vernier
+ * calibration to complete. Hardware will still continue taking Vernier
+ * measurements on Tx or Rx of packets, but they will not be applied to
+ * timestamps. Use ice_phy_exit_bypass_e822 to exit bypass mode once hardware
+ * has completed offset calculation.
+ */
+int
+ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
+{
+ u32 lo, hi, val;
+ u64 incval;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ err = ice_stop_phy_timer_e822(hw, port, false);
+ if (err)
+ return err;
+
+ ice_phy_cfg_lane_e822(hw, port);
+
+ err = ice_phy_cfg_uix_e822(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_parpcs_e822(hw, port);
+ if (err)
+ return err;
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ incval = (u64)hi << 32 | lo;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_START_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ val |= P_REG_PS_ENA_CLK_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_LOAD_OFFSET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_sync_phy_timer_e822(hw, port);
+ if (err)
+ return err;
+
+ if (bypass) {
+ val |= P_REG_PS_BYPASS_MODE_M;
+ /* Enter BYPASS mode, enabling timestamps immediately. */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ /* Program the fixed Tx offset */
+ err = ice_phy_cfg_fixed_tx_offset_e822(hw, port);
+ if (err)
+ return err;
+
+ /* Program the fixed Rx offset */
+ err = ice_phy_cfg_fixed_rx_offset_e822(hw, port);
+ if (err)
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_phy_exit_bypass_e822 - Exit bypass mode, after vernier calculations
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * After hardware finishes vernier calculations for the Tx and Rx offset, this
+ * function can be used to exit bypass mode by updating the total Tx and Rx
+ * offsets, and then disabling bypass. This will enable hardware to include
+ * the more precise offset calibrations, increasing precision of the generated
+ * timestamps.
+ *
+ * This cannot be done until hardware has measured the offsets, which requires
+ * waiting until at least one packet has been sent and received by the device.
+ */
+int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
+ ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n",
+ port);
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(val & P_REG_RX_OV_STATUS_OV_M)) {
+ ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n",
+ port);
+ return -EBUSY;
+ }
+
+ err = ice_phy_cfg_tx_offset_e822(hw, port);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ err = ice_phy_cfg_rx_offset_e822(hw, port);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ /* Exit bypass mode now that the offset has been updated */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read P_REG_PS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(val & P_REG_PS_BYPASS_MODE_M))
+ ice_debug(hw, ICE_DBG_PTP, "Port %u not in bypass mode\n",
+ port);
+
+ val &= ~P_REG_PS_BYPASS_MODE_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to disable bypass for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ dev_info(ice_hw_to_dev(hw), "Exiting bypass mode on PHY port %u\n",
+ port);
+
+ return 0;
+}
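+
+/* Illustrative usage sketch (not part of this patch): bring a port up in
+ * bypass mode so timestamps work immediately, then exit bypass once the
+ * Vernier measurements are valid. retry_exit_bypass() below is a
+ * hypothetical stand-in for whatever deferral mechanism the caller uses.
+ *
+ *	err = ice_start_phy_timer_e822(hw, port, true);
+ *	if (err)
+ *		return err;
+ *
+ *	err = ice_phy_exit_bypass_e822(hw, port);
+ *	if (err == -EBUSY)
+ *		retry_exit_bypass();
+ */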
+
/* E810 functions
*
* The following functions operate on the E810 series devices which use
@@ -68,18 +2538,18 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
- int status;
+ int err;
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
msg.dest_dev = rmn_0;
- status = ice_sbq_rw_reg(hw, &msg);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
- status);
- return status;
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
}
*val = msg.data;
@@ -98,7 +2568,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input msg = {0};
- int status;
+ int err;
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
@@ -106,11 +2576,11 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.dest_dev = rmn_0;
msg.data = val;
- status = ice_sbq_rw_reg(hw, &msg);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
- status);
- return status;
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -130,23 +2600,23 @@ static int
ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
{
u32 lo_addr, hi_addr, lo, hi;
- int status;
+ int err;
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
- status = ice_read_phy_reg_e810(hw, lo_addr, &lo);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
}
- status = ice_read_phy_reg_e810(hw, hi_addr, &hi);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
}
/* For E810 devices, the timestamp is reported with the lower 32 bits
@@ -169,23 +2639,23 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
{
u32 lo_addr, hi_addr;
- int status;
+ int err;
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
- status = ice_write_phy_reg_e810(hw, lo_addr, 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, hi_addr, 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, hi_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -200,17 +2670,32 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
*/
int ice_ptp_init_phy_e810(struct ice_hw *hw)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
- GLTSYN_ENA_TSYN_ENA_M);
- if (status)
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
+ GLTSYN_ENA_TSYN_ENA_M);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
- status);
+ err);
- return status;
+ return err;
+}
+
+/**
+ * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E810-specific PTP hardware clock initialization steps.
+ */
+static int ice_ptp_init_phc_e810(struct ice_hw *hw)
+{
+ /* Ensure synchronization delay is zero */
+ wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
+ /* Initialize the PHY */
+ return ice_ptp_init_phy_e810(hw);
}
/**
@@ -227,22 +2712,22 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw)
*/
static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -263,26 +2748,26 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
*/
static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
* nanoseconds. Sub-nanosecond adjustment is not supported.
*/
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -300,25 +2785,25 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
{
u32 high, low;
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -335,7 +2820,7 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
- int status;
+ int err;
switch (cmd) {
case INIT_TIME:
@@ -356,20 +2841,20 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
}
/* Read, modify, write */
- status = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, status %d\n", status);
- return status;
+ err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
+ return err;
}
/* Modify necessary bits only and perform write */
val &= ~TS_CMD_MASK_E810;
val |= cmd_val;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, status %d\n", status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
+ return err;
}
return 0;
@@ -377,12 +2862,9 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
/* Device agnostic functions
*
- * The following functions implement useful behavior to hide the differences
- * between E810 and other devices. They call the device-specific
- * implementations where necessary.
- *
- * Currently, the driver only supports E810, but future work will enable
- * support for E822-based devices.
+ * The following functions implement shared behavior common to both E822 and
+ * E810 devices, possibly calling a device-specific implementation where
+ * necessary.
*/
/**
@@ -433,42 +2915,6 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
- * ice_ptp_src_cmd - Prepare source timer for a timer command
- * @hw: pointer to HW structure
- * @cmd: Timer command
- *
- * Prepare the source timer for an upcoming timer sync command.
- */
-static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
-{
- u32 cmd_val;
- u8 tmr_idx;
-
- tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_CPK_SRC;
-
- switch (cmd) {
- case INIT_TIME:
- cmd_val |= GLTSYN_CMD_INIT_TIME;
- break;
- case INIT_INCVAL:
- cmd_val |= GLTSYN_CMD_INIT_INCVAL;
- break;
- case ADJ_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_TIME;
- break;
- case ADJ_TIME_AT_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
- break;
- case READ_TIME:
- cmd_val |= GLTSYN_CMD_READ_TIME;
- break;
- }
-
- wr32(hw, GLTSYN_CMD, cmd_val);
-}
-
-/**
* ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
* @hw: pointer to HW struct
* @cmd: the command to issue
@@ -480,23 +2926,26 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
- int status;
+ int err;
/* First, prepare the source timer */
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- status = ice_ptp_port_cmd_e810(hw, cmd);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n",
- cmd, status);
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_port_cmd_e810(hw, cmd);
+ else
+ err = ice_ptp_port_cmd_e822(hw, cmd);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
+ cmd, err);
+ return err;
}
- /* Write the sync command register to drive both source and PHY timer commands
- * synchronously
+ /* Write the sync command register to drive both source and PHY timer
+ * commands synchronously
*/
- wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+ ice_ptp_exec_tmr_cmd(hw);
return 0;
}
@@ -516,8 +2965,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
int ice_ptp_init_time(struct ice_hw *hw, u64 time)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -528,9 +2977,12 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
+ else
+ err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, INIT_TIME);
}
@@ -551,8 +3003,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
*/
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -560,9 +3012,12 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- status = ice_ptp_prep_phy_incval_e810(hw, incval);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_incval_e810(hw, incval);
+ else
+ err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
}
@@ -576,16 +3031,16 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
*/
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
{
- int status;
+ int err;
if (!ice_ptp_lock(hw))
return -EBUSY;
- status = ice_ptp_write_incval(hw, incval);
+ err = ice_ptp_write_incval(hw, incval);
ice_ptp_unlock(hw);
- return status;
+ return err;
}
/**
@@ -603,8 +3058,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
*/
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -616,9 +3071,12 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- status = ice_ptp_prep_phy_adj_e810(hw, adj);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_adj_e810(hw, adj);
+ else
+ err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, ADJ_TIME);
}
@@ -630,11 +3088,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
* @idx: the timestamp index to read
* @tstamp: on return, the 40bit timestamp value
*
- * Read a 40bit timestamp value out of the timestamp block.
+ * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
+ * the block is the quad to read from. For E810 devices, the block is the
+ * logical port to read from.
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ if (ice_is_e810(hw))
+ return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ else
+ return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
}
/**
@@ -643,11 +3106,16 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
* @block: the block to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block.
+ * Clear a timestamp, resetting its valid bit, from the timestamp block. For
+ * E822 devices, the block is the quad to clear from. For E810 devices, the
+ * block is the logical port to clear from.
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- return ice_clear_phy_tstamp_e810(hw, block, idx);
+ if (ice_is_e810(hw))
+ return ice_clear_phy_tstamp_e810(hw, block, idx);
+ else
+ return ice_clear_phy_tstamp_e822(hw, block, idx);
}
/* E810T SMA functions
@@ -800,3 +3268,25 @@ bool ice_is_pca9575_present(struct ice_hw *hw)
return !status && handle;
}
+
+/**
+ * ice_ptp_init_phc - Initialize PTP hardware clock
+ * @hw: pointer to the HW struct
+ *
+ * Perform the steps required to initialize the PTP hardware clock.
+ */
+int ice_ptp_init_phc(struct ice_hw *hw)
+{
+ u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Enable source clocks */
+ wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Clear event err indications for auxiliary pins */
+ (void)rd32(hw, GLTSYN_STAT(src_idx));
+
+ if (ice_is_e810(hw))
+ return ice_ptp_init_phc_e810(hw);
+ else
+ return ice_ptp_init_phc_e822(hw);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index b2984b5c22c1..519e75462e67 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -12,6 +12,112 @@ enum ice_ptp_tmr_cmd {
READ_TIME
};
+enum ice_ptp_serdes {
+ ICE_PTP_SERDES_1G,
+ ICE_PTP_SERDES_10G,
+ ICE_PTP_SERDES_25G,
+ ICE_PTP_SERDES_40G,
+ ICE_PTP_SERDES_50G,
+ ICE_PTP_SERDES_100G
+};
+
+enum ice_ptp_link_spd {
+ ICE_PTP_LNK_SPD_1G,
+ ICE_PTP_LNK_SPD_10G,
+ ICE_PTP_LNK_SPD_25G,
+ ICE_PTP_LNK_SPD_25G_RS,
+ ICE_PTP_LNK_SPD_40G,
+ ICE_PTP_LNK_SPD_50G,
+ ICE_PTP_LNK_SPD_50G_RS,
+ ICE_PTP_LNK_SPD_100G_RS,
+ NUM_ICE_PTP_LNK_SPD /* Must be last */
+};
+
+enum ice_ptp_fec_mode {
+ ICE_PTP_FEC_MODE_NONE,
+ ICE_PTP_FEC_MODE_CLAUSE74,
+ ICE_PTP_FEC_MODE_RS_FEC
+};
+
+/**
+ * struct ice_time_ref_info_e822
+ * @pll_freq: Frequency of PLL that drives timer ticks in Hz
+ * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
+ * @pps_delay: propagation delay of the PPS output signal
+ *
+ * Characteristic information for the various TIME_REF sources possible in the
+ * E822 devices.
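+ *
+ * The nominal increment value is chosen so the timer counts nanoseconds
+ * with 32 bits of sub-nanosecond precision, i.e. roughly
+ * 2^32 * 1e9 / pll_freq TUs per PLL cycle.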
+ */
+struct ice_time_ref_info_e822 {
+ u64 pll_freq;
+ u64 nominal_incval;
+ u8 pps_delay;
+};
+
+/**
+ * struct ice_vernier_info_e822
+ * @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS
+ * @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS
+ * @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS
+ * @rx_pcs_clk: Frequency used to calculate P_REG_PCS_RX_TUS
+ * @tx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_TX_TUS
+ * @rx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_RX_TUS
+ * @tx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_TX_TUS
+ * @rx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_RX_TUS
+ * @tx_fixed_delay: Fixed Tx latency measured in 1/100th nanoseconds
+ * @pmd_adj_divisor: Divisor used to calculate PMD alignment adjustment
+ * @rx_fixed_delay: Fixed Rx latency measured in 1/100th nanoseconds
+ *
+ * Table of constants used as part of the Vernier calibration of the Tx
+ * and Rx timestamps. This includes frequency values used to compute TUs per
+ * PAR/PCS clock cycle, and static delay values measured during hardware
+ * design.
+ *
+ * Note that some values are not used for all link speeds, and the
+ * P_REG_DESK_PAR* registers may represent different clock markers at
+ * different link speeds, either the deskew marker for multi-lane link speeds
+ * or the Reed Solomon gearbox marker for RS-FEC.
+ */
+struct ice_vernier_info_e822 {
+ u32 tx_par_clk;
+ u32 rx_par_clk;
+ u32 tx_pcs_clk;
+ u32 rx_pcs_clk;
+ u32 tx_desk_rsgb_par;
+ u32 rx_desk_rsgb_par;
+ u32 tx_desk_rsgb_pcs;
+ u32 rx_desk_rsgb_pcs;
+ u32 tx_fixed_delay;
+ u32 pmd_adj_divisor;
+ u32 rx_fixed_delay;
+};
+
+/**
+ * struct ice_cgu_pll_params_e822
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ * @post_pll_div: Post PLL divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF frequency.
+ */
+struct ice_cgu_pll_params_e822 {
+ u32 refclk_pre_div;
+ u32 feedback_div;
+ u32 frac_n_div;
+ u32 post_pll_div;
+};
+
+extern const struct
+ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+
+/* Table of constants related to possible TIME_REF sources */
+extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+
+/* Table of constants for Vernier calibration on E822 */
+extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
+
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
*/
@@ -27,6 +133,59 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
+int ice_ptp_init_phc(struct ice_hw *hw);
+
+/* E822 family functions */
+int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
+int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);
+int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
+int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
+int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time);
+
+/**
+ * ice_e822_time_ref - Get the current TIME_REF from capabilities
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current TIME_REF from the capabilities structure.
+ */
+static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
+{
+ return hw->func_caps.ts_func_info.time_ref;
+}
+
+/**
+ * ice_set_e822_time_ref - Set new TIME_REF
+ * @hw: pointer to the HW structure
+ * @time_ref: new TIME_REF to set
+ *
+ * Update the TIME_REF in the capabilities structure in response to some
+ * change, such as an update to the CGU registers.
+ */
+static inline void
+ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+{
+ hw->func_caps.ts_func_info.time_ref = time_ref;
+}
+
+static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].pll_freq;
+}
+
+static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].nominal_incval;
+}
+
+static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].pps_delay;
+}
+
+/* E822 Vernier calibration functions */
+int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass);
+int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
@@ -36,19 +195,194 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
+#define ICE_PTP_CLOCK_INDEX_0 0x00
+#define ICE_PTP_CLOCK_INDEX_1 0x01
+
/* PHY timer commands */
#define SEL_CPK_SRC 8
+#define SEL_PHY_SRC 3
/* Time Sync command Definitions */
#define GLTSYN_CMD_INIT_TIME BIT(0)
#define GLTSYN_CMD_INIT_INCVAL BIT(1)
+#define GLTSYN_CMD_INIT_TIME_INCVAL (BIT(0) | BIT(1))
#define GLTSYN_CMD_ADJ_TIME BIT(2)
#define GLTSYN_CMD_ADJ_INIT_TIME (BIT(2) | BIT(3))
#define GLTSYN_CMD_READ_TIME BIT(7)
+/* PHY port Time Sync command definitions */
+#define PHY_CMD_INIT_TIME BIT(0)
+#define PHY_CMD_INIT_INCVAL BIT(1)
+#define PHY_CMD_ADJ_TIME (BIT(0) | BIT(1))
+#define PHY_CMD_ADJ_TIME_AT_TIME (BIT(0) | BIT(2))
+#define PHY_CMD_READ_TIME (BIT(0) | BIT(1) | BIT(2))
+
#define TS_CMD_MASK_E810 0xFF
+#define TS_CMD_MASK 0xF
#define SYNC_EXEC_CMD 0x3
+/* Macros to derive port low and high addresses on both quads */
+#define P_Q0_L(a, p) (((a) + (0x2000 * (p))) & 0xFFFF)
+#define P_Q0_H(a, p) (((a) + (0x2000 * (p))) >> 16)
+#define P_Q1_L(a, p) (((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD))) & 0xFFFF)
+#define P_Q1_H(a, p) (((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD))) >> 16)
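+
+/* For example, a register for port 1 (quad 0) sits 0x2000 above the port 0
+ * copy: P_Q0_L(a, 1) = ((a) + 0x2000) & 0xFFFF and
+ * P_Q0_H(a, 1) = ((a) + 0x2000) >> 16.
+ */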
+
+/* PHY QUAD register base addresses */
+#define Q_0_BASE 0x94000
+#define Q_1_BASE 0x114000
+
+/* Timestamp memory reset registers */
+#define Q_REG_TS_CTRL 0x618
+#define Q_REG_TS_CTRL_S 0
+#define Q_REG_TS_CTRL_M BIT(0)
+
+/* Timestamp availability status registers */
+#define Q_REG_TX_MEMORY_STATUS_L 0xCF0
+#define Q_REG_TX_MEMORY_STATUS_U 0xCF4
+
+/* Tx FIFO status registers */
+#define Q_REG_FIFO23_STATUS 0xCF8
+#define Q_REG_FIFO01_STATUS 0xCFC
+#define Q_REG_FIFO02_S 0
+#define Q_REG_FIFO02_M ICE_M(0x3FF, 0)
+#define Q_REG_FIFO13_S 10
+#define Q_REG_FIFO13_M ICE_M(0x3FF, 10)
+
+/* Interrupt control Config registers */
+#define Q_REG_TX_MEM_GBL_CFG 0xC08
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0)
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15)
+
+/* Tx Timestamp data registers */
+#define Q_REG_TX_MEMORY_BANK_START 0xA00
+
+/* PHY port register base addresses */
+#define P_0_BASE 0x80000
+#define P_4_BASE 0x106000
+
+/* Timestamp init registers */
+#define P_REG_RX_TIMER_INC_PRE_L 0x46C
+#define P_REG_RX_TIMER_INC_PRE_U 0x470
+#define P_REG_TX_TIMER_INC_PRE_L 0x44C
+#define P_REG_TX_TIMER_INC_PRE_U 0x450
+
+/* Timestamp match and adjust target registers */
+#define P_REG_RX_TIMER_CNT_ADJ_L 0x474
+#define P_REG_RX_TIMER_CNT_ADJ_U 0x478
+#define P_REG_TX_TIMER_CNT_ADJ_L 0x454
+#define P_REG_TX_TIMER_CNT_ADJ_U 0x458
+
+/* Timestamp capture registers */
+#define P_REG_RX_CAPTURE_L 0x4D8
+#define P_REG_RX_CAPTURE_U 0x4DC
+#define P_REG_TX_CAPTURE_L 0x4B4
+#define P_REG_TX_CAPTURE_U 0x4B8
+
+/* Timestamp PHY incval registers */
+#define P_REG_TIMETUS_L 0x410
+#define P_REG_TIMETUS_U 0x414
+
+#define P_REG_40B_LOW_M 0xFF
+#define P_REG_40B_HIGH_S 8
+
+/* PHY window length registers */
+#define P_REG_WL 0x40C
+
+#define PTP_VERNIER_WL 0x111ed
+
+/* PHY start registers */
+#define P_REG_PS 0x408
+#define P_REG_PS_START_S 0
+#define P_REG_PS_START_M BIT(0)
+#define P_REG_PS_BYPASS_MODE_S 1
+#define P_REG_PS_BYPASS_MODE_M BIT(1)
+#define P_REG_PS_ENA_CLK_S 2
+#define P_REG_PS_ENA_CLK_M BIT(2)
+#define P_REG_PS_LOAD_OFFSET_S 3
+#define P_REG_PS_LOAD_OFFSET_M BIT(3)
+#define P_REG_PS_SFT_RESET_S 11
+#define P_REG_PS_SFT_RESET_M BIT(11)
+
+/* PHY offset valid registers */
+#define P_REG_TX_OV_STATUS 0x4D4
+#define P_REG_TX_OV_STATUS_OV_S 0
+#define P_REG_TX_OV_STATUS_OV_M BIT(0)
+#define P_REG_RX_OV_STATUS 0x4F8
+#define P_REG_RX_OV_STATUS_OV_S 0
+#define P_REG_RX_OV_STATUS_OV_M BIT(0)
+
+/* PHY offset ready registers */
+#define P_REG_TX_OR 0x45C
+#define P_REG_RX_OR 0x47C
+
+/* PHY total offset registers */
+#define P_REG_TOTAL_RX_OFFSET_L 0x460
+#define P_REG_TOTAL_RX_OFFSET_U 0x464
+#define P_REG_TOTAL_TX_OFFSET_L 0x440
+#define P_REG_TOTAL_TX_OFFSET_U 0x444
+
+/* Timestamp PAR/PCS registers */
+#define P_REG_UIX66_10G_40G_L 0x480
+#define P_REG_UIX66_10G_40G_U 0x484
+#define P_REG_UIX66_25G_100G_L 0x488
+#define P_REG_UIX66_25G_100G_U 0x48C
+#define P_REG_DESK_PAR_RX_TUS_L 0x490
+#define P_REG_DESK_PAR_RX_TUS_U 0x494
+#define P_REG_DESK_PAR_TX_TUS_L 0x498
+#define P_REG_DESK_PAR_TX_TUS_U 0x49C
+#define P_REG_DESK_PCS_RX_TUS_L 0x4A0
+#define P_REG_DESK_PCS_RX_TUS_U 0x4A4
+#define P_REG_DESK_PCS_TX_TUS_L 0x4A8
+#define P_REG_DESK_PCS_TX_TUS_U 0x4AC
+#define P_REG_PAR_RX_TUS_L 0x420
+#define P_REG_PAR_RX_TUS_U 0x424
+#define P_REG_PAR_TX_TUS_L 0x428
+#define P_REG_PAR_TX_TUS_U 0x42C
+#define P_REG_PCS_RX_TUS_L 0x430
+#define P_REG_PCS_RX_TUS_U 0x434
+#define P_REG_PCS_TX_TUS_L 0x438
+#define P_REG_PCS_TX_TUS_U 0x43C
+#define P_REG_PAR_RX_TIME_L 0x4F0
+#define P_REG_PAR_RX_TIME_U 0x4F4
+#define P_REG_PAR_TX_TIME_L 0x4CC
+#define P_REG_PAR_TX_TIME_U 0x4D0
+#define P_REG_PAR_PCS_RX_OFFSET_L 0x4E8
+#define P_REG_PAR_PCS_RX_OFFSET_U 0x4EC
+#define P_REG_PAR_PCS_TX_OFFSET_L 0x4C4
+#define P_REG_PAR_PCS_TX_OFFSET_U 0x4C8
+#define P_REG_LINK_SPEED 0x4FC
+#define P_REG_LINK_SPEED_SERDES_S 0
+#define P_REG_LINK_SPEED_SERDES_M ICE_M(0x7, 0)
+#define P_REG_LINK_SPEED_FEC_MODE_S 3
+#define P_REG_LINK_SPEED_FEC_MODE_M ICE_M(0x3, 3)
+#define P_REG_LINK_SPEED_FEC_MODE(reg) \
+ (((reg) & P_REG_LINK_SPEED_FEC_MODE_M) >> \
+ P_REG_LINK_SPEED_FEC_MODE_S)
+
+/* PHY timestamp related registers */
+#define P_REG_PMD_ALIGNMENT 0x0FC
+#define P_REG_RX_80_TO_160_CNT 0x6FC
+#define P_REG_RX_80_TO_160_CNT_RXCYC_S 0
+#define P_REG_RX_80_TO_160_CNT_RXCYC_M BIT(0)
+#define P_REG_RX_40_TO_160_CNT 0x8FC
+#define P_REG_RX_40_TO_160_CNT_RXCYC_S 0
+#define P_REG_RX_40_TO_160_CNT_RXCYC_M ICE_M(0x3, 0)
+
+/* Rx FIFO status registers */
+#define P_REG_RX_OV_FS 0x4F8
+#define P_REG_RX_OV_FS_FIFO_STATUS_S 2
+#define P_REG_RX_OV_FS_FIFO_STATUS_M ICE_M(0x3FF, 2)
+
+/* Timestamp command registers */
+#define P_REG_TX_TMR_CMD 0x448
+#define P_REG_RX_TMR_CMD 0x468
+
/* E810 timesync enable register */
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
@@ -68,9 +402,20 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
/* Timestamp block macros */
#define TS_LOW_M 0xFFFFFFFF
+#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
+#define TS_PHY_LOW_M 0xFF
+#define TS_PHY_HIGH_M 0xFFFFFFFF
+#define TS_PHY_HIGH_S 8
+
#define BYTES_PER_IDX_ADDR_L_U 8
+#define BYTES_PER_IDX_ADDR_L 4
+
+/* Internal PHY timestamp address */
+#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
+#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \
+ BYTES_PER_IDX_ADDR_L))
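+
+/* For example, with 8 bytes per index, TS_L(a, 3) = (a) + 24 and
+ * TS_H(a, 3) = (a) + 28.
+ */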
/* External PHY timestamp address */
#define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) + \
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 58b1907e3ff1..caf0a02b25f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -298,9 +298,30 @@ struct ice_hw_common_caps {
#define ICE_TS_TMR_IDX_ASSOC_S 24
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
+/* TIME_REF clock rate specification */
+enum ice_time_ref_freq {
+ ICE_TIME_REF_FREQ_25_000 = 0,
+ ICE_TIME_REF_FREQ_122_880 = 1,
+ ICE_TIME_REF_FREQ_125_000 = 2,
+ ICE_TIME_REF_FREQ_153_600 = 3,
+ ICE_TIME_REF_FREQ_156_250 = 4,
+ ICE_TIME_REF_FREQ_245_760 = 5,
+
+ NUM_ICE_TIME_REF_FREQ
+};
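+
+/* For reference (editor's sketch): the enumerators encode the TIME_REF rate
+ * in kHz in their names, so a hypothetical lookup table would read:
+ *
+ *	static const u32 time_ref_khz[NUM_ICE_TIME_REF_FREQ] = {
+ *		25000, 122880, 125000, 153600, 156250, 245760,
+ *	};
+ */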
+
+/* Clock source specification */
+enum ice_clk_src {
+ ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */
+ ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */
+
+ NUM_ICE_CLK_SRC
+};
+
struct ice_ts_func_info {
/* Function specific info */
- u32 clk_freq;
+ enum ice_time_ref_freq time_ref;
+ u8 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
u8 ena;
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 9265901455cd..b9b9d35494d2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -792,7 +792,6 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
**/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
- s32 ret_val = 0;
struct e1000_nvm_info *nvm = &hw->nvm;
nvm->ops.acquire = igb_acquire_nvm_i210;
@@ -813,7 +812,7 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
nvm->ops.validate = NULL;
nvm->ops.update = NULL;
}
- return ret_val;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 4d988da68394..b78407289741 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1520,7 +1520,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
/* Allow time for pending master requests to run */
if (mac->ops.reset_hw(hw))
- dev_warn(&adapter->pdev->dev, "PF still resetting\n");
+ dev_info(&adapter->pdev->dev, "PF still resetting\n");
mac->ops.init_hw(hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index c7fe61509d5b..5c66b97c0cfa 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -85,9 +85,6 @@
#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8)
-/* Physical Func Reset Done Indication */
-#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
@@ -584,7 +581,6 @@
#define IGC_GEN_POLL_TIMEOUT 1920
/* PHY Control Register */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
@@ -605,9 +601,6 @@
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-/* Bit definitions for valid PHY IDs. I = Integrated E = External */
-#define I225_I_PHY_ID 0x67C9DC00
-
/* MDI Control */
#define IGC_MDIC_DATA_MASK 0x0000FFFF
#define IGC_MDIC_REG_MASK 0x001F0000
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 587db7483f25..b1e72ec5f131 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -55,7 +55,6 @@ enum igc_mac_type {
enum igc_phy_type {
igc_phy_unknown = 0,
- igc_phy_none,
igc_phy_i225,
};
@@ -68,8 +67,6 @@ enum igc_media_type {
enum igc_nvm_type {
igc_nvm_unknown = 0,
igc_nvm_eeprom_spi,
- igc_nvm_flash_hw,
- igc_nvm_invm,
};
struct igc_info {
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index b6807e16eea9..66ea566488d1 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -473,13 +473,11 @@ s32 igc_init_nvm_params_i225(struct igc_hw *hw)
/* NVM Function Pointers */
if (igc_get_flash_presence_i225(hw)) {
- hw->nvm.type = igc_nvm_flash_hw;
nvm->ops.read = igc_read_nvm_srrd_i225;
nvm->ops.write = igc_write_nvm_srwr_i225;
nvm->ops.validate = igc_validate_nvm_checksum_i225;
nvm->ops.update = igc_update_nvm_checksum_i225;
} else {
- hw->nvm.type = igc_nvm_invm;
nvm->ops.read = igc_read_nvm_eerd;
nvm->ops.write = NULL;
nvm->ops.validate = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 1c98652b244a..d1093bb2d436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -546,6 +546,13 @@ static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32
return 0;
}
+static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL;
+}
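+
+/* With these parameters registered, the EQ depths become user-tunable, e.g.
+ * (illustrative device name):
+ *
+ *	devlink dev param set pci/0000:06:00.0 name event_eq_size \
+ *		value 1024 cmode driverinit
+ *
+ * The validator above restricts the value to the range [64, 4096].
+ */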
+
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
@@ -570,6 +577,10 @@ static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_enable_remote_dev_reset_get,
mlx5_devlink_enable_remote_dev_reset_set, NULL),
+ DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_eq_depth_validate),
+ DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_eq_depth_validate),
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -608,6 +619,16 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
value);
}
#endif
+
+ value.vu32 = MLX5_COMP_EQ_SIZE;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ value);
+
+ value.vu32 = MLX5_NUM_ASYNC_EQE;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ value);
}
static const struct devlink_param enable_eth_param =
@@ -752,6 +773,66 @@ static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
mlx5_devlink_eth_param_unregister(devlink);
}
+static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (val.vu32 == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs value must be greater than 0");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(val.vu32)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only power of 2 values are supported for max_macs");
+ return -EINVAL;
+ }
+
+ if (ilog2(val.vu32) >
+ MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs value is out of the supported range");
+ return -EINVAL;
+ }
+
+ return 0;
+}
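+
+/* Example (editor's note): "devlink dev param set pci/0000:06:00.0 name
+ * max_macs value 128 cmode driverinit" passes this validator, since 128 is
+ * a non-zero power of two within the device's log_max_current_uc_list range.
+ */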
+
+static const struct devlink_param max_uc_list_param =
+ DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_max_uc_list_validate);
+
+static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+ int err;
+
+ if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
+ return 0;
+
+ err = devlink_param_register(devlink, &max_uc_list_param);
+ if (err)
+ return err;
+
+ value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
+ return 0;
+}
+
+static void
+mlx5_devlink_max_uc_list_param_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
+ return;
+
+ devlink_param_unregister(devlink, &max_uc_list_param);
+}
+
#define MLX5_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
@@ -811,6 +892,10 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto auxdev_reg_err;
+ err = mlx5_devlink_max_uc_list_param_register(devlink);
+ if (err)
+ goto max_uc_list_err;
+
err = mlx5_devlink_traps_register(devlink);
if (err)
goto traps_reg_err;
@@ -821,6 +906,8 @@ int mlx5_devlink_register(struct devlink *devlink)
return 0;
traps_reg_err:
+ mlx5_devlink_max_uc_list_param_unregister(devlink);
+max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
@@ -831,6 +918,7 @@ auxdev_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
mlx5_devlink_traps_unregister(devlink);
+ mlx5_devlink_max_uc_list_param_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e77c4159713f..33679467c63a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -145,7 +145,6 @@ struct page_pool;
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
-#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
@@ -875,10 +874,8 @@ struct mlx5e_trap;
struct mlx5e_priv {
/* priv data path fields - start */
- /* +1 for port ptp ts */
- struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
- MLX5E_QOS_MAX_LEAF_NODES];
- int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+ struct mlx5e_txqsq **txq2sq;
+ int **channel_tc2realtxq;
int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
@@ -893,7 +890,7 @@ struct mlx5e_priv {
struct mlx5e_channels channels;
u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rx_res *rx_res;
- u32 tx_rates[MLX5E_MAX_NUM_SQS];
+ u32 *tx_rates;
struct mlx5e_flow_steering fs;
@@ -909,7 +906,7 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_trap *en_trap;
struct mlx5e_stats stats;
- struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_channel_stats **channel_stats;
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
u16 stats_nch;
@@ -956,6 +953,12 @@ struct mlx5e_rx_handlers {
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
+enum mlx5e_profile_feature {
+ MLX5E_PROFILE_FEATURE_PTP_RX,
+ MLX5E_PROFILE_FEATURE_PTP_TX,
+ MLX5E_PROFILE_FEATURE_QOS_HTB,
+};
+
struct mlx5e_profile {
int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev);
@@ -969,14 +972,18 @@ struct mlx5e_profile {
int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
+ int (*max_nch_limit)(struct mlx5_core_dev *mdev);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
u8 rq_groups;
- bool rx_ptp_support;
+ u32 features;
};
+#define mlx5e_profile_feature_cap(profile, feature) \
+ ((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))
+
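+/* For instance, mlx5e_profile_feature_cap(profile, PTP_RX) expands to
+ * profile->features & BIT(MLX5E_PROFILE_FEATURE_PTP_RX), testing the PTP RX
+ * bit set in the profile's features mask.
+ */
+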
void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
@@ -1188,8 +1195,7 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- unsigned int txqs, unsigned int rxqs);
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index d290d7276b8d..074ffa4fa5af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -20,7 +20,7 @@ mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch,
struct mlx5e_channel_stats *stats;
int tc;
- stats = &priv->channel_stats[ch];
+ stats = priv->channel_stats[ch];
data->rx_packets = stats->rq.packets;
data->rx_bytes = stats->rq.bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 18d542b1c5cb..82baafd3c00c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -768,7 +768,7 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs;
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return 0;
ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
@@ -783,7 +783,7 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return;
mlx5e_ptp_rx_unset_fs(priv);
@@ -794,7 +794,7 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
struct mlx5e_ptp *c = priv->channels.ptp;
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return 0;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index fcb0892c08a9..0991345c4ae5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -517,6 +517,9 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
+ if (!netdev)
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_BLOCK:
return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 0015a81eb9a1..24c32f73040a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -37,7 +37,6 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
@@ -52,7 +51,7 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
return -ENOMEM;
err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
- init_pkt_merge_param);
+ &res->pkt_merge_param);
if (err)
goto err_rss_free;
@@ -277,8 +276,7 @@ struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
}
-static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param)
+static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_tir_builder *builder;
@@ -309,7 +307,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
@@ -339,7 +337,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
@@ -454,11 +452,11 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->pkt_merge_param = *init_pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);
- err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
+ err = mlx5e_rx_res_rss_init_def(res, init_nch);
if (err)
goto err_out;
- err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param);
+ err = mlx5e_rx_res_channels_init(res);
if (err)
goto err_rss_destroy;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 538bc2419bd8..5f2b67b9c189 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -67,7 +67,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->xsk_pool = pool;
- rq->stats = &c->priv->channel_stats[c->ix].xskrq;
+ rq->stats = &c->priv->channel_stats[c->ix]->xskrq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
err = mlx5e_rq_set_handlers(rq, params, xsk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 15711814d2d2..96064a2033f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -611,7 +611,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->rxq = rxq;
priv_rx->sk = sk;
- priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+ priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index fe5d82fa6e92..49cca6bd49a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -556,7 +556,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
+ priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
mlx5e_dbg(HW, priv,
"%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
__func__, arfs_rule->filter_id, arfs_rule->rxq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c8757c5a812b..536fcb2c5e90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1934,7 +1934,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
- if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
+ if (new_val && !mlx5e_profile_feature_cap(priv->profile, PTP_RX) && rx_filter) {
netdev_err(priv->netdev,
"Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 496977e7406e..1a47108805fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -479,7 +479,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
- rq->stats = &c->priv->channel_stats[c->ix].rq;
+ rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
err = mlx5e_rq_set_handlers(rq, params, NULL);
if (err)
@@ -1161,10 +1161,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->xsk_pool = xsk_pool;
sq->stats = sq->xsk_pool ?
- &c->priv->channel_stats[c->ix].xsksq :
+ &c->priv->channel_stats[c->ix]->xsksq :
is_redirect ?
- &c->priv->channel_stats[c->ix].xdpsq :
- &c->priv->channel_stats[c->ix].rq_xdpsq;
+ &c->priv->channel_stats[c->ix]->xdpsq :
+ &c->priv->channel_stats[c->ix]->rq_xdpsq;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1928,7 +1928,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
params, &cparam->txq_sq, &c->sq[tc], tc,
qos_queue_group_id,
- &c->priv->channel_stats[c->ix].sq[tc]);
+ &c->priv->channel_stats[c->ix]->sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -2176,6 +2176,30 @@ static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
}
+static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
+{
+ if (ix > priv->stats_nch) {
+ netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
+ priv->stats_nch);
+ return -EINVAL;
+ }
+
+ if (priv->channel_stats[ix])
+ return 0;
+
+ /* Asymmetric dynamic memory allocation.
+ * Freed in mlx5e_priv_cleanup, not on channel closure.
+ */
+ mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix);
+ priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!priv->channel_stats[ix])
+ return -ENOMEM;
+ priv->stats_nch++;
+
+ return 0;
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -2193,6 +2217,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
return err;
+ err = mlx5e_channel_stats_alloc(priv, ix, cpu);
+ if (err)
+ return err;
+
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -2207,7 +2235,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
- c->stats = &priv->channel_stats[ix].ch;
+ c->stats = &priv->channel_stats[ix]->ch;
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
@@ -3371,7 +3399,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
int i;
for (i = 0; i < priv->stats_nch; i++) {
- struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
+ struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
int j;
@@ -4038,7 +4066,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
goto err_unlock;
}
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
err = mlx5e_hwstamp_config_no_ptp_rx(priv,
config.rx_filter != HWTSTAMP_FILTER_NONE);
else
@@ -5093,9 +5121,23 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
- .rx_ptp_support = true,
+ .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
+ BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
+ BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
};
+static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile)
+{
+ int nch;
+
+ nch = mlx5e_get_max_num_channels(mdev);
+
+ if (profile->max_nch_limit)
+ nch = min_t(int, nch, profile->max_nch_limit(mdev));
+ return nch;
+}
+
static unsigned int
mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
const struct mlx5e_profile *profile)
@@ -5104,7 +5146,7 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
unsigned int max_nch, tmp;
/* core resources */
- max_nch = mlx5e_get_max_num_channels(mdev);
+ max_nch = mlx5e_profile_max_num_channels(mdev, profile);
/* netdev rx queues */
tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
@@ -5128,12 +5170,17 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
+ int nch, num_txqs, node, i;
+
+ num_txqs = netdev->num_tx_queues;
+ nch = mlx5e_calc_max_nch(mdev, netdev, profile);
+ node = dev_to_node(mlx5_core_dma_dev(mdev));
+
/* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
priv->msglevel = MLX5E_MSG_LEVEL;
- priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile);
- priv->stats_nch = priv->max_nch;
+ priv->max_nch = nch;
priv->max_opened_tc = 1;
if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
@@ -5150,11 +5197,46 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->wq)
goto err_free_cpumask;
+ priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
+ if (!priv->txq2sq)
+ goto err_destroy_workqueue;
+
+ priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
+ if (!priv->tx_rates)
+ goto err_free_txq2sq;
+
+ priv->channel_tc2realtxq =
+ kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node);
+ if (!priv->channel_tc2realtxq)
+ goto err_free_tx_rates;
+
+ for (i = 0; i < nch; i++) {
+ priv->channel_tc2realtxq[i] =
+ kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq),
+ GFP_KERNEL, node);
+ if (!priv->channel_tc2realtxq[i])
+ goto err_free_channel_tc2realtxq;
+ }
+
+ priv->channel_stats =
+ kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
+ if (!priv->channel_stats)
+ goto err_free_channel_tc2realtxq;
+
return 0;
+err_free_channel_tc2realtxq:
+ while (--i >= 0)
+ kfree(priv->channel_tc2realtxq[i]);
+ kfree(priv->channel_tc2realtxq);
+err_free_tx_rates:
+ kfree(priv->tx_rates);
+err_free_txq2sq:
+ kfree(priv->txq2sq);
+err_destroy_workqueue:
+ destroy_workqueue(priv->wq);
err_free_cpumask:
free_cpumask_var(priv->scratchpad.cpumask);
-
return -ENOMEM;
}
@@ -5166,6 +5248,14 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
if (!priv->mdev)
return;
+ for (i = 0; i < priv->stats_nch; i++)
+ kvfree(priv->channel_stats[i]);
+ kfree(priv->channel_stats);
+ for (i = 0; i < priv->max_nch; i++)
+ kfree(priv->channel_tc2realtxq[i]);
+ kfree(priv->channel_tc2realtxq);
+ kfree(priv->tx_rates);
+ kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
free_cpumask_var(priv->scratchpad.cpumask);
@@ -5181,13 +5271,44 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
memset(priv, 0, sizeof(*priv));
}
+static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile)
+{
+ unsigned int nch, ptp_txqs, qos_txqs;
+
+ nch = mlx5e_profile_max_num_channels(mdev, profile);
+
+ ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
+ mlx5e_profile_feature_cap(profile, PTP_TX) ?
+ profile->max_tc : 0;
+
+ qos_txqs = mlx5_qos_is_supported(mdev) &&
+ mlx5e_profile_feature_cap(profile, QOS_HTB) ?
+ mlx5e_qos_max_leaf_nodes(mdev) : 0;
+
+ return nch * profile->max_tc + ptp_txqs + qos_txqs;
+}
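+
+/* Worked example (editor's note, illustrative cap values): with 64 channels,
+ * max_tc = 8, PTP TX supported (8 extra queues) and 256 HTB leaf nodes, this
+ * returns 64 * 8 + 8 + 256 = 776 TX queues.
+ */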
+
+static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile)
+{
+ unsigned int nch;
+
+ nch = mlx5e_profile_max_num_channels(mdev, profile);
+
+ return nch * profile->rq_groups;
+}
+
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
{
struct net_device *netdev;
+ unsigned int txqs, rxqs;
int err;
+ txqs = mlx5e_get_max_num_txqs(mdev, profile);
+ rxqs = mlx5e_get_max_num_rxqs(mdev, profile);
+
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
@@ -5432,22 +5553,10 @@ static int mlx5e_probe(struct auxiliary_device *adev,
struct mlx5_core_dev *mdev = edev->mdev;
struct net_device *netdev;
pm_message_t state = {};
- unsigned int txqs, rxqs, ptp_txqs = 0;
struct mlx5e_priv *priv;
- int qos_sqs = 0;
int err;
- int nch;
-
- if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
- ptp_txqs = profile->max_tc;
- if (mlx5_qos_is_supported(mdev))
- qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
-
- nch = mlx5e_get_max_num_channels(mdev);
- txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
- rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
+ netdev = mlx5e_create_netdev(mdev, profile);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 6e0f88ea3701..8c0f4cfbe471 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -591,6 +591,12 @@ bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
+static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
+{
+ return (1 << MLX5_CAP_GEN(mdev, log_max_tir)) /
+ mlx5_eswitch_get_total_vports(mdev);
+}
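+
+/* Editor's note (illustrative values): with log_max_tir = 12 and 128 total
+ * vports, each representor is limited to 4096 / 128 = 32 channels, so the
+ * representors cannot exhaust the device's TIR namespace.
+ */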
+
static void mlx5e_build_rep_params(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1113,7 +1119,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
- .rx_ptp_support = false,
+ .max_nch_limit = mlx5e_rep_max_nch_limit,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1134,7 +1140,6 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
- .rx_ptp_support = false,
};
/* e-Switch vport representors */
@@ -1185,14 +1190,10 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct devlink_port *dl_port;
struct net_device *netdev;
struct mlx5e_priv *priv;
- unsigned int txqs, rxqs;
- int nch, err;
+ int err;
profile = &mlx5e_rep_profile;
- nch = mlx5e_get_max_num_channels(dev);
- txqs = nch * profile->max_tc;
- rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
+ netdev = mlx5e_create_netdev(dev, profile);
if (!netdev) {
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7e05d7592bce..f09b57c31ed7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -2189,7 +2189,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
priv = mlx5i_epriv(netdev);
tstamp = &priv->tstamp;
- stats = &priv->channel_stats[rq->ix].rq;
+ stats = rq->stats;
flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
g = (flags_rqpn >> 28) & 3;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 3c91a11e27ad..73fcd9fb17dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -463,7 +463,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
- &priv->channel_stats[i];
+ priv->channel_stats[i];
int j;
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
@@ -2197,21 +2197,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
ch_stats_desc, j);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
rq_stats_desc, j);
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
xskrq_stats_desc, j);
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
rq_xdpsq_stats_desc, j);
}
@@ -2219,17 +2219,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
sq_stats_desc, j);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
xsksq_stats_desc, j);
for (j = 0; j < NUM_XDPSQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
xdpsq_stats_desc, j);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 792e0d6aa861..b695aad71ee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -19,6 +19,7 @@
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
+#include "devlink.h"
enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -622,6 +623,20 @@ static void cleanup_async_eq(struct mlx5_core_dev *dev,
name, err);
}
+static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ &val);
+ if (!err)
+ return val.vu32;
+ mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
+ return MLX5_NUM_ASYNC_EQE;
+}
+
static int create_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -646,7 +661,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
param = (struct mlx5_eq_param) {
.irq_index = MLX5_IRQ_EQ_CTRL,
- .nent = MLX5_NUM_ASYNC_EQE,
+ .nent = async_eq_depth_devlink_param_get(dev),
};
gather_async_events_mask(dev, param.mask);
@@ -796,6 +811,21 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
}
}
+static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ &val);
+ if (!err)
+ return val.vu32;
+ mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
+ return MLX5_COMP_EQ_SIZE;
+}
+
static int create_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -807,7 +837,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&table->comp_eqs_list);
ncomp_eqs = table->num_comp_eqs;
- nent = MLX5_COMP_EQ_SIZE;
+ nent = comp_eq_depth_devlink_param_get(dev);
for (i = 0; i < ncomp_eqs; i++) {
struct mlx5_eq_param param = {};
int vecidx = i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 513f741d16c7..ead5e8acc8be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -343,9 +343,6 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);
-bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
-int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
-
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 051b20ec7bdb..0a99a020a3b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -117,7 +117,7 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;
- channel_stats = &priv->channel_stats[i];
+ channel_stats = priv->channel_stats[i];
rq_stats = &channel_stats->rq;
s.rx_packets += rq_stats->packets;
@@ -449,7 +449,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
- .rx_ptp_support = false,
};
/* mlx5i netdev NDos */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 5308f23702bc..0b86e78dbc0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -350,7 +350,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
- .rx_ptp_support = false,
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d97c9e86d7b3..b1a82226623c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -484,10 +484,26 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}
+static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &val);
+ if (!err)
+ return val.vu32;
+ mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
+ return err;
+}
+
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
+ int max_uc_list;
int err;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
@@ -561,6 +577,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN(dev, roce_rw_supported))
MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+ max_uc_list = max_uc_list_get_devlink_param(dev);
+ if (max_uc_list > 0)
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
+ ilog2(max_uc_list));
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 7e89d5980d5e..866b9357939b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1708,6 +1708,124 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
static const struct mlxsw_listener mlxsw_core_health_listener =
MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
+static int
+mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val, tile_v;
+ int err;
+
+ val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val);
+ if (err)
+ return err;
+ tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl);
+ if (tile_v) {
+ val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val, tile_v;
+ int err;
+
+ val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var0", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var1", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var2", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var3", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var4", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "callra", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
+ if (tile_v) {
+ val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
+ if (err)
+ return err;
+ }
+ val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val;
+ int err;
+
+ val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl);
+ return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val;
+ int err;
+
+ val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
+ err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg, void *priv_ctx,
struct netlink_ext_ack *extack)
@@ -1741,6 +1859,46 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
val_str = "KVD insertion machine stopped";
break;
+ case MLXSW_REG_MFDE_EVENT_ID_TEST:
+ val_str = "Test";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
+ val_str = "FW assert";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
+ val_str = "Fatal cause";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ if (err)
+ return err;
+ }
+
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity");
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_severity_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "id", val);
+ if (err)
+ return err;
+ switch (val) {
+ case MLXSW_REG_MFDE_SEVERITY_FATL:
+ val_str = "Fatal";
+ break;
+ case MLXSW_REG_MFDE_SEVERITY_NRML:
+ val_str = "Normal";
+ break;
+ case MLXSW_REG_MFDE_SEVERITY_INTR:
+ val_str = "Debug";
+ break;
default:
val_str = NULL;
}
@@ -1749,6 +1907,7 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
if (err)
return err;
}
+
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
return err;
@@ -1800,24 +1959,18 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
if (err)
return err;
- if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) {
- val = mlxsw_reg_mfde_log_address_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
- if (err)
- return err;
- val = mlxsw_reg_mfde_log_id_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
- if (err)
- return err;
- val = mlxsw_reg_mfde_log_ip_get(mfde_pl);
- err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
- if (err)
- return err;
- } else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
- val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
- if (err)
- return err;
+ switch (event_id) {
+ case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
+ return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl,
+ fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
+ return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl,
+ fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
+ return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
+ return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl,
+ fmsg);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index f748b537bdab..c97d2c744725 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -11318,7 +11318,7 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
* -----------------------------------
*/
#define MLXSW_REG_MFDE_ID 0x9200
-#define MLXSW_REG_MFDE_LEN 0x18
+#define MLXSW_REG_MFDE_LEN 0x30
MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
@@ -11328,10 +11328,32 @@ MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
*/
MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 24, 8);
+enum mlxsw_reg_mfde_severity {
+ /* Unrecoverable switch behavior */
+ MLXSW_REG_MFDE_SEVERITY_FATL = 2,
+ /* Unexpected state with possible systemic failure */
+ MLXSW_REG_MFDE_SEVERITY_NRML = 3,
+ /* Unexpected state without systemic failure */
+ MLXSW_REG_MFDE_SEVERITY_INTR = 5,
+};
+
+/* reg_mfde_severity
+ * The severity of the event.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, severity, 0x00, 16, 8);
+
enum mlxsw_reg_mfde_event_id {
+ /* CRspace timeout */
MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1,
/* KVD insertion machine stopped */
MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP,
+ /* Triggered by MFGD.trigger_test */
+ MLXSW_REG_MFDE_EVENT_ID_TEST,
+ /* Triggered when firmware hits an assert */
+ MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT,
+ /* Fatal error interrupt from hardware */
+ MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE,
};
/* reg_mfde_event_id
@@ -11372,32 +11394,110 @@ MLXSW_ITEM32(reg, mfde, command_type, 0x04, 24, 2);
*/
MLXSW_ITEM32(reg, mfde, reg_attr_id, 0x04, 0, 16);
-/* reg_mfde_log_address
+/* reg_mfde_crspace_to_log_address
* crspace address accessed, which resulted in timeout.
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32);
+MLXSW_ITEM32(reg, mfde, crspace_to_log_address, 0x10, 0, 32);
-/* reg_mfde_log_id
+/* reg_mfde_crspace_to_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, crspace_to_oe, 0x14, 24, 1);
+
+/* reg_mfde_crspace_to_log_id
* Which irisc triggered the timeout.
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4);
+MLXSW_ITEM32(reg, mfde, crspace_to_log_id, 0x14, 0, 4);
-/* reg_mfde_log_ip
+/* reg_mfde_crspace_to_log_ip
* IP (instruction pointer) that triggered the timeout.
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
* Access: RO
*/
-MLXSW_ITEM64(reg, mfde, log_ip, 0x18, 0, 64);
+MLXSW_ITEM64(reg, mfde, crspace_to_log_ip, 0x18, 0, 64);
+
+/* reg_mfde_kvd_im_stop_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, kvd_im_stop_oe, 0x10, 24, 1);
-/* reg_mfde_pipes_mask
+/* reg_mfde_kvd_im_stop_pipes_mask
* Bit per kvh pipe.
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, pipes_mask, 0x10, 0, 16);
+MLXSW_ITEM32(reg, mfde, kvd_im_stop_pipes_mask, 0x10, 0, 16);
+
+/* reg_mfde_fw_assert_var0-4
+ * Variables passed to assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_var0, 0x10, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var1, 0x14, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var2, 0x18, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var3, 0x1C, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var4, 0x20, 0, 32);
+
+/* reg_mfde_fw_assert_existptr
+ * The instruction pointer when assert was triggered.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_existptr, 0x24, 0, 32);
+
+/* reg_mfde_fw_assert_callra
+ * The return address after triggering assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_callra, 0x28, 0, 32);
+
+/* reg_mfde_fw_assert_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_oe, 0x2C, 24, 1);
+
+/* reg_mfde_fw_assert_tile_v
+ * 0: The assert was from main
+ * 1: The assert was from a tile
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_tile_v, 0x2C, 23, 1);
+
+/* reg_mfde_fw_assert_tile_index
+ * When tile_v=1, the tile_index that caused the assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_tile_index, 0x2C, 16, 6);
+
+/* reg_mfde_fw_assert_ext_synd
+ * A generated one-to-one identifier which is specific per-assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_ext_synd, 0x2C, 0, 16);
+
+/* reg_mfde_fatal_cause_id
+ * HW interrupt cause id.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_id, 0x10, 0, 18);
+
+/* reg_mfde_fatal_cause_tile_v
+ * 0: The fatal cause was from main
+ * 1: The fatal cause was from a tile
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_tile_v, 0x14, 23, 1);
+
+/* reg_mfde_fatal_cause_tile_index
+ * When tile_v=1, the tile_index that caused the fatal event.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_tile_index, 0x14, 16, 6);
/* TNGCR - Tunneling NVE General Configuration Register
* ----------------------------------------------------
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index be3791ca6069..bb417db773b9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -203,7 +203,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
*/
burst = roundup_pow_of_two(act->police.burst);
err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
- act->police.index,
+ act->hw_index,
act->police.rate_bytes_ps,
burst, extack);
if (err)
@@ -508,7 +508,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
- if (mlxsw_sp_flow_block_is_egress_bound(block)) {
+ if (mlxsw_sp_flow_block_is_egress_bound(block) &&
+ match.mask->vlan_id) {
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index 2860a8c9923d..ac273f84b69e 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -2,6 +2,7 @@ config LAN966X_SWITCH
tristate "Lan966x switch driver"
depends on HAS_IOMEM
depends on OF
+ depends on NET_SWITCHDEV
select PHYLINK
select PACKING
help
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 2989ba528236..ec1a1fa8b0d5 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
- lan966x_mac.o lan966x_ethtool.o
+ lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
+ lan966x_vlan.o lan966x_fdb.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
new file mode 100644
index 000000000000..da5ca7188679
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ struct lan966x *lan966x;
+ unsigned long event;
+};
+
+struct lan966x_fdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u32 references;
+};
+
+static struct lan966x_fdb_entry *
+lan966x_fdb_find_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr))
+ return fdb_entry;
+ }
+
+ return NULL;
+}
+
+static void lan966x_fdb_add_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ fdb_entry = lan966x_fdb_find_entry(lan966x, fdb_info);
+ if (fdb_entry) {
+ fdb_entry->references++;
+ return;
+ }
+
+ fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+ if (!fdb_entry)
+ return;
+
+ ether_addr_copy(fdb_entry->mac, fdb_info->addr);
+ fdb_entry->vid = fdb_info->vid;
+ fdb_entry->references = 1;
+ list_add_tail(&fdb_entry->list, &lan966x->fdb_entries);
+}
+
+static bool lan966x_fdb_del_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries,
+ list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr)) {
+ fdb_entry->references--;
+ if (!fdb_entry->references) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_forget(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+static void lan966x_fdb_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, list) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ }
+}
+
+int lan966x_fdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->fdb_entries);
+ lan966x->fdb_work = alloc_ordered_workqueue("lan966x_order", 0);
+ if (!lan966x->fdb_work)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void lan966x_fdb_deinit(struct lan966x *lan966x)
+{
+ destroy_workqueue(lan966x->fdb_work);
+ lan966x_fdb_purge_entries(lan966x);
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+ struct lan966x_fdb_event_work *fdb_work =
+ container_of(work, struct lan966x_fdb_event_work, work);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct net_device *dev = fdb_work->dev;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+ int ret;
+
+ fdb_info = &fdb_work->fdb_info;
+ lan966x = fdb_work->lan966x;
+
+ if (lan966x_netdevice_check(dev)) {
+ port = netdev_priv(dev);
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+ } else {
+ if (!netif_is_bridge_master(dev))
+ goto out;
+
+ /* The notification was generated for the bridge device itself */
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ /* If no front port is a member of this vlan, there is
+ * no point in copying the frame to the CPU because it
+ * would only be dropped later. So program the entry in
+ * HW only if a port is in the vlan, but always store it
+ * in SW so it can be written once a port actually joins
+ * the vlan.
+ */
+ lan966x_fdb_add_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ if (ret)
+ lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+ }
+
+out:
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+ dev_put(dev);
+}
+
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_fdb_event_work *fdb_work;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (lan966x_netdevice_check(orig_dev) &&
+ !fdb_info->added_by_user)
+ break;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (!fdb_work)
+ return -ENOMEM;
+
+ fdb_work->dev = orig_dev;
+ fdb_work->lan966x = lan966x;
+ fdb_work->event = event;
+ INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
+ memcpy(&fdb_work->fdb_info, fdb_info, sizeof(fdb_work->fdb_info));
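+ /* fdb_info->addr points into the notifier payload, which is
+ * not valid once the notifier chain returns, so take a private
+ * copy and hold the device until the work item has run.
+ */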
+ fdb_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!fdb_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
+ dev_hold(orig_dev);
+
+ queue_work(lan966x->fdb_work, &fdb_work->work);
+ break;
+ }
+
+ return 0;
+err_addr_alloc:
+ kfree(fdb_work);
+ return -ENOMEM;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index f6878b9f57ef..efadb8d326cc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
+#include <net/switchdev.h>
#include "lan966x_main.h"
#define LAN966X_MAC_COLUMNS 4
@@ -13,6 +14,23 @@
#define MACACCESS_CMD_WRITE 7
#define MACACCESS_CMD_SYNC_GET_NEXT 8
+#define LAN966X_MAC_INVALID_ROW -1
+
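+/* SW copy of a MAC table entry. 'row' is the MAC table row at which
+ * the HW scan created the entry; entries added by SW keep
+ * LAN966X_MAC_INVALID_ROW and are therefore never aged by the scan.
+ */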
+struct lan966x_mac_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u16 port_index;
+ int row;
+};
+
+struct lan966x_mac_raw_entry {
+ u32 mach;
+ u32 macl;
+ u32 maca;
+ bool processed;
+};
+
static int lan966x_mac_get_status(struct lan966x *lan966x)
{
return lan_rd(lan966x, ANA_MACACCESS);
@@ -93,9 +111,333 @@ int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid)
return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED);
}
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing)
+{
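+ /* An entry is aged out after two AGE_PERIODs, as on other
+ * Ocelot-family switches, hence programming half the requested
+ * ageing time.
+ */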
+ lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2),
+ ANA_AUTOAGE_AGE_PERIOD,
+ lan966x, ANA_AUTOAGE);
+}
+
void lan966x_mac_init(struct lan966x *lan966x)
{
/* Clear the MAC table */
lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS);
lan966x_mac_wait_for_completion(lan966x);
+
+ spin_lock_init(&lan966x->mac_lock);
+ INIT_LIST_HEAD(&lan966x->mac_entries);
+}
+
+static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL);
+ if (!mac_entry)
+ return NULL;
+
+ memcpy(mac_entry->mac, mac, ETH_ALEN);
+ mac_entry->vid = vid;
+ mac_entry->port_index = port_index;
+ mac_entry->row = LAN966X_MAC_INVALID_ROW;
+ return mac_entry;
+}
+
+static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *res = NULL;
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac, mac_entry->mac) &&
+ mac_entry->port_index == port_index) {
+ res = mac_entry;
+ break;
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return res;
+}
+
+static int lan966x_mac_lookup(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ int ret;
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a read command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ),
+ lan966x, ANA_MACACCESS);
+
+ ret = lan966x_mac_wait_for_completion(lan966x);
+ if (ret)
+ return ret;
+
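+ /* The VALID bit tells whether the MAC/VID pair exists in HW */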
+ return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS));
+}
+
+static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev)
+{
+ struct switchdev_notifier_fdb_info info = { 0 };
+
+ info.addr = mac;
+ info.vid = vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+ return 0;
+
+ /* If the entry already exists in SW, don't add it again, just
+ * refresh HW. The HW lookup above is still needed because an
+ * entry can be learned by HW and, before the interrupt arrives,
+ * a frame can reach the CPU, which then adds the entry but
+ * without the extern_learn flag.
+ */
+ mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
+ if (mac_entry)
+ return lan966x_mac_learn(lan966x, port->chip_port,
+ addr, vid, ENTRYTYPE_LOCKED);
+
+ mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
+ if (!mac_entry)
+ return -ENOMEM;
+
+ spin_lock(&lan966x->mac_lock);
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+
+ return 0;
+}
+
+int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
+ u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(addr, mac_entry->mac)) {
+ lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return 0;
+}
+
+void lan966x_mac_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+static void lan966x_mac_notifiers(enum switchdev_notifier_type type,
+ unsigned char *mac, u32 vid,
+ struct net_device *dev)
+{
+ rtnl_lock();
+ lan966x_fdb_call_notifiers(type, mac, vid, dev);
+ rtnl_unlock();
+}
+
+static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry,
+ u8 *mac, u16 *vid, u32 *dest_idx)
+{
+ mac[0] = (raw_entry->mach >> 8) & 0xff;
+ mac[1] = (raw_entry->mach >> 0) & 0xff;
+ mac[2] = (raw_entry->macl >> 24) & 0xff;
+ mac[3] = (raw_entry->macl >> 16) & 0xff;
+ mac[4] = (raw_entry->macl >> 8) & 0xff;
+ mac[5] = (raw_entry->macl >> 0) & 0xff;
+
+ *vid = (raw_entry->mach >> 16) & 0xfff;
+ *dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca);
+}
+
+static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
+ struct lan966x_mac_raw_entry *raw_entries)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u32 dest_idx;
+ u32 column;
+ u16 vid;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
+ bool found = false;
+
+ if (mac_entry->row != row)
+ continue;
+
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+ /* All the valid entries are at the start of the row,
+ * so once an invalid entry is found the rest of the
+ * columns can be skipped.
+ */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ WARN_ON(dest_idx >= lan966x->num_phys_ports);
+
+ /* If the HW entry matches an entry in SW, there is
+ * nothing to do
+ */
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac_entry->mac, mac) &&
+ mac_entry->port_index == dest_idx) {
+ raw_entries[column].processed = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* Notify the bridge that the entry doesn't exist
+ * anymore in the HW and remove the entry from the SW
+ * list
+ */
+ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mac_entry->mac, mac_entry->vid,
+ lan966x->ports[mac_entry->port_index]->dev);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ /* Now walk the columns again: any valid entry that was not matched
+ * against the SW list is new, so the bridge needs to be notified
+ * about it.
+ */
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+ /* All the valid entries are at the start of the row, so once
+ * an invalid entry is found the rest of the columns can be
+ * skipped.
+ */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ /* If the entry already exists then don't do anything */
+ if (raw_entries[column].processed)
+ continue;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ WARN_ON(dest_idx >= lan966x->num_phys_ports);
+
+ mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
+ if (!mac_entry)
+ return;
+
+ mac_entry->row = row;
+
+ spin_lock(&lan966x->mac_lock);
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ mac, vid, lan966x->ports[dest_idx]->dev);
+ }
+}
+
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
+{
+ struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 };
+ u32 index, column;
+ bool stop = true;
+ u32 val;
+
+ /* Start the scan from 0, 0 */
+ lan_wr(ANA_MACTINDX_M_INDEX_SET(0) |
+ ANA_MACTINDX_BUCKET_SET(0),
+ lan966x, ANA_MACTINDX);
+
+ while (1) {
+ lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
+ ANA_MACACCESS_MAC_TABLE_CMD,
+ lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ val = lan_rd(lan966x, ANA_MACTINDX);
+ index = ANA_MACTINDX_M_INDEX_GET(val);
+ column = ANA_MACTINDX_BUCKET_GET(val);
+
+ /* SYNC-GET-NEXT returns all the entries (4 columns) of a row
+ * that has seen a change, meaning a new entry was added or an
+ * entry was removed because of ageing. It returns every column
+ * of that row and then moves on to the next changed row. The
+ * stop condition is reaching row 0, column 3 'directly'. So if
+ * SYNC-GET-NEXT first returns row 0 and column 0, reading must
+ * continue even when row 0 and column 3 are reached.
+ */
+ if (index == 0 && column == 0)
+ stop = false;
+
+ if (column == LAN966X_MAC_COLUMNS - 1 &&
+ index == 0 && stop)
+ break;
+
+ entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
+ entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
+ entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+
+ /* Once all the columns are read process them */
+ if (column == LAN966X_MAC_COLUMNS - 1) {
+ lan966x_mac_irq_process(lan966x, index, entry);
+ /* A row was processed so it is safe to assume that the
+ * next row/column can be the stop condition
+ */
+ stop = true;
+ }
+ }
+
+ lan_rmw(ANA_ANAINTR_INTR_SET(0),
+ ANA_ANAINTR_INTR,
+ lan966x, ANA_ANAINTR);
+
+ return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 101c1f005baf..16f4d8737d7b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -108,12 +108,12 @@ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
int ret;
/* Learn the new net device MAC address in the mac table. */
- ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, port->pvid);
+ ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
if (ret)
return ret;
/* Then forget the previous one. */
- ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, port->pvid);
+ ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
if (ret)
return ret;
@@ -283,6 +283,12 @@ static void lan966x_ifh_set_ipv(void *ifh, u64 bypass)
IFH_POS_IPV, IFH_LEN * 4, PACK, 0);
}
+static void lan966x_ifh_set_vid(void *ifh, u64 vid)
+{
+ packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1,
+ IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
+}
+
static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
@@ -294,32 +300,11 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
return lan966x_port_ifh_xmit(skb, ifh, dev);
}
-static void lan966x_set_promisc(struct lan966x_port *port, bool enable)
-{
- struct lan966x *lan966x = port->lan966x;
-
- lan_rmw(ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(enable),
- ANA_CPU_FWD_CFG_SRC_COPY_ENA,
- lan966x, ANA_CPU_FWD_CFG(port->chip_port));
-}
-
-static void lan966x_port_change_rx_flags(struct net_device *dev, int flags)
-{
- struct lan966x_port *port = netdev_priv(dev);
-
- if (!(flags & IFF_PROMISC))
- return;
-
- if (dev->flags & IFF_PROMISC)
- lan966x_set_promisc(port, true);
- else
- lan966x_set_promisc(port, false);
-}
-
static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
{
struct lan966x_port *port = netdev_priv(dev);
@@ -369,7 +354,6 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_open = lan966x_port_open,
.ndo_stop = lan966x_port_stop,
.ndo_start_xmit = lan966x_port_xmit,
- .ndo_change_rx_flags = lan966x_port_change_rx_flags,
.ndo_change_mtu = lan966x_port_change_mtu,
.ndo_set_rx_mode = lan966x_port_set_rx_mode,
.ndo_get_phys_port_name = lan966x_port_get_phys_port_name,
@@ -378,6 +362,11 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
};
+bool lan966x_netdevice_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &lan966x_port_netdev_ops;
+}
+
static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
{
return lan_rd(lan966x, QS_XTR_RD(grp));
@@ -514,6 +503,9 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
skb->protocol = eth_type_trans(skb, dev);
+ if (lan966x->bridge_mask & BIT(src_port))
+ skb->offload_fwd_mark = 1;
+
netif_rx_ni(skb);
dev->stats.rx_bytes += len;
dev->stats.rx_packets++;
@@ -527,6 +519,13 @@ recover:
return IRQ_HANDLED;
}
+static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+
+ return lan966x_mac_irq_handler(lan966x);
+}
+
static void lan966x_cleanup_ports(struct lan966x *lan966x)
{
struct lan966x_port *port;
@@ -554,6 +553,11 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->xtr_irq);
lan966x->xtr_irq = -ENXIO;
+
+ if (lan966x->ana_irq > 0) {
+ disable_irq(lan966x->ana_irq);
+ lan966x->ana_irq = -ENXIO;
+ }
}
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
@@ -578,13 +582,14 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->dev = dev;
port->lan966x = lan966x;
port->chip_port = p;
- port->pvid = PORT_PVID;
lan966x->ports[p] = port;
dev->max_mtu = ETH_MAX_MTU;
dev->netdev_ops = &lan966x_port_netdev_ops;
dev->ethtool_ops = &lan966x_ethtool_ops;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
dev->needed_headroom = IFH_LEN * sizeof(u32);
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
@@ -631,6 +636,10 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
return err;
}
+ lan966x_vlan_port_set_vlan_aware(port, 0);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+
return 0;
}
@@ -641,6 +650,8 @@ static void lan966x_init(struct lan966x *lan966x)
/* MAC table initialization */
lan966x_mac_init(lan966x);
+ lan966x_vlan_init(lan966x);
+
/* Flush queues */
lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
GENMASK(1, 0),
@@ -704,8 +715,10 @@ static void lan966x_init(struct lan966x *lan966x)
/* There are 8 priorities */
for (i = 0; i < 8; ++i)
lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
ANA_FLOODING_FLD_MULTICAST |
+ ANA_FLOODING_FLD_UNICAST |
ANA_FLOODING_FLD_BROADCAST,
lan966x, ANA_FLOODING(i));
@@ -757,6 +770,11 @@ static void lan966x_init(struct lan966x *lan966x)
ANA_PGID_PGID,
lan966x, ANA_PGID(PGID_MCIPV4));
+ /* Unicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_UC));
+
/* Broadcast to the CPU port and to other ports */
lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
ANA_PGID_PGID,
@@ -870,6 +888,15 @@ static int lan966x_probe(struct platform_device *pdev)
return -ENODEV;
}
+ lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
+ if (lan966x->ana_irq) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
+ lan966x_ana_irq_handler, IRQF_ONESHOT,
+ "ana irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use ana irq");
+ }
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -899,6 +926,10 @@ static int lan966x_probe(struct platform_device *pdev)
lan966x_port_init(lan966x->ports[p]);
}
+ err = lan966x_fdb_init(lan966x);
+ if (err)
+ goto cleanup_ports;
+
return 0;
cleanup_ports:
@@ -923,6 +954,9 @@ static int lan966x_remove(struct platform_device *pdev)
destroy_workqueue(lan966x->stats_queue);
mutex_destroy(&lan966x->stats_lock);
+ lan966x_mac_purge_entries(lan966x);
+ lan966x_fdb_deinit(lan966x);
+
return 0;
}
@@ -934,7 +968,32 @@ static struct platform_driver lan966x_driver = {
.of_match_table = lan966x_match,
},
};
-module_platform_driver(lan966x_driver);
+
+static int __init lan966x_switch_driver_init(void)
+{
+ int ret;
+
+ lan966x_register_notifier_blocks();
+
+ ret = platform_driver_register(&lan966x_driver);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ lan966x_unregister_notifier_blocks();
+ return ret;
+}
+
+static void __exit lan966x_switch_driver_exit(void)
+{
+ platform_driver_unregister(&lan966x_driver);
+ lan966x_unregister_notifier_blocks();
+}
+
+module_init(lan966x_switch_driver_init);
+module_exit(lan966x_switch_driver_exit);
MODULE_DESCRIPTION("Microchip LAN966X switch driver");
MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 7e5a3b6f168d..c399b1256edc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -4,9 +4,11 @@
#define __LAN966X_MAIN_H__
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#include <net/switchdev.h>
#include "lan966x_regs.h"
#include "lan966x_ifh.h"
@@ -22,7 +24,8 @@
#define PGID_SRC 80
#define PGID_ENTRIES 89
-#define PORT_PVID 0
+#define UNAWARE_PVID 0
+#define HOST_PVID 4095
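+
+/* UNAWARE_PVID is the vid used to classify traffic on vlan-unaware
+ * ports; HOST_PVID is reserved for standalone (non-bridged) ports so
+ * host traffic stays in its own vlan.
+ */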
/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
#define QSYS_Q_RSRV 95
@@ -75,6 +78,16 @@ struct lan966x {
u8 base_mac[ETH_ALEN];
+ struct net_device *bridge;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct list_head mac_entries;
+ spinlock_t mac_lock; /* lock for mac_entries list */
+
+ u16 vlan_mask[VLAN_N_VID];
+ DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID);
+
/* stats */
const struct lan966x_stat_layout *stats_layout;
u32 num_stats;
@@ -87,6 +100,11 @@ struct lan966x {
/* interrupts */
int xtr_irq;
+ int ana_irq;
+
+ /* workqueue for fdb */
+ struct workqueue_struct *fdb_work;
+ struct list_head fdb_entries;
};
struct lan966x_port_config {
@@ -105,6 +123,10 @@ struct lan966x_port {
u8 chip_port;
u16 pvid;
+ u16 vid;
+ bool vlan_aware;
+
+ bool learn_ena;
struct phylink_config phylink_config;
struct phylink_pcs phylink_pcs;
@@ -118,6 +140,11 @@ extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
extern const struct ethtool_ops lan966x_ethtool_ops;
+bool lan966x_netdevice_check(const struct net_device *dev);
+
+void lan966x_register_notifier_blocks(void);
+void lan966x_unregister_notifier_blocks(void);
+
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats);
int lan966x_stats_init(struct lan966x *lan966x);
@@ -141,6 +168,43 @@ int lan966x_mac_forget(struct lan966x *lan966x,
int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid);
int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid);
void lan966x_mac_init(struct lan966x *lan966x);
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing);
+int lan966x_mac_del_entry(struct lan966x *lan966x,
+ const unsigned char *addr,
+ u16 vid);
+int lan966x_mac_add_entry(struct lan966x *lan966x,
+ struct lan966x_port *port,
+ const unsigned char *addr,
+ u16 vid);
+void lan966x_mac_purge_entries(struct lan966x *lan966x);
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
+
+void lan966x_vlan_init(struct lan966x *lan966x);
+void lan966x_vlan_port_apply(struct lan966x_port *port);
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware);
+int lan966x_vlan_port_set_vid(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid);
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid);
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
+int lan966x_fdb_init(struct lan966x *lan966x);
+void lan966x_fdb_deinit(struct lan966x *lan966x);
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info);
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 879dcd807dec..a13c469e139a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -61,6 +61,9 @@ enum lan966x_target {
#define ANA_ADVLEARN_VLAN_CHK_GET(x)\
FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x)
+/* ANA:ANA:VLANMASK */
+#define ANA_VLANMASK __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 8, 0, 1, 4)
+
/* ANA:ANA:ANAINTR */
#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4)
@@ -88,6 +91,12 @@ enum lan966x_target {
/* ANA:ANA:FLOODING */
#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
+#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x)
+#define ANA_FLOODING_FLD_UNICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_UNICAST, x)
+
#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6)
#define ANA_FLOODING_FLD_BROADCAST_SET(x)\
FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x)
@@ -184,6 +193,102 @@ enum lan966x_target {
#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\
FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x)
+/* ANA:ANA_TABLES:MACTINDX */
+#define ANA_MACTINDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 52, 0, 1, 4)
+
+#define ANA_MACTINDX_BUCKET GENMASK(12, 11)
+#define ANA_MACTINDX_BUCKET_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_BUCKET, x)
+#define ANA_MACTINDX_BUCKET_GET(x)\
+ FIELD_GET(ANA_MACTINDX_BUCKET, x)
+
+#define ANA_MACTINDX_M_INDEX GENMASK(10, 0)
+#define ANA_MACTINDX_M_INDEX_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_M_INDEX, x)
+#define ANA_MACTINDX_M_INDEX_GET(x)\
+ FIELD_GET(ANA_MACTINDX_M_INDEX, x)
+
+/* ANA:ANA_TABLES:VLAN_PORT_MASK */
+#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(8, 0)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\
+ FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\
+ FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+
+/* ANA:ANA_TABLES:VLANACCESS */
+#define ANA_VLANACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 60, 0, 1, 4)
+
+#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\
+ FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\
+ FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+
+/* ANA:ANA_TABLES:VLANTIDX */
+#define ANA_VLANTIDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 64, 0, 1, 4)
+
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+
+#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0)
+#define ANA_VLANTIDX_V_INDEX_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_V_INDEX, x)
+#define ANA_VLANTIDX_V_INDEX_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_V_INDEX, x)
+
+/* ANA:PORT:VLAN_CFG */
+#define ANA_VLAN_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 0, 0, 1, 4)
+
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+
+#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+
+#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0)
+#define ANA_VLAN_CFG_VLAN_VID_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x)
+#define ANA_VLAN_CFG_VLAN_VID_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x)
+
+/* ANA:PORT:DROP_CFG */
+#define ANA_DROP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 4, 0, 1, 4)
+
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+
/* ANA:PORT:CPU_FWD_CFG */
#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
@@ -589,6 +694,36 @@ enum lan966x_target {
/* QSYS:RES_CTRL:RES_CFG */
#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16)
+#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x)
+#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/* REW:PORT:TAG_CFG */
+#define REW_TAG_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 4, 0, 1, 4)
+
+#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7)
+#define REW_TAG_CFG_TAG_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_CFG, x)
+#define REW_TAG_CFG_TAG_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_CFG, x)
+
+#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5)
+#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x)
+#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x)
+
/* REW:PORT:PORT_CFG */
#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
new file mode 100644
index 000000000000..deb3dd5be67a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly;
+static struct notifier_block lan966x_switchdev_nb __read_mostly;
+static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
+
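+/* The PGID_UC/PGID_MC/PGID_BC entries hold the flood masks for
+ * unknown unicast, multicast and broadcast; the bridge flood flags
+ * simply toggle this port's bit in the relevant mask.
+ */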
+static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_MC));
+}
+
+static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_UC));
+}
+
+static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_BC));
+}
+
+static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
+{
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
+ ANA_PORT_CFG_LEARN_ENA,
+ port->lan966x, ANA_PORT_CFG(port->chip_port));
+
+ port->learn_ena = enabled;
+}
+
+static void lan966x_port_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_MCAST_FLOOD)
+ lan966x_port_set_mcast_flood(port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ if (flags.mask & BR_FLOOD)
+ lan966x_port_set_ucast_flood(port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ lan966x_port_set_bcast_flood(port,
+ !!(flags.val & BR_BCAST_FLOOD));
+
+ if (flags.mask & BR_LEARNING)
+ lan966x_port_set_learning(port,
+ !!(flags.val & BR_LEARNING));
+}
+
+static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
+ BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void lan966x_update_fwd_mask(struct lan966x *lan966x)
+{
+ int i;
+
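+ /* PGID_SRC + i is the source mask of port i: the set of ports a
+ * frame received on port i may be forwarded to. A bridged port
+ * forwards to all other bridged ports; every port forwards to
+ * the CPU.
+ */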
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ struct lan966x_port *port = lan966x->ports[i];
+ unsigned long mask = 0;
+
+ if (port && lan966x->bridge_fwd_mask & BIT(i))
+ mask = lan966x->bridge_fwd_mask & ~BIT(i);
+
+ mask |= BIT(CPU_PORT);
+
+ lan_wr(ANA_PGID_PGID_SET(mask),
+ lan966x, ANA_PGID(PGID_SRC + i));
+ }
+}
+
+static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool learn_ena = false;
+
+ if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
+ port->learn_ena)
+ learn_ena = true;
+
+ if (state == BR_STATE_FORWARDING)
+ lan966x->bridge_fwd_mask |= BIT(port->chip_port);
+ else
+ lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_update_fwd_mask(lan966x);
+}
+
+static void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ lan966x_mac_set_ageing(port->lan966x, ageing_time);
+}
+
+static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err = 0;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ lan966x_port_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ lan966x_port_stp_state_set(port, attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ lan966x_port_ageing_set(port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
+ lan966x_vlan_port_apply(port);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_port_bridge_join(struct lan966x_port *port,
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ int err;
+
+ if (!lan966x->bridge_mask) {
+ lan966x->bridge = bridge;
+ } else {
+ if (lan966x->bridge != bridge) {
+ NL_SET_ERR_MSG_MOD(extack, "Not allowed to add port to a different bridge");
+ return -ENODEV;
+ }
+ }
+
+ err = switchdev_bridge_port_offload(dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ return err;
+
+ lan966x->bridge_mask |= BIT(port->chip_port);
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+ lan966x_port_bridge_flags(port, flags);
+
+ return 0;
+}
+
+static void lan966x_port_bridge_leave(struct lan966x_port *port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask & ~BR_LEARNING;
+ lan966x_port_bridge_flags(port, flags);
+
+ lan966x->bridge_mask &= ~BIT(port->chip_port);
+
+ if (!lan966x->bridge_mask)
+ lan966x->bridge = NULL;
+
+ /* Set the port back to host mode */
+ lan966x_vlan_port_set_vlan_aware(port, false);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+}
+
+static int lan966x_port_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_port_bridge_join(port, info->upper_dev,
+ extack);
+ else
+ lan966x_port_bridge_leave(port, info->upper_dev);
+ }
+
+ return err;
+}
+
+static int lan966x_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+ switchdev_bridge_port_unoffload(port->dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb);
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_foreign_bridging_check(struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = NULL;
+ bool has_foreign = false;
+ struct net_device *dev;
+ struct list_head *iter;
+
+ if (!netif_is_bridge_master(bridge))
+ return 0;
+
+ netdev_for_each_lower_dev(bridge, dev, iter) {
+ if (lan966x_netdevice_check(dev)) {
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (lan966x) {
+ /* Bridge already has at least one port of a
+ * lan966x switch inside it, check that it's
+ * the same instance of the driver.
+ */
+ if (port->lan966x != lan966x) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging between multiple lan966x switches disallowed");
+ return -EINVAL;
+ }
+ } else {
+ /* This is the first lan966x port inside this
+ * bridge
+ */
+ lan966x = port->lan966x;
+ }
+ } else {
+ has_foreign = true;
+ }
+
+ if (lan966x && has_foreign) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging lan966x ports with foreign interfaces disallowed");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int lan966x_bridge_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ return lan966x_foreign_bridging_check(info->upper_dev,
+ info->info.extack);
+}
+
+static int lan966x_netdevice_port_event(struct net_device *dev,
+ struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ if (!lan966x_netdevice_check(dev)) {
+ if (event == NETDEV_CHANGEUPPER)
+ return lan966x_bridge_check(dev, ptr);
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_port_prechangeupper(dev, ptr);
+ break;
+ case NETDEV_CHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ err = lan966x_port_changeupper(dev, ptr);
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ ret = lan966x_netdevice_port_event(dev, nb, event, ptr);
+
+ return notifier_from_errno(ret);
+}
+
+static bool lan966x_foreign_dev_check(const struct net_device *dev,
+ const struct net_device *foreign_dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
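+ /* A bridge other than the one this switch joined is foreign,
+ * so fdb entries towards it must not be offloaded.
+ */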
+ if (netif_is_bridge_master(foreign_dev))
+ if (lan966x->bridge != foreign_dev)
+ return true;
+
+ return false;
+}
+
+static int lan966x_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
+ lan966x_netdevice_check,
+ lan966x_foreign_dev_check,
+ lan966x_handle_fdb,
+ NULL);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ /* When adding a port to a vlan, a callback is received both for the
+ * port and for the bridge. For the bridge callback without the
+ * BRIDGE_VLAN_INFO_BRENTRY flag, just bail out. When the bridge
+ * itself is added to the vlan, the callback carries
+ * BRIDGE_VLAN_INFO_BRENTRY, meaning the CPU port joins the vlan, so
+ * broadcast frames and unicast frames with the dmac of the bridge
+ * must be forwarded to the CPU.
+ */
+ if (netif_is_bridge_master(obj->orig_dev) &&
+ !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_add_vlan(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ else
+ lan966x_vlan_cpu_add_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_add(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_del_vlan(port, v->vid);
+ else
+ lan966x_vlan_cpu_del_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_del(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly = {
+ .notifier_call = lan966x_netdevice_event,
+};
+
+static struct notifier_block lan966x_switchdev_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_event,
+};
+
+static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_blocking_event,
+};
+
+void lan966x_register_notifier_blocks(void)
+{
+ register_netdevice_notifier(&lan966x_netdevice_nb);
+ register_switchdev_notifier(&lan966x_switchdev_nb);
+ register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+}
+
+void lan966x_unregister_notifier_blocks(void)
+{
+ unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&lan966x_switchdev_nb);
+ unregister_netdevice_notifier(&lan966x_netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
new file mode 100644
index 000000000000..057f48ddf22c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define VLANACCESS_CMD_IDLE 0
+#define VLANACCESS_CMD_READ 1
+#define VLANACCESS_CMD_WRITE 2
+#define VLANACCESS_CMD_INIT 3
+
+static int lan966x_vlan_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_VLANACCESS);
+}
+
+static int lan966x_vlan_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout(lan966x_vlan_get_status,
+ lan966x, val,
+ (val & ANA_VLANACCESS_VLAN_TBL_CMD) ==
+ VLANACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
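+/* The VLAN table is updated indirectly: select the vid through
+ * ANA_VLANTIDX, write the port mask to ANA_VLAN_PORT_MASK, then issue
+ * a WRITE command and wait for it to complete.
+ */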
+static void lan966x_vlan_set_mask(struct lan966x *lan966x, u16 vid)
+{
+ u16 mask = lan966x->vlan_mask[vid];
+ bool cpu_dis;
+
+ cpu_dis = !(mask & BIT(CPU_PORT));
+
+ /* Set flags and the VID to configure */
+ lan_rmw(ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) |
+ ANA_VLANTIDX_V_INDEX_SET(vid),
+ ANA_VLANTIDX_VLAN_PGID_CPU_DIS |
+ ANA_VLANTIDX_V_INDEX,
+ lan966x, ANA_VLANTIDX);
+
+ /* Set the vlan port members mask */
+ lan_rmw(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(mask),
+ ANA_VLAN_PORT_MASK_VLAN_PORT_MASK,
+ lan966x, ANA_VLAN_PORT_MASK);
+
+ /* Issue a write command */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_WRITE),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+
+ if (lan966x_vlan_wait_for_completion(lan966x))
+ dev_err(lan966x->dev, "Vlan set mask failed\n");
+}
+
+static void lan966x_vlan_port_add_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] |= BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_port_del_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] &= ~BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static bool lan966x_vlan_port_any_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return !!(lan966x->vlan_mask[vid] & ~BIT(CPU_PORT));
+}
+
+static void lan966x_vlan_cpu_add_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] |= BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_del_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] &= ~BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_add_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __set_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static void lan966x_vlan_cpu_del_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __clear_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return test_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static u16 lan966x_vlan_port_get_pvid(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->bridge_mask & BIT(port->chip_port)))
+ return HOST_PVID;
+
+ return port->vlan_aware ? port->pvid : UNAWARE_PVID;
+}
+
+int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid,
+ bool pvid, bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Egress vlan classification */
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ dev_err(lan966x->dev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
+ port->vid = vid;
+ }
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ return 0;
+}
+
+static void lan966x_vlan_port_remove_vid(struct lan966x_port *port, u16 vid)
+{
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ if (port->vid == vid)
+ port->vid = 0;
+}
+
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware)
+{
+ port->vlan_aware = vlan_aware;
+}
+
+void lan966x_vlan_port_apply(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u16 pvid;
+ u32 val;
+
+ pvid = lan966x_vlan_port_get_pvid(port);
+
+ /* Ingress classification (ANA_PORT_VLAN_CFG) */
+ /* Default vlan to classify for untagged frames (may be zero) */
+ val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
+ if (port->vlan_aware)
+ val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1);
+
+ lan_rmw(val,
+ ANA_VLAN_CFG_VLAN_VID | ANA_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_VLAN_CFG_VLAN_POP_CNT,
+ lan966x, ANA_VLAN_CFG(port->chip_port));
+
+ /* Drop frames with multicast source address */
+ val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1);
+ if (port->vlan_aware && !pvid)
+ /* If the port is vlan-aware and has no pvid, drop untagged
+ * and priority tagged frames.
+ */
+ val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1);
+
+ lan_wr(val, lan966x, ANA_DROP_CFG(port->chip_port));
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+ val = REW_TAG_CFG_TAG_TPID_CFG_SET(0);
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except those with the native VID */
+ val |= REW_TAG_CFG_TAG_CFG_SET(1);
+ else
+ val |= REW_TAG_CFG_TAG_CFG_SET(3);
+ }
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_TPID_CFG | REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+
+ /* Set default VLAN and tag type to 8021Q */
+ lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+ REW_PORT_VLAN_CFG_PORT_TPID |
+ REW_PORT_VLAN_CFG_PORT_VID,
+ lan966x, REW_PORT_VLAN_CFG(port->chip_port));
+}
+
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* If the CPU (the bridge) is already part of the vlan, then add
+ * the fdb entries to the MAC table so frames get copied to the
+ * CPU. If the CPU is not part of the vlan, the frames would just
+ * be dropped.
+ */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_port_set_vid(port, vid, pvid, untagged);
+ lan966x_vlan_port_add_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+}
+
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_vlan_port_remove_vid(port, vid);
+ lan966x_vlan_port_del_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+
+ /* If no front ports are left in the vlan, remove the CPU from that
+ * vlan but still keep it in the cpu_vlan_mask because it may be
+ * needed again when another port gets added to that vlan.
+ */
+ if (!lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ }
+}
+
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid)
+{
+ /* Add an entry in the MAC table for the CPU.
+ * Add the CPU port to the vlan only if another port is already a
+ * member, otherwise every broadcast frame in that vlan would go to
+ * the CPU just to be discarded there. The membership is still
+ * recorded in cpu_vlan_mask so that when a front port is added
+ * later, the CPU port is added as well.
+ */
+ if (lan966x_vlan_port_any_vlan_mask(lan966x, vid))
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+}
+
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid)
+{
+ /* Remove the CPU part of the vlan */
+ lan966x_vlan_cpu_del_cpu_vlan_mask(lan966x, vid);
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+}
+
+void lan966x_vlan_init(struct lan966x *lan966x)
+{
+ u16 port, vid;
+
+ /* Clear the VLAN table; by default all ports are members of all VLANs */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+ lan966x_vlan_wait_for_completion(lan966x);
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ lan966x->vlan_mask[vid] = 0;
+ lan966x_vlan_set_mask(lan966x, vid);
+ }
+
+ /* Set all the ports + cpu to be part of HOST_PVID and UNAWARE_PVID */
+ lan966x->vlan_mask[HOST_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, HOST_PVID);
+
+ lan966x->vlan_mask[UNAWARE_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, UNAWARE_PVID);
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, UNAWARE_PVID);
+
+ /* Configure the CPU port to be vlan aware */
+ lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(0) |
+ ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1),
+ lan966x, ANA_VLAN_CFG(CPU_PORT));
+
+ /* Set vlan ingress filter mask to all ports */
+ lan_wr(GENMASK(lan966x->num_phys_ports, 0),
+ lan966x, ANA_VLANMASK);
+
+ for (port = 0; port < lan966x->num_phys_ports; port++) {
+ lan_wr(0, lan966x, REW_PORT_VLAN_CFG(port));
+ lan_wr(0, lan966x, REW_TAG_CFG(port));
+ }
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 58fce173f95b..beb9379424c0 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -303,7 +303,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
}
filter->action.police_ena = true;
- pol_ix = a->police.index + ocelot->vcap_pol.base;
+ pol_ix = a->hw_index + ocelot->vcap_pol.base;
pol_max = ocelot->vcap_pol.max;
if (ocelot->vcap_pol.max2 && pol_ix > pol_max) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 224089d04d98..f97eff5afd12 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1867,6 +1867,9 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
+ if (!netdev)
+ return -EOPNOTSUPP;
+
if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 18a262ef17f6..aca3c3f0f0ad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -174,11 +174,14 @@ struct stmmac_flow_entry {
/* Rx Frame Steering */
enum stmmac_rfs_type {
STMMAC_RFS_T_VLAN,
+ STMMAC_RFS_T_LLDP,
+ STMMAC_RFS_T_1588,
STMMAC_RFS_T_MAX,
};
struct stmmac_rfs_entry {
unsigned long cookie;
+ u16 etype;
int in_use;
int type;
int tc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index d0a2b289f460..d61766eeac6d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -237,6 +237,8 @@ static int tc_rfs_init(struct stmmac_priv *priv)
int i;
priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
+ priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
+ priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;
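+ /* Only one LLDP rule and one PTP (1588) rule can be offloaded
+ * at a time.
+ */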
for (i = 0; i < STMMAC_RFS_T_MAX; i++)
priv->rfs_entries_total += priv->rfs_entries_max[i];
@@ -451,6 +453,8 @@ static int tc_parse_flow_actions(struct stmmac_priv *priv,
return 0;
}
+#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0)
+
static int tc_add_basic_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls,
struct stmmac_flow_entry *entry)
@@ -464,6 +468,7 @@ static int tc_add_basic_flow(struct stmmac_priv *priv,
return -EINVAL;
flow_rule_match_basic(rule, &match);
+
entry->ip_proto = match.key->ip_proto;
return 0;
}
@@ -724,6 +729,114 @@ static int tc_del_vlan_flow(struct stmmac_priv *priv,
return 0;
}
+static int tc_add_ethtype_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+ struct flow_match_basic match;
+
+ if (!entry) {
+ entry = tc_find_rfs(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
+
+ /* The basic dissector key is required to match on the EthType */
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
+ return -EINVAL;
+
+ if (tc < 0) {
+ netdev_err(priv->dev, "Invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ flow_rule_match_basic(rule, &match);
+
+ if (match.mask->n_proto) {
+ u16 etype = ntohs(match.key->n_proto);
+
+ if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_err(priv->dev, "Only full mask is supported for EthType filter");
+ return -EINVAL;
+ }
+ switch (etype) {
+ case ETH_P_LLDP:
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
+ return -ENOENT;
+
+ entry->type = STMMAC_RFS_T_LLDP;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;
+
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_DCBCPQ, tc);
+ break;
+ case ETH_P_1588:
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_1588])
+ return -ENOENT;
+
+ entry->type = STMMAC_RFS_T_1588;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;
+
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_PTPQ, tc);
+ break;
+ default:
+ netdev_err(priv->dev, "EthType(0x%x) is not supported", etype);
+ return -EINVAL;
+ }
+
+ entry->in_use = true;
+ entry->cookie = cls->cookie;
+ entry->tc = tc;
+ entry->etype = etype;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tc_del_ethtype_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+
+ if (!entry || !entry->in_use ||
+ entry->type < STMMAC_RFS_T_LLDP ||
+ entry->type > STMMAC_RFS_T_1588)
+ return -ENOENT;
+
+ switch (entry->etype) {
+ case ETH_P_LLDP:
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_DCBCPQ, 0);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
+ break;
+ case ETH_P_1588:
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_PTPQ, 0);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
+ break;
+ default:
+ netdev_err(priv->dev, "EthType(0x%x) is not supported",
+ entry->etype);
+ return -EINVAL;
+ }
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->tc = 0;
+ entry->etype = 0;
+ entry->type = 0;
+
+ return 0;
+}
+
static int tc_add_flow_cls(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
@@ -733,6 +846,10 @@ static int tc_add_flow_cls(struct stmmac_priv *priv,
if (!ret)
return ret;
+ ret = tc_add_ethtype_flow(priv, cls);
+ if (!ret)
+ return ret;
+
return tc_add_vlan_flow(priv, cls);
}
@@ -745,6 +862,10 @@ static int tc_del_flow_cls(struct stmmac_priv *priv,
if (!ret)
return ret;
+ ret = tc_del_ethtype_flow(priv, cls);
+ if (!ret)
+ return ret;
+
return tc_del_vlan_flow(priv, cls);
}
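Taken together, the stmmac hunks teach the flower classifier two fixed-function filters: an exact EtherType match on LLDP (0x88CC) or PTP/1588 (0x88F7) frames, each limited to a single rule and steered to a receive queue through the existing stmmac_rx_queue_routing() packet-type routing. From userspace this would correspond to a flower rule whose only key is the protocol, e.g. "tc filter add dev eth0 ingress protocol 0x88cc flower hw_tc 1" on a clsact ingress hook (illustrative invocation; hw_tc mapping assumes an mqprio-style traffic-class setup). Such a rule reaches tc_add_ethtype_flow() carrying a basic key with a fully masked n_proto, which is what the ETHER_TYPE_FULL_MASK check insists on; a sketch of the key as the parser sees it:

	struct flow_match_basic match;

	flow_rule_match_basic(rule, &match);
	/* For the LLDP rule above:
	 *   match.key->n_proto  == htons(ETH_P_LLDP)    (0x88CC)
	 *   match.mask->n_proto == cpu_to_be16(0xffff)  (full match)
	 */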
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 44a24b99c894..c6a97fcca0e6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -66,6 +66,19 @@
#define KSZ8081_LMD_SHORT_INDICATOR BIT(12)
#define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0)
+/* LAN8814 general interrupt control/status registers in the GPHY specific block. */
+#define LAN8814_INTC 0x18
+#define LAN8814_INTS 0x1B
+
+#define LAN8814_INT_LINK_DOWN BIT(2)
+#define LAN8814_INT_LINK_UP BIT(0)
+#define LAN8814_INT_LINK (LAN8814_INT_LINK_UP |\
+ LAN8814_INT_LINK_DOWN)
+
+#define LAN8814_INTR_CTRL_REG 0x34
+#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1)
+#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0)
+
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
#define KSZ8081_CTRL1_MDIX_STAT BIT(4)
@@ -1620,6 +1633,58 @@ static int lan8804_config_init(struct phy_device *phydev)
return 0;
}
+static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, LAN8814_INTS);
+ if (irq_status < 0)
+ return IRQ_NONE;
+
+ if (!(irq_status & LAN8814_INT_LINK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int lan8814_ack_interrupt(struct phy_device *phydev)
+{
+ /* Bits [12:0] hold the interrupt status; the register clears on read. */
+ int rc;
+
+ rc = phy_read(phydev, LAN8814_INTS);
+
+ return (rc < 0) ? rc : 0;
+}
+
+static int lan8814_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ lanphy_write_page_reg(phydev, 4, LAN8814_INTR_CTRL_REG,
+ LAN8814_INTR_CTRL_REG_POLARITY |
+ LAN8814_INTR_CTRL_REG_INTR_ENABLE);
+
+ /* enable / disable interrupts */
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = lan8814_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = lan8814_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1802,6 +1867,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = kszphy_resume,
+ .config_intr = lan8814_config_intr,
+ .handle_interrupt = lan8814_handle_interrupt,
}, {
.phy_id = PHY_ID_LAN8804,
.phy_id_mask = MICREL_PHY_ID_MASK,
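The LAN8814 hookup above follows the standard phylib contract: config_intr arms or disarms the interrupt source, and handle_interrupt is what the core's threaded IRQ handler calls to decide whether this PHY actually raised the line. A minimal sketch of that consumer side, simplified from drivers/net/phy/phy.c (illustrative, not verbatim):

	static irqreturn_t phy_interrupt(int irq, void *phy_dat)
	{
		struct phy_device *phydev = phy_dat;

		/* Delegates to the driver; returning IRQ_NONE lets a
		 * shared interrupt line fall through to other handlers.
		 */
		return phydev->drv->handle_interrupt(phydev);
	}

Note the ordering in lan8814_config_intr(): on enable, pending status is acknowledged before unmasking so a stale latched bit cannot fire immediately; on disable, the source is masked first so nothing new latches between the two steps.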
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 4733fd7fb169..62c453a21e49 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2613,6 +2613,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)) {
struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ enum cfg80211_bss_frame_type ftype;
u8 *ies;
int ies_ch;
@@ -2623,9 +2624,14 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ies = mgmt->u.beacon.variable;
+ if (ieee80211_is_beacon(mgmt->frame_control))
+ ftype = CFG80211_BSS_FTYPE_BEACON;
+ else
+ ftype = CFG80211_BSS_FTYPE_PRESP;
+
ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable,
skb_tail_pointer(skb) - ies,
- sband->band);
+ sband->band, ftype);
if (ies_ch > 0 && ies_ch != channel) {
ath10k_dbg(ar, ATH10K_DBG_MGMT,
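cfg80211_get_ies_channel_number() grew a frame-type argument, so callers must now say which management frame the IEs came from; ath10k derives it from frame_control as shown. A caller that cannot tell (or that handles generic scan results) could pass the catch-all value from the same enum, along these lines (a sketch, not from this patch):

	ies_ch = cfg80211_get_ies_channel_number(ies,
						 skb_tail_pointer(skb) - ies,
						 sband->band,
						 CFG80211_BSS_FTYPE_UNKNOWN);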
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index ca68cf63dd27..198ade90b725 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -3,6 +3,8 @@
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
*/
+#include <linux/vmalloc.h>
+
#include "debugfs.h"
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 1f8a81987187..d6575feca5a2 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -2,6 +2,8 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
+#include <linux/rtnetlink.h>
+
#include "core.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 88444fe6d1c6..1b76f4434c06 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -275,12 +275,12 @@ static void carl9170_tx_release(struct kref *ref)
if (WARN_ON_ONCE(!ar))
return;
- BUILD_BUG_ON(
- offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
-
- memset(&txinfo->status.ack_signal, 0,
- sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ack_signal));
+ /*
+ * This does not call ieee80211_tx_info_clear_status() because
+ * carl9170_tx_fill_rateinfo() has filled the rate information
+ * before we get to this point.
+ */
+ memset_after(&txinfo->status, 0, rates);
if (atomic_read(&ar->tx_total_queued))
ar->tx_schedule = true;
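memset_after() is the string.h helper this conversion relies on: it zeroes everything in an object past a named member, which subsumes both the open-coded offsetof() arithmetic and the BUILD_BUG_ON() guarding it (the p54 hunk further down is the same conversion). Its definition in include/linux/string.h is, at the time of this series, approximately:

	#define memset_after(obj, v, member)				\
	({								\
		u8 *__ptr = (u8 *)(obj);				\
		typeof(v) __val = (v);					\
		memset(__ptr + offsetofend(typeof(*(obj)), member),	\
		       __val, sizeof(*(obj)) -				\
		       offsetofend(typeof(*(obj)), member));		\
	})

Here memset_after(&txinfo->status, 0, rates) clears every status field that follows rates regardless of the struct layout, so the hard-coded offset-20 assumption can go.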
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 9ef89ff09105..dd58c8f9aa11 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -608,7 +608,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.phy_cap_info[9] =
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
+ (IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED <<
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS),
.phy_cap_info[10] =
IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF,
},
@@ -665,7 +666,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
.phy_cap_info[9] =
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED
+ << IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS,
},
/*
* Set default Tx/Rx HE MCS NSS Support field.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index efda37645fd4..65f4fe3ef504 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2215,24 +2215,24 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
}
flags |= STA_CTXT_HE_PACKET_EXT;
- } else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] &
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) !=
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) {
+ } else if (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)
+ != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) {
int low_th = -1;
int high_th = -1;
/* Take the PPE thresholds from the nominal padding info */
- switch (sta->he_cap.he_cap_elem.phy_cap_info[9] &
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) {
- case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US:
+ switch (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) {
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_NONE;
break;
- case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US:
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
low_th = IWL_HE_PKT_EXT_BPSK;
high_th = IWL_HE_PKT_EXT_NONE;
break;
- case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US:
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_BPSK;
break;
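Two things change across these wireless hunks: the long-standing NOMIMAL typo in the IEEE80211_HE_PHY_CAP9_* names is fixed, and the nominal-packet-padding values become field values relative to a shifted mask rather than pre-shifted bits, so drivers read and write them with the <linux/bitfield.h> helpers. A sketch of the resulting idiom (mask and position per the renamed ieee80211.h definitions; assumed here to be a two-bit field at the top of phy_cap_info[9]):

	u8 cap9 = he_cap_elem->phy_cap_info[9];

	/* Extract: shifts the masked bits down for the caller. */
	u8 pad = u8_get_bits(cap9,
			     IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);

	/* Insert: shifts the value up into the mask. */
	cap9 |= u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
			       IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);

The mt7915, mt7921 and rtw89 hunks below apply the same u8_encode_bits() pattern on the write side.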
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 58d4d3bf6f86..87630d38dc52 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/module.h>
+#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
index f702ad85e609..78450366312b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
@@ -4,6 +4,7 @@
*/
#include "mvm.h"
#include <linux/nl80211-vnd-intel.h>
+#include <net/netlink.h>
static const struct nla_policy
iwl_mvm_vendor_attr_policy[NUM_IWL_MVM_VENDOR_ATTR] = {
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 873fea59894f..8414aa208655 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -431,11 +431,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
* Clear manually, ieee80211_tx_info_clear_status would
* clear the counts too and we need them.
*/
- memset(&info->status.ack_signal, 0,
- sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ack_signal));
- BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
- status.ack_signal) != 20);
+ memset_after(&info->status, 0, rates);
if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
pad = entry_data->align[0];
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 23219f3747f8..0307a6677907 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1276,7 +1276,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
/* If the queue contains MAX_QUEUE skb's drop some */
if (skb_queue_len(&data->pending) >= MAX_QUEUE) {
- /* Droping until WARN_QUEUE level */
+ /* Dropping until WARN_QUEUE level */
while (skb_queue_len(&data->pending) >= WARN_QUEUE) {
ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
data->tx_dropped++;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 4fa8e7ba93e6..d054cdecd5f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -853,7 +853,8 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
mt7915_gen_ppe_thresh(he_cap->ppe_thres, nss);
} else {
he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
}
idx++;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 3effe22f69c0..7a8d2596c226 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -152,7 +152,8 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
mt7921_gen_ppe_thresh(he_cap->ppe_thres, nss);
} else {
he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
}
if (band == NL80211_BAND_6GHZ) {
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h
index 5e2792d81576..0590c35c7126 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.h
+++ b/drivers/net/wireless/mediatek/mt76/testmode.h
@@ -7,6 +7,8 @@
#define MT76_TM_TIMEOUT 10
+#include <net/netlink.h>
+
/**
* enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
*
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 00ae86807dc2..a0737eea9f81 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include <linux/ip.h>
+#include <linux/udp.h>
#include "coex.h"
#include "core.h"
@@ -2119,7 +2121,8 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
if (i == NL80211_IFTYPE_STATION)
phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 9756d75ef24e..22bd1d03e722 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include <linux/vmalloc.h>
+
#include "coex.h"
#include "debug.h"
#include "fw.h"
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index d4b59fbe7365..8a57b75b07c0 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -781,7 +781,7 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
if (!ppe_th) {
u8 pad;
- pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK,
+ pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
sta->he_cap.he_cap_elem.phy_cap_info[9]);
for (i = 0; i < RTW89_PPE_BW_NUM; i++)