Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/3com/Kconfig5
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c5
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c41
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mdio.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c876
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.h36
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c55
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c164
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c109
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h48
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h135
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h58
-rw-r--r--drivers/net/ethernet/arc/emac_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/b44.c12
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c83
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c567
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h114
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c201
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h614
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c32
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c108
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c65
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c105
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c7
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c113
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c73
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c62
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c28
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c17
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c71
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c454
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h28
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c58
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c12
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c23
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c9
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c399
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c550
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h57
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h106
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c70
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c933
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h713
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c1556
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h79
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1063
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h98
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c104
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c154
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c775
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h96
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c22
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c55
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c14
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c30
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h6
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c9
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c7
-rw-r--r--drivers/net/ethernet/intel/Kconfig25
-rw-r--r--drivers/net/ethernet/intel/e100.c14
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c59
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c25
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c67
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c223
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c152
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c94
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c122
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c46
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c51
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c65
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c9
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h11
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c44
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c299
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c26
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c21
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c162
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h36
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h340
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c986
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h220
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c133
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c1410
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c1538
-rw-r--r--drivers/net/ethernet/marvell/skge.c14
-rw-r--r--drivers/net/ethernet/marvell/sky2.c20
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c169
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c634
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c222
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c483
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c766
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c981
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c1260
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c283
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c325
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c285
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h98
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c368
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c716
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h324
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c535
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h85
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c281
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c156
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c561
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c171
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h81
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c279
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c782
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c88
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c3
-rw-r--r--drivers/net/ethernet/neterion/Kconfig4
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile2
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/cls.c283
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/ctrl.c379
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c363
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.h208
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/qdisc.c850
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c72
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c164
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h51
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c48
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c41
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h24
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c244
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c62
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c89
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h25
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c113
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c90
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c3
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c334
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c152
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c79
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c65
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h50
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c47
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h13
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c5
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c9
-rw-r--r--drivers/net/ethernet/realtek/r8169.c389
-rw-r--r--drivers/net/ethernet/renesas/ravb.h1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c23
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c60
-rw-r--r--drivers/net/ethernet/sfc/ef10.c7
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c5
-rw-r--r--drivers/net/ethernet/smsc/Kconfig4
-rw-r--r--drivers/net/ethernet/socionext/netsec.c385
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c390
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c36
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig3
-rw-r--r--drivers/net/ethernet/ti/cpmac.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c241
-rw-r--r--drivers/net/ethernet/ti/cpts.c32
-rw-r--r--drivers/net/ethernet/ti/cpts.h38
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c14
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c36
-rw-r--r--drivers/net/ethernet/ti/tlan.c4
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c29
380 files changed, 28710 insertions, 9214 deletions
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 5bc168314ea2..40f421dbdf57 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1151,7 +1151,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
print_info = (vortex_debug > 1);
if (print_info)
- pr_info("See Documentation/networking/vortex.txt\n");
+ pr_info("See Documentation/networking/device_drivers/3com/vortex.txt\n");
pr_info("%s: 3Com %s %s at %p.\n",
print_name,
@@ -1956,7 +1956,7 @@ vortex_error(struct net_device *dev, int status)
dev->name, tx_status);
if (tx_status == 0x82) {
pr_err("Probably a duplex mismatch. See "
- "Documentation/networking/vortex.txt\n");
+ "Documentation/networking/device_drivers/3com/vortex.txt\n");
}
dump_tx_ring(dev);
}
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5c3ef9fc8207..0ac44ef1f7a9 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -75,8 +75,9 @@ config VORTEX
"Hurricane" (3c555/3cSOHO) PCI
If you have such a card, say Y here. More specific information is in
- <file:Documentation/networking/vortex.txt> and in the comments at
- the beginning of <file:drivers/net/ethernet/3com/3c59x.c>.
+ <file:Documentation/networking/device_drivers/3com/vortex.txt> and
+ in the comments at the beginning of
+ <file:drivers/net/ethernet/3com/3c59x.c>.
To compile this support as a module, choose M here.
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7c9348a26cbb..91fc64c1145e 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1283,7 +1283,7 @@ static int greth_mdio_probe(struct net_device *dev)
else
phy_set_max_speed(phy, SPEED_100);
- phy->advertising = phy->supported;
+ linkmode_copy(phy->advertising, phy->supported);
greth->link = 0;
greth->speed = 0;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 7c1eb304c27e..e833d1b3fe18 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -940,11 +940,8 @@ static int au1000_open(struct net_device *dev)
return retval;
}
- if (dev->phydev) {
- /* cause the PHY state machine to schedule a link state check */
- dev->phydev->state = PHY_CHANGELINK;
+ if (dev->phydev)
phy_start(dev->phydev);
- }
netif_start_queue(dev);
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 9d4899826823..bd6589de93d9 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1488,9 +1488,9 @@ static int sunlance_sbus_probe(struct platform_device *op)
struct device_node *parent_dp = parent->dev.of_node;
int err;
- if (!strcmp(parent_dp->name, "ledma")) {
+ if (of_node_name_eq(parent_dp, "ledma")) {
err = sparc_lance_probe_one(op, parent, NULL);
- } else if (!strcmp(parent_dp->name, "lebuffer")) {
+ } else if (of_node_name_eq(parent_dp, "lebuffer")) {
err = sparc_lance_probe_one(op, NULL, parent);
} else
err = sparc_lance_probe_one(op, NULL, NULL);
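The sunlance hunk above replaces a raw strcmp() on the device-tree node name with of_node_name_eq(), which also matches names that carry a unit address. A minimal illustrative sketch of the comparison it performs (not the kernel implementation, which lives in drivers/of/base.c):

#include <stdbool.h>
#include <string.h>

/* Illustrative only: the node name is compared up to any '@<unit-address>'
 * suffix, so a node named "ledma@8,400010" still matches "ledma".
 */
static bool node_name_matches(const char *full_name, const char *name)
{
	size_t len = strcspn(full_name, "@");

	return strlen(name) == len && strncmp(full_name, name, len) == 0;
}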
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 151bdb629e8a..128cd648ba99 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -857,6 +857,7 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -878,9 +879,15 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- phy_data->phydev->supported = PHY_10BT_FEATURES |
- PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+
+ linkmode_copy(phy_data->phydev->supported, supported);
+
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -891,6 +898,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -951,9 +959,13 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- phy_data->phydev->supported = (PHY_10BT_FEATURES |
- PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+ linkmode_copy(phy_data->phydev->supported, supported);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -976,7 +988,6 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct phy_device *phydev;
- u32 advertising;
int ret;
/* If we already have a PHY, just return */
@@ -1036,9 +1047,8 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
xgbe_phy_external_phy_quirks(pdata);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- lks->link_modes.advertising);
- phydev->advertising &= advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ lks->link_modes.advertising);
phy_start_aneg(phy_data->phydev);
@@ -1497,7 +1507,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
if (!phy_data->phydev)
return;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_data->phydev->advertising);
if (phy_data->phydev->pause) {
XGBE_SET_LP_ADV(lks, Pause);
@@ -1815,7 +1825,6 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
{
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
struct xgbe_phy_data *phy_data = pdata->phy_data;
- u32 advertising;
int ret;
ret = xgbe_phy_find_phy_device(pdata);
@@ -1825,12 +1834,10 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
if (!phy_data->phydev)
return 0;
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- lks->link_modes.advertising);
-
phy_data->phydev->autoneg = pdata->phy.autoneg;
- phy_data->phydev->advertising = phy_data->phydev->supported &
- advertising;
+ linkmode_and(phy_data->phydev->advertising,
+ phy_data->phydev->supported,
+ lks->link_modes.advertising);
if (pdata->phy.autoneg != AUTONEG_ENABLE) {
phy_data->phydev->speed = pdata->phy.speed;
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index f5fe3bb2e59d..53529cd85162 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -109,6 +109,7 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -148,16 +149,17 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- phydev->supported &= ~(SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_AUI |
- SUPPORTED_MII |
- SUPPORTED_FIBRE |
- SUPPORTED_BNC);
- phydev->advertising = phydev->supported;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
+
+ linkmode_andnot(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
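The greth, xgbe and xgene hunks above all convert from the legacy u32 SUPPORTED_*/ADVERTISED_* bitmasks to the ethtool link-mode bitmaps manipulated through the linkmode_* helpers from <linux/linkmode.h>. A minimal sketch of the pattern, using a hypothetical driver function (not part of this patch):

#include <linux/linkmode.h>
#include <linux/phy.h>

/* Hypothetical example: restrict a PHY to 100/1000 full duplex using the
 * linkmode bitmap helpers, mirroring the pattern in the hunks above.
 */
static void example_restrict_link_modes(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);

	/* Keep only the allowed modes and advertise the result. */
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);
}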
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 686f6d8c9e79..4556630ee286 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \
aq_ring.o \
aq_hw_utils.o \
aq_ethtool.o \
+ aq_filters.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 91eb8910b1c9..3944ce7f0870 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -12,7 +12,7 @@
#ifndef AQ_CFG_H
#define AQ_CFG_H
-#define AQ_CFG_VECS_DEF 4U
+#define AQ_CFG_VECS_DEF 8U
#define AQ_CFG_TCS_DEF 1U
#define AQ_CFG_TXDS_DEF 4096U
@@ -42,8 +42,8 @@
#define AQ_CFG_IS_LRO_DEF 1U
/* RSS */
-#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U
-#define AQ_CFG_RSS_HASHKEY_SIZE 320U
+#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 64U
+#define AQ_CFG_RSS_HASHKEY_SIZE 40U
#define AQ_CFG_IS_RSS_DEF 1U
#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index becb578211ed..6b6d1724676e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -14,7 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/pci.h>
-
+#include <linux/if_vlan.h>
#include "ver.h"
#include "aq_cfg.h"
#include "aq_utils.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 99ef1daaa4d8..38e87eed76b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -12,6 +12,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
+#include "aq_filters.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
@@ -201,6 +202,41 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
return 0;
}
+static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+ struct aq_nic_cfg_s *cfg;
+ unsigned int i = 0U;
+ u32 rss_entries;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ rss_entries = cfg->aq_rss.indirection_table_size;
+
+ /* We do not allow changes to unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+ /* Fill out the redirection table */
+ if (indir)
+ for (i = 0; i < rss_entries; i++)
+ cfg->aq_rss.indirection_table[i] = indir[i];
+
+ /* Fill out the rss hash key */
+ if (key) {
+ memcpy(cfg->aq_rss.hash_secret_key, key,
+ sizeof(cfg->aq_rss.hash_secret_key));
+ err = aq_nic->aq_hw_ops->hw_rss_hash_set(aq_nic->aq_hw,
+ &cfg->aq_rss);
+ if (err)
+ return err;
+ }
+
+ err = aq_nic->aq_hw_ops->hw_rss_set(aq_nic->aq_hw, &cfg->aq_rss);
+
+ return err;
+}
+
static int aq_ethtool_get_rxnfc(struct net_device *ndev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -213,7 +249,36 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = aq_get_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = aq_get_rxnfc_all_rules(aq_nic, cmd, rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int aq_ethtool_set_rxnfc(struct net_device *ndev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = aq_add_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = aq_del_rxnfc_rule(aq_nic, cmd);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -495,7 +560,7 @@ static int aq_set_ringparam(struct net_device *ndev,
}
}
if (ndev_running)
- err = dev_open(ndev);
+ err = dev_open(ndev, NULL);
err_exit:
return err;
@@ -519,7 +584,9 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_pauseparam = aq_ethtool_set_pauseparam,
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
+ .set_rxfh = aq_ethtool_set_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
+ .set_rxnfc = aq_ethtool_set_rxnfc,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
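The aq_cfg.h hunk earlier shrinks the RSS indirection table to 64 entries and the hash key to 40 bytes, and the new aq_ethtool_set_rss() above lets user space program both (this is what `ethtool -X` reaches through the .set_rxfh hook). Conceptually, the indirection table maps a packet's RSS hash to an RX queue; a rough sketch with assumed names, not driver code:

/* Conceptual sketch of RSS steering: hash the flow, index the indirection
 * table with the hash, and the table entry names the RX queue to use.
 */
static unsigned int rss_pick_queue(unsigned int flow_hash,
				   const unsigned char *indir,
				   unsigned int indir_size)
{
	return indir[flow_hash % indir_size];
}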
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
new file mode 100644
index 000000000000..18bc035da850
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.c: RX filters related functions. */
+
+#include "aq_filters.h"
+
+static bool __must_check
+aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return false;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ return true;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ case IPV6_USER_FLOW:
+ switch (fsp->h_u.usr_ip6_spec.l4_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static bool __must_check
+aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
+ struct ethtool_rx_flow_spec *fsp2)
+{
+ if (fsp1->flow_type != fsp2->flow_type ||
+ memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
+ memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
+ memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
+ memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
+ return false;
+
+ return true;
+}
+
+static bool __must_check
+aq_rule_already_exists(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct aq_rx_filter *rule;
+ struct hlist_node *aq_node2;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == fsp->location)
+ continue;
+ if (aq_match_filter(&rule->aq_fsp, fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: This filter is already set\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
+ fsp->location > AQ_RX_LAST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FL3L4,
+ AQ_RX_LAST_LOC_FL3L4);
+ return -EINVAL;
+ }
+ if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (rx_fltrs->fl3l4.is_ipv6 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified location for ipv6 must be %d or %d",
+ AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fl2(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
+ fsp->location > AQ_RX_LAST_LOC_FETHERT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FETHERT,
+ AQ_RX_LAST_LOC_FETHERT);
+ return -EINVAL;
+ }
+
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
+ fsp->m_u.ether_spec.h_proto == 0U) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: proto (ether_type) parameter must be specified");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
+ fsp->location > AQ_RX_LAST_LOC_FVLANID) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FVLANID,
+ AQ_RX_LAST_LOC_FVLANID);
+ return -EINVAL;
+ }
+
+ if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
+ aq_nic->active_vlans))) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown vlan-id specified");
+ return -EINVAL;
+ }
+
+ if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: queue number must be in range [0, %d]",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __must_check
+aq_check_filter(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ if (fsp->flow_type & FLOW_EXT) {
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
+ err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
+ } else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ } else {
+ netdev_err(aq_nic->ndev,
+ "ethtool: invalid vlan mask 0x%x specified",
+ be16_to_cpu(fsp->m_ext.vlan_tci));
+ err = -EINVAL;
+ }
+ } else {
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IP_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ default:
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown flow-type specified");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static bool __must_check
+aq_rule_is_not_support(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_support = false;
+
+ if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: Please, to enable the RX flow control:\n"
+ "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
+ rule_is_not_support = true;
+ } else if (!aq_rule_is_approve(fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified flow type is not supported\n");
+ rule_is_not_support = true;
+ } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
+ (fsp->h_u.tcp_ip4_spec.tos ||
+ fsp->h_u.tcp_ip6_spec.tclass)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified tos tclass are not supported\n");
+ rule_is_not_support = true;
+ } else if (fsp->flow_type & FLOW_MAC_EXT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: MAC_EXT is not supported");
+ rule_is_not_support = true;
+ }
+
+ return rule_is_not_support;
+}
+
+static bool __must_check
+aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_correct = false;
+
+ if (!aq_nic) {
+ rule_is_not_correct = true;
+ } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified number %u rule is invalid\n",
+ fsp->location);
+ rule_is_not_correct = true;
+ } else if (aq_check_filter(aq_nic, fsp)) {
+ rule_is_not_correct = true;
+ } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified action is invalid.\n"
+ "Maximum allowable value action is %u.\n",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ rule_is_not_correct = true;
+ }
+ }
+
+ return rule_is_not_correct;
+}
+
+static int __must_check
+aq_check_rule(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+
+ if (aq_rule_is_not_correct(aq_nic, fsp))
+ err = -EINVAL;
+ else if (aq_rule_is_not_support(aq_nic, fsp))
+ err = -EOPNOTSUPP;
+ else if (aq_rule_already_exists(aq_nic, fsp))
+ err = -EEXIST;
+
+ return err;
+}
+
+static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l2 *data, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
+
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ data->queue = fsp->ring_cookie;
+ else
+ data->queue = -1;
+
+ data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
+ data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
+ == VLAN_PRIO_MASK;
+ data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+static int aq_add_del_fether(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ struct aq_rx_filter_l2 data;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
+
+ if (unlikely(!aq_hw_ops->hw_filter_l2_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
+ return -EOPNOTSUPP;
+
+ if (add)
+ return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
+ else
+ return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
+}
+
+static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
+ aq_vlans[i].vlan_id == vlan) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Rebuild the array of VLAN filters so that filters with an assigned
+ * queue take precedence over plain VLANs on the interface.
+ */
+static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
+ unsigned long *active_vlans,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ bool vlan_busy = false;
+ int vlan = -1;
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
+ continue;
+ do {
+ vlan = find_next_bit(active_vlans,
+ VLAN_N_VID,
+ vlan + 1);
+ if (vlan == VLAN_N_VID) {
+ aq_vlans[i].enable = 0U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = 0;
+ continue;
+ }
+
+ vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
+ if (!vlan_busy) {
+ aq_vlans[i].enable = 1U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = vlan;
+ }
+ } while (vlan_busy && vlan != VLAN_N_VID);
+ }
+}
+
+static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_vlan *aq_vlans, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+ int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
+ int i;
+
+ memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
+
+ if (!add)
+ return 0;
+
+ /* remove vlan if it was in table without queue assignment */
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].vlan_id ==
+ (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
+ aq_vlans[i].enable = false;
+ }
+ }
+
+ aq_vlans[location].location = location;
+ aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_VID_MASK;
+ aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
+ aq_vlans[location].enable = 1U;
+
+ return 0;
+}
+
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
+ break;
+ }
+ if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+ struct ethtool_rxnfc cmd;
+
+ cmd.fs.location = rule->aq_fsp.location;
+ return aq_del_rxnfc_rule(aq_nic, &cmd);
+ }
+
+ return -ENOENT;
+}
+
+static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+
+ aq_set_data_fvlan(aq_nic,
+ aq_rx_fltr,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
+ add);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l3l4 *data, bool add)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
+ data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
+
+ if (!add) {
+ if (!data->is_ipv6)
+ rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
+ else
+ rx_fltrs->fl3l4.active_ipv6 &=
+ ~BIT((data->location) / 4);
+
+ return 0;
+ }
+
+ data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_UDP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_SCTP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ default:
+ break;
+ }
+
+ if (!data->is_ipv6) {
+ data->ip_src[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
+ data->ip_dst[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
+ } else {
+ int i;
+
+ rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ data->ip_dst[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
+ data->ip_src[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
+ }
+ data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
+ }
+ if (fsp->flow_type != IP_USER_FLOW &&
+ fsp->flow_type != IPV6_USER_FLOW) {
+ if (!data->is_ipv6) {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip4_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip4_spec.psrc);
+ } else {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip6_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip6_spec.psrc);
+ }
+ }
+ if (data->ip_src[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
+ if (data->ip_dst[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
+ if (data->p_dst)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
+ if (data->p_src)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
+ } else {
+ data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ }
+
+ return 0;
+}
+
+static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
+ const struct aq_hw_ops *aq_hw_ops,
+ struct aq_rx_filter_l3l4 *data)
+{
+ if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
+ return -EOPNOTSUPP;
+
+ return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
+}
+
+static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ struct aq_rx_filter_l3l4 data;
+
+ if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
+ aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
+ aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
+ return -EINVAL;
+
+ return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
+}
+
+static int aq_add_del_rule(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ int err = -EINVAL;
+
+ if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
+ if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_VID_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_vlan;
+ err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
+ } else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_PRIO_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ }
+ } else {
+ switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IP_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_l3l4;
+ err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int aq_update_table_filters(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, u16 index,
+ struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL, *parent = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location >= index)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->aq_fsp.location == index) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+ if (unlikely(!aq_rx_fltr))
+ return err;
+
+ INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
+
+ if (parent)
+ hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
+ else
+ hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
+
+ ++rx_fltrs->active_filters;
+
+ return 0;
+}
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ return rx_fltrs->active_filters;
+}
+
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
+{
+ return &aq_nic->aq_hw_rx_fltrs;
+}
+
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *aq_rx_fltr;
+ int err = 0;
+
+ err = aq_check_rule(aq_nic, fsp);
+ if (err)
+ goto err_exit;
+
+ aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
+ if (unlikely(!aq_rx_fltr)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
+
+ err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
+ if (unlikely(err))
+ goto err_free;
+
+ err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
+ if (unlikely(err)) {
+ hlist_del(&aq_rx_fltr->aq_node);
+ --rx_fltrs->active_filters;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kfree(aq_rx_fltr);
+err_exit:
+ return err;
+}
+
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == cmd->fs.location)
+ break;
+ }
+
+ if (rule && rule->aq_fsp.location == cmd->fs.location) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+ return err;
+}
+
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node)
+ if (fsp->location <= rule->aq_fsp.location)
+ break;
+
+ if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
+ return -EINVAL;
+
+ memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
+
+ return 0;
+}
+
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int count = 0;
+
+ cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (unlikely(count == cmd->rule_cnt))
+ return -EMSGSIZE;
+
+ rule_locs[count++] = rule->aq_fsp.location;
+ }
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ if (err)
+ goto err_exit;
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, true);
+ if (err)
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int hweight = 0;
+ int err = 0;
+ int i;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
+ hweight += hweight_long(aq_nic->active_vlans[i]);
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ }
+
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+ );
+ if (err)
+ return err;
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (hweight < AQ_VLAN_MAX_FILTERS)
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
+ /* otherwise left in promiscuous mode */
+ }
+
+ return err;
+}
+
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int err = 0;
+
+ memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+ );
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
new file mode 100644
index 000000000000..c6a08c6585d5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.h: RX filters related functions. */
+
+#ifndef AQ_FILTERS_H
+#define AQ_FILTERS_H
+
+#include "aq_nic.h"
+
+enum aq_rx_filter_type {
+ aq_rx_filter_ethertype,
+ aq_rx_filter_vlan,
+ aq_rx_filter_l3l4
+};
+
+struct aq_rx_filter {
+ struct hlist_node aq_node;
+ enum aq_rx_filter_type type;
+ struct ethtool_rx_flow_spec aq_fsp;
+};
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic);
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic);
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id);
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic);
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic);
+
+#endif /* AQ_FILTERS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a1e70da358ca..81aab73dc22f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -18,6 +18,17 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
+#define AQ_RX_FIRST_LOC_FVLANID 0U
+#define AQ_RX_LAST_LOC_FVLANID 15U
+#define AQ_RX_FIRST_LOC_FETHERT 16U
+#define AQ_RX_LAST_LOC_FETHERT 31U
+#define AQ_RX_FIRST_LOC_FL3L4 32U
+#define AQ_RX_LAST_LOC_FL3L4 39U
+#define AQ_RX_MAX_RXNFC_LOC AQ_RX_LAST_LOC_FL3L4
+#define AQ_VLAN_MAX_FILTERS \
+ (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
+#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
+
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
@@ -130,6 +141,7 @@ struct aq_hw_s {
struct aq_ring_s;
struct aq_ring_param_s;
struct sk_buff;
+struct aq_rx_filter_l3l4;
struct aq_hw_ops {
@@ -183,6 +195,23 @@ struct aq_hw_ops {
int (*hw_packet_filter_set)(struct aq_hw_s *self,
unsigned int packet_filter);
+ int (*hw_filter_l3l4_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l3l4_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l2_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_l2_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_vlan_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans);
+
+ int (*hw_filter_vlan_ctrl)(struct aq_hw_s *self, bool enable);
+
int (*hw_multicast_list_set)(struct aq_hw_s *self,
u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 7c07eef275eb..2a11c1eefd8f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,6 +13,7 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
+#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
@@ -49,6 +50,11 @@ static int aq_ndev_open(struct net_device *ndev)
err = aq_nic_init(aq_nic);
if (err < 0)
goto err_exit;
+
+ err = aq_reapply_rxnfc_all_rules(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
err = aq_nic_start(aq_nic);
if (err < 0)
goto err_exit;
@@ -101,6 +107,21 @@ static int aq_ndev_set_features(struct net_device *ndev,
bool is_lro = false;
int err = 0;
+ if (!(features & NETIF_F_NTUPLE)) {
+ if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
+ err = aq_clear_rxnfc_all_rules(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+ if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ err = aq_filters_vlan_offload_off(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+
aq_cfg->features = features;
if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
@@ -119,6 +140,7 @@ static int aq_ndev_set_features(struct net_device *ndev,
err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
aq_cfg);
+err_exit:
return err;
}
@@ -147,6 +169,35 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
aq_nic_set_multicast_list(aq_nic, ndev);
}
+static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ set_bit(vid, aq_nic->active_vlans);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ clear_bit(vid, aq_nic->active_vlans);
+
+ if (-ENOENT == aq_del_fvlan_by_vlan(aq_nic, vid))
+ return aq_filters_vlans_update(aq_nic);
+
+ return 0;
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -154,5 +205,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
- .ndo_set_features = aq_ndev_set_features
+ .ndo_set_features = aq_ndev_set_features,
+ .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 7abdc0952425..0147c037ca96 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -44,7 +44,7 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
struct aq_rss_parameters *rss_params = &cfg->aq_rss;
int i = 0;
- static u8 rss_key[40] = {
+ static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
@@ -84,10 +84,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
- cfg->vlan_id = 0U;
-
- aq_nic_rss_init(self, cfg->num_rss_queues);
-
/*descriptors */
cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
@@ -108,6 +104,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+ aq_nic_rss_init(self, cfg->num_rss_queues);
+
cfg->irq_type = aq_pci_func_get_irq_type(self);
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 44ec47a3d60a..8e34c1e49bf2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,7 +35,6 @@ struct aq_nic_cfg_s {
u32 mtu;
u32 flow_control;
u32 link_speed_msk;
- u32 vlan_id;
u32 wol;
u16 is_mc_list_enabled;
u16 mc_list_count;
@@ -61,6 +60,23 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
+struct aq_hw_rx_fl2 {
+ struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
+};
+
+struct aq_hw_rx_fl3l4 {
+ u8 active_ipv4;
+ u8 active_ipv6:2;
+ u8 is_ipv6;
+};
+
+struct aq_hw_rx_fltrs_s {
+ struct hlist_head filter_list;
+ u16 active_filters;
+ struct aq_hw_rx_fl2 fl2;
+ struct aq_hw_rx_fl3l4 fl3l4;
+};
+
struct aq_nic_s {
atomic_t flags;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
@@ -81,10 +97,13 @@ struct aq_nic_s {
u32 count;
u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
+ /* Bitmask of VLANs currently assigned by Linux */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct pci_dev *pdev;
unsigned int msix_entry_mask;
u32 irqvecs;
+ struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 1d5d6b8df855..c8b44cdb91c1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -19,6 +19,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
+#include "aq_filters.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -309,6 +310,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
struct aq_nic_s *self = pci_get_drvdata(pdev);
if (self->ndev) {
+ aq_clear_rxnfc_all_rules(self);
if (self->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(self->ndev);
aq_nic_free_vectors(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index a7e853fa43c2..b58ca7cb8e9d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -21,7 +21,7 @@
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
.is_64_dma = true, \
- .msix_irqs = 4U, \
+ .msix_irqs = 8U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_B0_RSS_MAX, \
.tcs = HW_ATL_B0_TC_MAX, \
@@ -41,7 +41,9 @@
NETIF_F_RXHASH | \
NETIF_F_SG | \
NETIF_F_TSO | \
- NETIF_F_LRO, \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@@ -319,20 +321,11 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
- if (cfg->vlan_id) {
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
- hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
- hw_atl_rpf_vlan_untagged_act_set(self, 1U);
-
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
- hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
- } else {
- hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- }
+ /* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
/* Rx Interrupts */
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
@@ -945,6 +938,142 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_cmd_clear(self, location);
+ hw_atl_rpf_l4_spd_set(self, 0U, location);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location);
+ hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
+ } else {
+ int i;
+
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ hw_atl_rpfl3l4_cmd_clear(self, location + i);
+ hw_atl_rpf_l4_spd_set(self, 0U, location + i);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
+ }
+ hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ hw_atl_b0_hw_fl3l4_clear(self, data);
+
+ if (data->cmd) {
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
+ location,
+ data->ip_dst[0]);
+ hw_atl_rpfl3l4_ipv4_src_addr_set(self,
+ location,
+ data->ip_src[0]);
+ } else {
+ hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
+ location,
+ data->ip_dst);
+ hw_atl_rpfl3l4_ipv6_src_addr_set(self,
+ location,
+ data->ip_src);
+ }
+ }
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self,
+ !!data->user_priority_en,
+ data->location);
+ if (data->user_priority_en)
+ hw_atl_rpf_etht_user_priority_set(self,
+ data->user_priority,
+ data->location);
+
+ if (data->queue < 0) {
+ hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
+ } else {
+ hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
+ hw_atl_rpf_etht_flr_set(self, 0U, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
+
+ return aq_hw_err_from_flags(self);
+}
+
+/**
+ * @brief Set VLAN filter table
+ * @details Configure the VLAN filter table to accept traffic for the given
+ * VLAN IDs and, optionally, assign it to an RX queue.
+ * Note: use this function under vlan promisc mode so that traffic is not lost.
+ *
+ * @param self AQ HW structure
+ * @param aq_vlans VLAN filter configuration
+ * @return 0 - OK, <0 - error
+ */
+static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id,
+ i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+ /* set promisc mode in case of disabling the vlan filter */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@@ -969,6 +1098,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
.hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
.hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
+ .hw_filter_l2_set = hw_atl_b0_hw_fl2_set,
+ .hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
+ .hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
+ .hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 5502ec5f0f69..939f77e2e117 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -898,6 +898,24 @@ void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
vlan_id_flr);
}
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_EN_F_MSK,
+ HW_ATL_RPF_VL_RXQ_EN_F_SHIFT,
+ vlan_rxq_en);
+}
+
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_F_MSK,
+ HW_ATL_RPF_VL_RXQ_F_SHIFT,
+ vlan_rxq);
+}
+
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter)
{
@@ -965,6 +983,20 @@ void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter),
+ HW_ATL_RPF_L4_SPD_MSK,
+ HW_ATL_RPF_L4_SPD_SHIFT, val);
+}
+
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter),
+ HW_ATL_RPF_L4_DPD_MSK,
+ HW_ATL_RPF_L4_DPD_SHIFT, val);
+}
+
/* RPO: rx packet offload */
void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en)
@@ -1476,3 +1508,80 @@ void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT,
up_force_intr);
}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location),
+ ipv4_dest);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location),
+ ipv4_src);
+}
+
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), cmd);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ ipv6_src[i]);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ ipv6_dest[i]);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 41f239928c15..03c570d115fe 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -441,6 +441,14 @@ void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
u32 filter);
+/* Set VLAN RX queue assignment enable */
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter);
+
+/* Set VLAN RX queue */
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter);
+
/* set ethertype filter enable */
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter);
@@ -475,6 +483,12 @@ void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
/* set ethertype filter */
void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+/* set L4 source port */
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
+/* set L4 destination port */
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
/* rpo */
/* set ipv4 header checksum offload enable */
@@ -704,4 +718,38 @@ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
+/* clear ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* set ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest);
+
+/* set ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src);
+
+/* set command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);
+
+/* set ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src);
+
+/* set ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest);
+
#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index a715fa317b1c..8470d92db812 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1092,24 +1092,43 @@
/* Default value of bitfield vl_id{F}[B:0] */
#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
-/* RX et_en{F} Bitfield Definitions
- * Preprocessor definitions for the bitfield "et_en{F}".
+/* RX vl_rxq_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
- * PORT="pif_rpf_et_en_i[0]"
- */
-
-/* Register address for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
-/* Bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
-/* Inverted bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
-/* Lower bit position of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_SHIFT 31
-/* Width of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_WIDTH 1
-/* Default value of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
+ * PORT="pif_rpf_vl_rxq_en_i"
+ */
+
+/* Register address for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000
+/* Inverted bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF
+/* Lower bit position of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28
+/* Width of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1
+/* Default value of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0
+
+/* RX vl_rxq{F}[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq0_i[4:0]"
+ */
+
+/* Register address for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000
+/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF
+/* Lower bit position of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20
+/* Width of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5
+/* Default value of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
@@ -1263,6 +1282,44 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+/* RX l4_sp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
+ * Parameter: srcport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_sp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4)
+/* Bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_SHIFT 0
+/* Width of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_WIDTH 16
+/* Default value of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0
+
+/* RX l4_dp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_dp{D}[F:0]".
+ * Parameter: destport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_dp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4)
+/* Bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_SHIFT 0
+/* Width of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_WIDTH 16
+/* Default value of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0
+
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_rpo_ipv4_chk_en_i"
@@ -2418,4 +2475,48 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
+#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
+#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
+#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053D0 + (location) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 7def1cb8ab9d..9b74a3197d7f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -263,6 +263,8 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
10, 1000U);
+ if (err)
+ return err;
}
if (self->rbl_enabled)
@@ -454,8 +456,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
(fw.val =
aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
fw.tid), 1000U, 100U);
- if (err < 0)
- goto err_exit;
if (fw.len == 0xFFFFU) {
err = hw_atl_utils_fw_rpc_call(self, sw.len);
@@ -463,8 +463,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
} while (sw.tid != fw.tid || 0xFFFFU == fw.len);
- if (err < 0)
- goto err_exit;
if (rpc) {
if (fw.len) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 3613fca64b58..48278e333462 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -240,6 +240,64 @@ struct __packed offload_info {
u8 buf[0];
};
+enum hw_atl_rx_action_with_traffic {
+ HW_ATL_RX_DISCARD,
+ HW_ATL_RX_HOST,
+};
+
+struct aq_rx_filter_vlan {
+ u8 enable;
+ u8 location;
+ u16 vlan_id;
+ u8 queue;
+};
+
+struct aq_rx_filter_l2 {
+ s8 queue;
+ u8 location;
+ u8 user_priority_en;
+ u8 user_priority;
+ u16 ethertype;
+};
+
+struct aq_rx_filter_l3l4 {
+ u32 cmd;
+ u8 location;
+ u32 ip_dst[4];
+ u32 ip_src[4];
+ u16 p_dst;
+ u16 p_src;
+ u8 is_ipv6;
+};
+
+enum hw_atl_rx_protocol_value_l3l4 {
+ HW_ATL_RX_TCP,
+ HW_ATL_RX_UDP,
+ HW_ATL_RX_SCTP,
+ HW_ATL_RX_ICMP
+};
+
+enum hw_atl_rx_ctrl_registers_l3l4 {
+ HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22),
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 = BIT(23),
+ HW_ATL_RX_ENABLE_ARP_FLTR_L3 = BIT(24),
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 = BIT(25),
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 = BIT(26),
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4 = BIT(27),
+ HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 = BIT(28),
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3 = BIT(29),
+ HW_ATL_RX_ENABLE_L3_IPV6 = BIT(30),
+ HW_ATL_RX_ENABLE_FLTR_L3L4 = BIT(31)
+};
+
+#define HW_ATL_RX_QUEUE_FL3L4_SHIFT 8U
+#define HW_ATL_RX_ACTION_FL3F4_SHIFT 16U
+
+#define HW_ATL_RX_CNT_REG_ADDR_IPV6 4U
+
+#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
+ ((location) - AQ_RX_FIRST_LOC_FL3L4)
+
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index bd277b0dc615..4406325fdd9f 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -432,7 +432,8 @@ static int arc_emac_open(struct net_device *ndev)
phy_dev->autoneg = AUTONEG_ENABLE;
phy_dev->speed = 0;
phy_dev->duplex = 0;
- phy_dev->advertising &= phy_dev->supported;
+ linkmode_and(phy_dev->advertising, phy_dev->advertising,
+ phy_dev->supported);
priv->last_rx_bd = 0;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e445ab724827..f44808959ff3 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2248,6 +2248,7 @@ static void b44_adjust_link(struct net_device *dev)
static int b44_register_phy_one(struct b44 *bp)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mii_bus *mii_bus;
struct ssb_device *sdev = bp->sdev;
struct phy_device *phydev;
@@ -2303,11 +2304,12 @@ static int b44_register_phy_one(struct b44 *bp)
}
/* mask with MAC supported features */
- phydev->supported &= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_MII);
- phydev->advertising = phydev->supported;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
bp->old_link = 0;
bp->phy_addr = phydev->mdio.addr;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0e2d99c737e3..4574275ef445 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1068,6 +1068,7 @@ static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
+ unsigned int index;
u32 reg;
/* Disable RXCHK, active filters and Broadcom tag matching */
@@ -1076,6 +1077,15 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
rxchk_writel(priv, reg, RXCHK_CONTROL);
+ /* Make sure we restore correct CID index in case HW lost
+ * its context during deep idle state
+ */
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ rxchk_writel(priv, priv->filters_loc[index] <<
+ RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
+ rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+ }
+
/* Clear the MagicPacket detection logic */
mpd_enable_set(priv, false);
@@ -2189,6 +2199,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+ priv->filters_loc[index] = nfc->fs.location;
set_bit(index, priv->filters);
return 0;
@@ -2208,6 +2219,7 @@ static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
* be taken care of during suspend time by bcm_sysport_suspend_to_wol
*/
clear_bit(index, priv->filters);
+ priv->filters_loc[index] = 0;
return 0;
}
@@ -2312,7 +2324,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
struct bcm_sysport_priv *priv;
struct net_device *slave_dev;
unsigned int num_tx_queues;
- unsigned int q, start, port;
+ unsigned int q, qp, port;
struct net_device *dev;
priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
@@ -2351,20 +2363,61 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
priv->per_port_num_tx_queues = num_tx_queues;
- start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
- for (q = 0; q < num_tx_queues; q++) {
- ring = &priv->tx_rings[q + start];
+ for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
+ q++) {
+ ring = &priv->tx_rings[q];
+
+ if (ring->inspect)
+ continue;
/* Just remember the mapping actual programming done
* during bcm_sysport_init_tx_ring
*/
- ring->switch_queue = q;
+ ring->switch_queue = qp;
ring->switch_port = port;
ring->inspect = true;
priv->ring_map[q + port * num_tx_queues] = ring;
+ qp++;
+ }
+
+ return 0;
+}
+
+static int bcm_sysport_unmap_queues(struct notifier_block *nb,
+ struct dsa_notifier_register_info *info)
+{
+ struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_priv *priv;
+ struct net_device *slave_dev;
+ unsigned int num_tx_queues;
+ struct net_device *dev;
+ unsigned int q, port;
+
+ priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
+ if (priv->netdev != info->master)
+ return 0;
+
+ dev = info->master;
+
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return 0;
+
+ port = info->port_number;
+ slave_dev = info->info.dev;
+
+ num_tx_queues = slave_dev->real_num_tx_queues;
+
+ for (q = 0; q < dev->num_tx_queues; q++) {
+ ring = &priv->tx_rings[q];
- /* Set all queues as being used now */
- set_bit(q + start, &priv->queue_bitmap);
+ if (ring->switch_port != port)
+ continue;
+
+ if (!ring->inspect)
+ continue;
+
+ ring->inspect = false;
+ priv->ring_map[q + port * num_tx_queues] = NULL;
}
return 0;
@@ -2373,14 +2426,18 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct dsa_notifier_register_info *info;
-
- if (event != DSA_PORT_REGISTER)
- return NOTIFY_DONE;
+ int ret = NOTIFY_DONE;
- info = ptr;
+ switch (event) {
+ case DSA_PORT_REGISTER:
+ ret = bcm_sysport_map_queues(nb, ptr);
+ break;
+ case DSA_PORT_UNREGISTER:
+ ret = bcm_sysport_unmap_queues(nb, ptr);
+ break;
+ }
- return notifier_from_errno(bcm_sysport_map_queues(nb, info));
+ return notifier_from_errno(ret);
}
#define REV_FMT "v%2x.%02x"
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index a7a230884a87..0887e6356649 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -786,6 +786,7 @@ struct bcm_sysport_priv {
/* Ethtool */
u32 msg_enable;
DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
+ u32 filters_loc[RXCHK_BRCM_TAG_MAX];
struct bcm_sysport_stats64 stats64;
@@ -795,7 +796,6 @@ struct bcm_sysport_priv {
/* map information between switch port queues and local queues */
struct notifier_block dsa_notifier;
unsigned int per_port_num_tx_queues;
- unsigned long queue_bitmap;
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index a4a90b6cdb46..749d0ef44371 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1105,11 +1105,39 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnx2x *bp = netdev_priv(dev);
+ char version[ETHTOOL_FWVERS_LEN];
+ int ext_dev_info_offset;
+ u32 mbi;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
+ memset(version, 0, sizeof(version));
+ snprintf(version, ETHTOOL_FWVERS_LEN, " storm %d.%d.%d.%d",
+ BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION);
+ strlcat(info->version, version, sizeof(info->version));
+
+ if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
+ ext_dev_info_offset = SHMEM2_RD(bp,
+ extended_dev_info_shared_addr);
+ mbi = REG_RD(bp, ext_dev_info_offset +
+ offsetof(struct extended_dev_info_shared_cfg,
+ mbi_version));
+ if (mbi) {
+ memset(version, 0, sizeof(version));
+ snprintf(version, ETHTOOL_FWVERS_LEN, "mbi %d.%d.%d ",
+ (mbi & 0xff000000) >> 24,
+ (mbi & 0x00ff0000) >> 16,
+ (mbi & 0x0000ff00) >> 8);
+ strlcpy(info->fw_version, version,
+ sizeof(info->fw_version));
+ }
+ }
+
+ memset(version, 0, sizeof(version));
+ bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ strlcat(info->fw_version, version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index f8b810313094..d9057c8bbeef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1140,6 +1140,11 @@ struct shm_dev_info { /* size */
};
+struct extended_dev_info_shared_cfg {
+ u32 reserved[18];
+ u32 mbi_version;
+ u32 mbi_date;
+};
#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
#error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b164f705709d..3b5b47e98c73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9360,10 +9360,16 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
rc);
- /* Remove all currently configured VLANs */
- rc = bnx2x_del_all_vlans(bp);
- if (rc < 0)
- BNX2X_ERR("Failed to delete all VLANs\n");
+ /* The whole *vlan_obj structure may be not initialized if VLAN
+ * filtering offload is not supported by hardware. Currently this is
+ * true for all hardware covered by CHIP_IS_E1x().
+ */
+ if (!CHIP_IS_E1x(bp)) {
+ /* Remove all currently configured VLANs */
+ rc = bnx2x_del_all_vlans(bp);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all VLANs\n");
+ }
/* Disable LLH */
if (!CHIP_IS_E1(bp))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5d21c14853ac..3aa80da973d7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -118,6 +118,7 @@ enum board_idx {
NETXTREME_E_VF,
NETXTREME_C_VF,
NETXTREME_S_VF,
+ NETXTREME_E_P5_VF,
};
/* indexed by enum above */
@@ -160,6 +161,7 @@ static const struct {
[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
+ [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -210,6 +212,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -237,7 +240,7 @@ static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
- idx == NETXTREME_S_VF);
+ idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -1809,7 +1812,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
case CMPL_BASE_TYPE_HWRM_DONE:
seq_id = le16_to_cpu(h_cmpl->sequence_id);
if (seq_id == bp->hwrm_intr_seq_id)
- bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+ bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
else
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
break;
@@ -2372,7 +2375,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
rmem->pg_arr[i] = NULL;
}
if (rmem->pg_tbl) {
- dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ dma_free_coherent(&pdev->dev, pg_tbl_size,
rmem->pg_tbl, rmem->pg_tbl_map);
rmem->pg_tbl = NULL;
}
@@ -2390,9 +2397,12 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
valid_bit = PTU_PTE_VALID;
- if (rmem->nr_pages > 1) {
- rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
- rmem->nr_pages * 8,
+ if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
&rmem->pg_tbl_map,
GFP_KERNEL);
if (!rmem->pg_tbl)
@@ -2409,7 +2419,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->nr_pages > 1) {
+ if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
extra_bits |= PTU_PTE_NEXT_TO_LAST;
@@ -3276,6 +3286,27 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
}
+
+ if (bp->hwrm_cmd_kong_resp_addr) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ bp->hwrm_cmd_kong_resp_addr,
+ bp->hwrm_cmd_kong_resp_dma_addr);
+ bp->hwrm_cmd_kong_resp_addr = NULL;
+ }
+}
+
+static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwrm_cmd_kong_resp_addr =
+ dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &bp->hwrm_cmd_kong_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_cmd_kong_resp_addr)
+ return -ENOMEM;
+
+ return 0;
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3317,9 +3348,8 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
return 0;
}
-static void bnxt_free_stats(struct bnxt *bp)
+static void bnxt_free_port_stats(struct bnxt *bp)
{
- u32 size, i;
struct pci_dev *pdev = bp->pdev;
bp->flags &= ~BNXT_FLAG_PORT_STATS;
@@ -3345,6 +3375,12 @@ static void bnxt_free_stats(struct bnxt *bp)
bp->hw_rx_port_stats_ext_map);
bp->hw_rx_port_stats_ext = NULL;
}
+}
+
+static void bnxt_free_ring_stats(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int size, i;
if (!bp->bnapi)
return;
@@ -3384,6 +3420,9 @@ static int bnxt_alloc_stats(struct bnxt *bp)
}
if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
+ if (bp->hw_rx_port_stats)
+ goto alloc_ext_stats;
+
bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
sizeof(struct tx_port_stats) + 1024;
@@ -3400,11 +3439,15 @@ static int bnxt_alloc_stats(struct bnxt *bp)
sizeof(struct rx_port_stats) + 512;
bp->flags |= BNXT_FLAG_PORT_STATS;
+alloc_ext_stats:
/* Display extended statistics only if FW supports it */
if (bp->hwrm_spec_code < 0x10804 ||
bp->hwrm_spec_code == 0x10900)
return 0;
+ if (bp->hw_rx_port_stats_ext)
+ goto alloc_tx_ext_stats;
+
bp->hw_rx_port_stats_ext =
dma_zalloc_coherent(&pdev->dev,
sizeof(struct rx_port_stats_ext),
@@ -3413,6 +3456,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (!bp->hw_rx_port_stats_ext)
return 0;
+alloc_tx_ext_stats:
+ if (bp->hw_tx_port_stats_ext)
+ return 0;
+
if (bp->hwrm_spec_code >= 0x10902) {
bp->hw_tx_port_stats_ext =
dma_zalloc_coherent(&pdev->dev,
@@ -3520,7 +3567,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_cp_rings(bp);
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
- bnxt_free_stats(bp);
+ bnxt_free_ring_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
kfree(bp->tx_ring_map);
@@ -3721,7 +3768,10 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
req->req_type = cpu_to_le16(req_type);
req->cmpl_ring = cpu_to_le16(cmpl_ring);
req->target_id = cpu_to_le16(target_id);
- req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+ if (bnxt_kong_hwrm_message(bp, req))
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
+ else
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
@@ -3736,11 +3786,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
struct hwrm_short_input short_input = {0};
-
- req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
- memset(resp, 0, PAGE_SIZE);
- cp_ring_id = le16_to_cpu(req->cmpl_ring);
- intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+ u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
+ u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
+ u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
+ u16 dst = BNXT_HWRM_CHNL_CHIMP;
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
if (msg_len > bp->hwrm_max_ext_req_len ||
@@ -3748,6 +3797,23 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
return -EINVAL;
}
+ if (bnxt_hwrm_kong_chnl(bp, req)) {
+ dst = BNXT_HWRM_CHNL_KONG;
+ bar_offset = BNXT_GRCPF_REG_KONG_COMM;
+ doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
+ resp = bp->hwrm_cmd_kong_resp_addr;
+ resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
+ }
+
+ memset(resp, 0, PAGE_SIZE);
+ cp_ring_id = le16_to_cpu(req->cmpl_ring);
+ intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+ req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
+ /* currently supports only one outstanding message */
+ if (intr_process)
+ bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
+
if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
msg_len > BNXT_HWRM_MAX_REQ_LEN) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
@@ -3781,17 +3847,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
/* Write request msg to hwrm channel */
- __iowrite32_copy(bp->bar0, data, msg_len / 4);
+ __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
for (i = msg_len; i < max_req_len; i += 4)
- writel(0, bp->bar0 + i);
-
- /* currently supports only one outstanding message */
- if (intr_process)
- bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
+ writel(0, bp->bar0 + bar_offset + i);
/* Ring channel doorbell */
- writel(1, bp->bar0 + 0x100);
+ writel(1, bp->bar0 + doorbell_offset);
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
@@ -3806,10 +3868,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
- resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+ resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
+
if (intr_process) {
+ u16 seq_id = bp->hwrm_intr_seq_id;
+
/* Wait until hwrm response cmpl interrupt is processed */
- while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+ while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
i++ < tmo_count) {
/* on first few passes, just barely sleep */
if (i < HWRM_SHORT_TIMEOUT_COUNTER)
@@ -3820,14 +3885,14 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
HWRM_MAX_TIMEOUT);
}
- if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+ if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
le16_to_cpu(req->req_type));
return -1;
}
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
- valid = bp->hwrm_cmd_resp_addr + len - 1;
+ valid = resp_addr + len - 1;
} else {
int j;
@@ -3855,7 +3920,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
/* Last byte of resp contains valid bit */
- valid = bp->hwrm_cmd_resp_addr + len - 1;
+ valid = resp_addr + len - 1;
for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
/* make sure we read from updated DMA memory */
dma_rmb();
@@ -3990,6 +4055,10 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+ req.flags |= cpu_to_le32(
+ FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
+
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
@@ -4118,12 +4187,11 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
- int rc = 0;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
- struct hwrm_cfa_ntuple_filter_alloc_output *resp =
- bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct flow_keys *keys = &fltr->fkeys;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+ int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
@@ -4169,8 +4237,10 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
+ if (!rc) {
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
fltr->filter_id = resp->ntuple_filter_id;
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -5161,7 +5231,6 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
- cp = min_t(u16, cp, stats);
hw_resc->resv_irqs = cp;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
int rx = hw_resc->resv_rx_rings;
@@ -5180,6 +5249,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_hw_ring_grps = rx;
}
hw_resc->resv_cp_rings = cp;
+ hw_resc->resv_stat_ctxs = stats;
}
mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
@@ -5209,7 +5279,7 @@ static bool bnxt_rfs_supported(struct bnxt *bp);
static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
int tx_rings, int rx_rings, int ring_grps,
- int cp_rings, int vnics)
+ int cp_rings, int stats, int vnics)
{
u32 enables = 0;
@@ -5251,7 +5321,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_rsscos_ctxs =
cpu_to_le16(ring_grps + 1);
}
- req->num_stat_ctxs = req->num_cmpl_rings;
+ req->num_stat_ctxs = cpu_to_le16(stats);
req->num_vnics = cpu_to_le16(vnics);
}
req->enables = cpu_to_le32(enables);
@@ -5261,7 +5331,7 @@ static void
__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
struct hwrm_func_vf_cfg_input *req, int tx_rings,
int rx_rings, int ring_grps, int cp_rings,
- int vnics)
+ int stats, int vnics)
{
u32 enables = 0;
@@ -5294,7 +5364,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
}
- req->num_stat_ctxs = req->num_cmpl_rings;
+ req->num_stat_ctxs = cpu_to_le16(stats);
req->num_vnics = cpu_to_le16(vnics);
req->enables = cpu_to_le32(enables);
@@ -5302,13 +5372,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
static int
bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats, int vnics)
{
struct hwrm_func_cfg_input req = {0};
int rc;
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
if (!req.enables)
return 0;
@@ -5325,7 +5395,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
static int
bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats, int vnics)
{
struct hwrm_func_vf_cfg_input req = {0};
int rc;
@@ -5336,7 +5406,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -5346,15 +5416,17 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
- int cp, int vnic)
+ int cp, int stat, int vnic)
{
if (BNXT_PF(bp))
- return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
+ return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
+ vnic);
else
- return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
+ return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
+ vnic);
}
-static int bnxt_nq_rings_in_use(struct bnxt *bp)
+int bnxt_nq_rings_in_use(struct bnxt *bp)
{
int cp = bp->cp_nr_rings;
int ulp_msix, ulp_base;
@@ -5380,12 +5452,17 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
return cp;
}
+static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
+{
+ return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
+}
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int cp = bnxt_cp_rings_in_use(bp);
int nq = bnxt_nq_rings_in_use(bp);
- int rx = bp->rx_nr_rings;
+ int rx = bp->rx_nr_rings, stat;
int vnic = 1, grp = rx;
if (bp->hwrm_spec_code < 0x10601)
@@ -5398,9 +5475,11 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
+ stat = bnxt_get_func_stat_ctxs(bp);
if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
+ hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp &&
!(bp->flags & BNXT_FLAG_CHIP_P5))))
return true;
@@ -5414,8 +5493,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
int tx = bp->tx_nr_rings;
int rx = bp->rx_nr_rings;
int grp, rx_rings, rc;
+ int vnic = 1, stat;
bool sh = false;
- int vnic = 1;
if (!bnxt_need_reserve_rings(bp))
return 0;
@@ -5427,8 +5506,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
grp = bp->rx_nr_rings;
+ stat = bnxt_get_func_stat_ctxs(bp);
- rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
+ rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
if (rc)
return rc;
@@ -5438,6 +5518,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
cp = hw_resc->resv_irqs;
grp = hw_resc->resv_hw_ring_grps;
vnic = hw_resc->resv_vnics;
+ stat = hw_resc->resv_stat_ctxs;
}
rx_rings = rx;
@@ -5456,6 +5537,10 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
}
rx_rings = min_t(int, rx_rings, grp);
+ cp = min_t(int, cp, bp->cp_nr_rings);
+ if (stat > bnxt_get_ulp_stat_ctxs(bp))
+ stat -= bnxt_get_ulp_stat_ctxs(bp);
+ cp = min_t(int, cp, stat);
rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx = rx_rings << 1;
@@ -5464,14 +5549,15 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bp->rx_nr_rings = rx_rings;
bp->cp_nr_rings = cp;
- if (!tx || !rx || !cp || !grp || !vnic)
+ if (!tx || !rx || !cp || !grp || !vnic || !stat)
return -ENOMEM;
return rc;
}
static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats,
+ int vnics)
{
struct hwrm_func_vf_cfg_input req = {0};
u32 flags;
@@ -5481,7 +5567,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return 0;
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -5499,14 +5585,15 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats,
+ int vnics)
{
struct hwrm_func_cfg_input req = {0};
u32 flags;
int rc;
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -5527,17 +5614,19 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats,
+ int vnics)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings, vnics);
+ ring_grps, cp_rings, stats,
+ vnics);
return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
@@ -5962,8 +6051,11 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
pg_size = 2 << 4;
*pg_attr = pg_size;
- if (rmem->nr_pages > 1) {
- *pg_attr |= 1;
+ if (rmem->depth >= 1) {
+ if (rmem->depth == 2)
+ *pg_attr |= 2;
+ else
+ *pg_attr |= 1;
*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
} else {
*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
@@ -6040,6 +6132,22 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
&req.stat_pg_size_stat_lvl,
&req.stat_page_dir);
}
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
+ ctx_pg = &ctx->mrav_mem;
+ req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.mrav_pg_size_mrav_lvl,
+ &req.mrav_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
+ ctx_pg = &ctx->tim_mem;
+ req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.tim_pg_size_tim_lvl,
+ &req.tim_page_dir);
+ }
for (i = 0, num_entries = &req.tqm_sp_num_entries,
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
@@ -6060,25 +6168,104 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
- struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
+ struct bnxt_ctx_pg_info *ctx_pg)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
- if (!mem_size)
- return 0;
-
- rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
- if (rmem->nr_pages > MAX_CTX_PAGES) {
- rmem->nr_pages = 0;
- return -EINVAL;
- }
rmem->page_size = BNXT_PAGE_SIZE;
rmem->pg_arr = ctx_pg->ctx_pg_arr;
rmem->dma_arr = ctx_pg->ctx_dma_arr;
rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+ if (rmem->depth >= 1)
+ rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
return bnxt_alloc_ring(bp, rmem);
}
+static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
+ u8 depth)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ int rc;
+
+ if (!mem_size)
+ return 0;
+
+ ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+ if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
+ ctx_pg->nr_pages = 0;
+ return -EINVAL;
+ }
+ if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
+ int nr_tbls, i;
+
+ rmem->depth = 2;
+ ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
+ GFP_KERNEL);
+ if (!ctx_pg->ctx_pg_tbl)
+ return -ENOMEM;
+ nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
+ rmem->nr_pages = nr_tbls;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
+ if (rc)
+ return rc;
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
+ if (!pg_tbl)
+ return -ENOMEM;
+ ctx_pg->ctx_pg_tbl[i] = pg_tbl;
+ rmem = &pg_tbl->ring_mem;
+ rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
+ rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
+ rmem->depth = 1;
+ rmem->nr_pages = MAX_CTX_PAGES;
+ if (i == (nr_tbls - 1))
+ rmem->nr_pages = ctx_pg->nr_pages %
+ MAX_CTX_PAGES;
+ rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
+ if (rc)
+ break;
+ }
+ } else {
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+ if (rmem->nr_pages > 1 || depth)
+ rmem->depth = 1;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
+ }
+ return rc;
+}
+
+static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
+ ctx_pg->ctx_pg_tbl) {
+ int i, nr_tbls = rmem->nr_pages;
+
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnxt_ctx_pg_info *pg_tbl;
+ struct bnxt_ring_mem_info *rmem2;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ if (!pg_tbl)
+ continue;
+ rmem2 = &pg_tbl->ring_mem;
+ bnxt_free_ring(bp, rmem2);
+ ctx_pg->ctx_pg_arr[i] = NULL;
+ kfree(pg_tbl);
+ ctx_pg->ctx_pg_tbl[i] = NULL;
+ }
+ kfree(ctx_pg->ctx_pg_tbl);
+ ctx_pg->ctx_pg_tbl = NULL;
+ }
+ bnxt_free_ring(bp, rmem);
+ ctx_pg->nr_pages = 0;
+}
+
static void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
@@ -6089,16 +6276,18 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
if (ctx->tqm_mem[0]) {
for (i = 0; i < bp->max_q + 1; i++)
- bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem);
+ bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
kfree(ctx->tqm_mem[0]);
ctx->tqm_mem[0] = NULL;
}
- bnxt_free_ring(bp, &ctx->stat_mem.ring_mem);
- bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem);
- bnxt_free_ring(bp, &ctx->cq_mem.ring_mem);
- bnxt_free_ring(bp, &ctx->srq_mem.ring_mem);
- bnxt_free_ring(bp, &ctx->qp_mem.ring_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
}
@@ -6107,6 +6296,9 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries;
+ u32 extra_srqs = 0;
+ u32 extra_qps = 0;
+ u8 pg_lvl = 1;
int i, rc;
rc = bnxt_hwrm_func_backing_store_qcaps(bp);
@@ -6119,24 +6311,31 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ if (bp->flags & BNXT_FLAG_ROCE_CAP) {
+ pg_lvl = 2;
+ extra_qps = 65536;
+ extra_srqs = 8192;
+ }
+
ctx_pg = &ctx->qp_mem;
- ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+ ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
+ extra_qps;
mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
if (rc)
return rc;
ctx_pg = &ctx->srq_mem;
- ctx_pg->entries = ctx->srq_max_l2_entries;
+ ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
if (rc)
return rc;
ctx_pg = &ctx->cq_mem;
- ctx_pg->entries = ctx->cq_max_l2_entries;
+ ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
if (rc)
return rc;
@@ -6144,26 +6343,47 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
if (rc)
return rc;
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ if (rc)
+ return rc;
+
+ ena = 0;
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ goto skip_rdma;
+
+ ctx_pg = &ctx->mrav_mem;
+ ctx_pg->entries = extra_qps * 4;
+ mem_size = ctx->mrav_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
if (rc)
return rc;
+ ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
- entries = ctx->qp_max_l2_entries;
+ ctx_pg = &ctx->tim_mem;
+ ctx_pg->entries = ctx->qp_mem.entries;
+ mem_size = ctx->tim_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
+
+skip_rdma:
+ entries = ctx->qp_max_l2_entries + extra_qps;
entries = roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
ctx->tqm_max_entries_per_ring);
- for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
+ for (i = 0; i < bp->max_q + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = entries;
mem_size = ctx->tqm_entry_size * entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
if (rc)
return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
@@ -6190,7 +6410,8 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
req.fid = cpu_to_le16(0xffff);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
if (rc) {
rc = -EIO;
goto hwrm_func_resc_qcaps_exit;
@@ -6220,7 +6441,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
if (bp->flags & BNXT_FLAG_CHIP_P5) {
u16 max_msix = le16_to_cpu(resp->max_msix);
- hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix);
+ hw_resc->max_nqs = max_msix;
hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
}
@@ -6442,6 +6663,13 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
(dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
+ if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
+
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6488,6 +6716,7 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
{
struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
struct hwrm_port_qstats_ext_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
int rc;
@@ -6510,6 +6739,34 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;
}
+ if (bp->fw_tx_stats_ext_size <=
+ offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ bp->pri2cos_valid = 0;
+ return rc;
+ }
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
+ req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+
+ rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ struct hwrm_queue_pri2cos_qcfg_output *resp2;
+ u8 *pri2cos;
+ int i, j;
+
+ resp2 = bp->hwrm_cmd_resp_addr;
+ pri2cos = &resp2->pri0_cos_queue_id;
+ for (i = 0; i < 8; i++) {
+ u8 queue_id = pri2cos[i];
+
+ for (j = 0; j < bp->max_q; j++) {
+ if (bp->q_ids[j] == queue_id)
+ bp->pri2cos[i] = j;
+ }
+ }
+ bp->pri2cos_valid = 1;
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -7034,17 +7291,12 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
return bp->hw_resc.max_stat_ctxs;
}
-void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
-{
- bp->hw_resc.max_stat_ctxs = max;
-}
-
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
return bp->hw_resc.max_cp_rings;
}
-unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
+static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
unsigned int cp = bp->hw_resc.max_cp_rings;
@@ -7058,6 +7310,9 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
+
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}
@@ -7066,6 +7321,26 @@ static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
bp->hw_resc.max_irqs = max_irqs;
}
+unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
+{
+ unsigned int cp;
+
+ cp = bnxt_get_max_func_cp_rings_for_en(bp);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return cp - bp->rx_nr_rings - bp->tx_nr_rings;
+ else
+ return cp - bp->cp_nr_rings;
+}
+
+unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
+{
+ unsigned int stat;
+
+ stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
+ stat -= bp->cp_nr_rings;
+ return stat;
+}
+
int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
int max_cp = bnxt_get_max_func_cp_rings(bp);
@@ -7203,23 +7478,26 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp)
{
int tcs = netdev_get_num_tc(bp->dev);
+ bool reinit_irq = false;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
- rc = __bnxt_reserve_rings(bp);
- if (rc) {
- netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
- return rc;
- }
if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
- rc = bnxt_init_int_mode(bp);
+ reinit_irq = true;
+ }
+ rc = __bnxt_reserve_rings(bp);
+ if (reinit_irq) {
+ if (!rc)
+ rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
- if (rc)
- return rc;
+ }
+ if (rc) {
+ netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
+ return rc;
}
if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
@@ -7227,7 +7505,6 @@ int bnxt_reserve_rings(struct bnxt *bp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
- bp->num_stat_ctxs = bp->cp_nr_rings;
return 0;
}
@@ -7821,6 +8098,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_stat_ctxs = 0;
hw_resc->resv_irqs = 0;
hw_resc->resv_tx_rings = 0;
hw_resc->resv_rx_rings = 0;
@@ -8260,6 +8538,9 @@ static bool bnxt_drv_busy(struct bnxt *bp)
test_bit(BNXT_STATE_READ_STATS, &bp->state));
}
+static void bnxt_get_ring_stats(struct bnxt *bp,
+ struct rtnl_link_stats64 *stats);
+
static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bool link_re_init)
{
@@ -8285,6 +8566,9 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
del_timer_sync(&bp->timer);
bnxt_free_skbs(bp);
+ /* Save ring stats before shutdown */
+ if (bp->bnapi)
+ bnxt_get_ring_stats(bp, &bp->net_stats_prev);
if (irq_re_init) {
bnxt_free_irq(bp);
bnxt_del_napi(bp);
@@ -8346,23 +8630,12 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-static void
-bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+static void bnxt_get_ring_stats(struct bnxt *bp,
+ struct rtnl_link_stats64 *stats)
{
- u32 i;
- struct bnxt *bp = netdev_priv(dev);
+ int i;
- set_bit(BNXT_STATE_READ_STATS, &bp->state);
- /* Make sure bnxt_close_nic() sees that we are reading stats before
- * we check the BNXT_STATE_OPEN flag.
- */
- smp_mb__after_atomic();
- if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- clear_bit(BNXT_STATE_READ_STATS, &bp->state);
- return;
- }
- /* TODO check if we need to synchronize with bnxt_close path */
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -8391,6 +8664,40 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
}
+}
+
+static void bnxt_add_prev_stats(struct bnxt *bp,
+ struct rtnl_link_stats64 *stats)
+{
+ struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
+
+ stats->rx_packets += prev_stats->rx_packets;
+ stats->tx_packets += prev_stats->tx_packets;
+ stats->rx_bytes += prev_stats->rx_bytes;
+ stats->tx_bytes += prev_stats->tx_bytes;
+ stats->rx_missed_errors += prev_stats->rx_missed_errors;
+ stats->multicast += prev_stats->multicast;
+ stats->tx_dropped += prev_stats->tx_dropped;
+}
+
+static void
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ set_bit(BNXT_STATE_READ_STATS, &bp->state);
+ /* Make sure bnxt_close_nic() sees that we are reading stats before
+ * we check the BNXT_STATE_OPEN flag.
+ */
+ smp_mb__after_atomic();
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ clear_bit(BNXT_STATE_READ_STATS, &bp->state);
+ *stats = bp->net_stats_prev;
+ return;
+ }
+
+ bnxt_get_ring_stats(bp, stats);
+ bnxt_add_prev_stats(bp, stats);
if (bp->flags & BNXT_FLAG_PORT_STATS) {
struct rx_port_stats *rx = bp->hw_rx_port_stats;
@@ -8626,12 +8933,12 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (vnics == bp->hw_resc.resv_vnics)
return true;
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
if (vnics <= bp->hw_resc.resv_vnics)
return true;
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
return false;
#else
return false;
@@ -9042,7 +9349,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, tx_sets = 1;
- int tx_rings_needed;
+ int tx_rings_needed, stats;
int rx_rings = rx;
int cp, vnics, rc;
@@ -9067,10 +9374,13 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
- if (BNXT_NEW_RM(bp))
+ stats = cp;
+ if (BNXT_NEW_RM(bp)) {
cp += bnxt_get_ulp_msix_num(bp);
+ stats += bnxt_get_ulp_stat_ctxs(bp);
+ }
return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
- vnics);
+ stats, vnics);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -9106,7 +9416,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
* 1 coal_buf x bufs_per_record = 1 completion record.
*/
coal = &bp->rx_coal;
- coal->coal_ticks = 14;
+ coal->coal_ticks = 10;
coal->coal_bufs = 30;
coal->coal_ticks_irq = 1;
coal->coal_bufs_irq = 2;
@@ -9294,7 +9604,6 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
- bp->num_stat_ctxs = bp->cp_nr_rings;
if (netif_running(bp->dev))
return bnxt_open_nic(bp, true, false);
@@ -9617,7 +9926,7 @@ static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
}
static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
- u16 flags)
+ u16 flags, struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -9760,6 +10069,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
kfree(bp->ctx);
bp->ctx = NULL;
bnxt_cleanup_pci(bp);
+ bnxt_free_port_stats(bp);
free_netdev(dev);
}
@@ -9834,7 +10144,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
bnxt_get_ulp_msix_num(bp),
- bnxt_get_max_func_stat_ctxs(bp));
+ hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
if (!(bp->flags & BNXT_FLAG_CHIP_P5))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
@@ -9965,7 +10275,6 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
}
- bp->num_stat_ctxs = bp->cp_nr_rings;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
bp->cp_nr_rings++;
@@ -10099,6 +10408,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
+ rc = bnxt_alloc_kong_hwrm_resources(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
+ }
+
if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3030931ccaf8..a451796deefe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -567,7 +567,6 @@ struct nqe_cn {
#define HWRM_RESP_LEN_MASK 0xffff0000
#define HWRM_RESP_LEN_SFT 16
#define HWRM_RESP_VALID_MASK 0xff000000
-#define HWRM_SEQ_ID_INVALID -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
@@ -585,6 +584,9 @@ struct nqe_cn {
#define HWRM_VALID_BIT_DELAY_USEC 20
+#define BNXT_HWRM_CHNL_CHIMP 0
+#define BNXT_HWRM_CHNL_KONG 1
+
#define BNXT_RX_EVENT 1
#define BNXT_AGG_EVENT 2
#define BNXT_TX_EVENT 4
@@ -615,9 +617,12 @@ struct bnxt_sw_rx_agg_bd {
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
- u32 flags;
+ u16 flags;
#define BNXT_RMEM_VALID_PTE_FLAG 1
#define BNXT_RMEM_RING_PTE_FLAG 2
+#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
+
+ u16 depth;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -927,6 +932,8 @@ struct bnxt_hw_resc {
u16 resv_vnics;
u16 min_stat_ctxs;
u16 max_stat_ctxs;
+ u16 resv_stat_ctxs;
+ u16 max_nqs;
u16 max_irqs;
u16 resv_irqs;
};
@@ -1111,9 +1118,14 @@ struct bnxt_test_info {
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
-#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
-#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
-#define BNXT_CAG_REG_BASE 0x300000
+#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
+#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
+#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
+#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
+#define BNXT_CAG_REG_BASE 0x300000
+
+#define BNXT_GRCPF_REG_KONG_COMM 0xA00
+#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
struct bnxt_tc_flow_stats {
u64 packets;
@@ -1181,12 +1193,15 @@ struct bnxt_vf_rep {
#define PTU_PTE_NEXT_TO_LAST 0x4UL
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
+#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
struct bnxt_ctx_pg_info {
u32 entries;
+ u32 nr_pages;
void *ctx_pg_arr[MAX_CTX_PAGES];
dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
struct bnxt_ring_mem_info ring_mem;
+ struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
struct bnxt_ctx_mem_info {
@@ -1222,6 +1237,8 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info cq_mem;
struct bnxt_ctx_pg_info vnic_mem;
struct bnxt_ctx_pg_info stat_mem;
+ struct bnxt_ctx_pg_info mrav_mem;
+ struct bnxt_ctx_pg_info tim_mem;
struct bnxt_ctx_pg_info *tqm_mem[9];
};
@@ -1416,8 +1433,6 @@ struct bnxt {
int cp_nr_pages;
int cp_nr_rings;
- int num_stat_ctxs;
-
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
@@ -1457,21 +1472,27 @@ struct bnxt {
u32 msg_enable;
u32 fw_cap;
- #define BNXT_FW_CAP_SHORT_CMD 0x00000001
- #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
- #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
- #define BNXT_FW_CAP_NEW_RM 0x00000008
- #define BNXT_FW_CAP_IF_CHANGE 0x00000010
+ #define BNXT_FW_CAP_SHORT_CMD 0x00000001
+ #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
+ #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
+ #define BNXT_FW_CAP_NEW_RM 0x00000008
+ #define BNXT_FW_CAP_IF_CHANGE 0x00000010
+ #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
+ #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
- u32 hwrm_intr_seq_id;
+ u16 hwrm_cmd_kong_seq;
+ u16 hwrm_intr_seq_id;
void *hwrm_short_cmd_req_addr;
dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
+ void *hwrm_cmd_kong_resp_addr;
+ dma_addr_t hwrm_cmd_kong_resp_dma_addr;
+ struct rtnl_link_stats64 net_stats_prev;
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
@@ -1483,6 +1504,8 @@ struct bnxt {
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
+ u8 pri2cos[8];
+ u8 pri2cos_valid;
u16 hwrm_max_req_len;
u16 hwrm_max_ext_req_len;
@@ -1669,6 +1692,66 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
}
}
+static inline bool bnxt_cfa_hwrm_message(u16 req_type)
+{
+ switch (req_type) {
+ case HWRM_CFA_ENCAP_RECORD_ALLOC:
+ case HWRM_CFA_ENCAP_RECORD_FREE:
+ case HWRM_CFA_DECAP_FILTER_ALLOC:
+ case HWRM_CFA_DECAP_FILTER_FREE:
+ case HWRM_CFA_NTUPLE_FILTER_ALLOC:
+ case HWRM_CFA_NTUPLE_FILTER_FREE:
+ case HWRM_CFA_NTUPLE_FILTER_CFG:
+ case HWRM_CFA_EM_FLOW_ALLOC:
+ case HWRM_CFA_EM_FLOW_FREE:
+ case HWRM_CFA_EM_FLOW_CFG:
+ case HWRM_CFA_FLOW_ALLOC:
+ case HWRM_CFA_FLOW_FREE:
+ case HWRM_CFA_FLOW_INFO:
+ case HWRM_CFA_FLOW_FLUSH:
+ case HWRM_CFA_FLOW_STATS:
+ case HWRM_CFA_METER_PROFILE_ALLOC:
+ case HWRM_CFA_METER_PROFILE_FREE:
+ case HWRM_CFA_METER_PROFILE_CFG:
+ case HWRM_CFA_METER_INSTANCE_ALLOC:
+ case HWRM_CFA_METER_INSTANCE_FREE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
+{
+ return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
+ bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)));
+}
+
+static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req)
+{
+ return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
+ req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr));
+}
+
+static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
+{
+ if (bnxt_hwrm_kong_chnl(bp, (struct input *)req))
+ return bp->hwrm_cmd_kong_resp_addr;
+ else
+ return bp->hwrm_cmd_resp_addr;
+}
+
+static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst)
+{
+ u16 seq_id;
+
+ if (dst == BNXT_HWRM_CHNL_CHIMP)
+ seq_id = bp->hwrm_cmd_seq++;
+ else
+ seq_id = bp->hwrm_cmd_kong_seq++;
+ return seq_id;
+}
+
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
@@ -1686,11 +1769,12 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
int bmap_size);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
+int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
-void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
+unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
-unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
+unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp);
void bnxt_tx_disable(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index a85d2be986af..15c7041e937b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -471,7 +471,10 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
if (total_ets_bw > 100)
return -EINVAL;
- *tc = max_tc + 1;
+ if (max_tc >= bp->max_tc)
+ *tc = bp->max_tc;
+ else
+ *tc = max_tc + 1;
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6b51f4de6017..adabbe94a259 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -207,6 +207,34 @@ reset_coalesce:
BNXT_TX_STATS_EXT_COS_ENTRY(6), \
BNXT_TX_STATS_EXT_COS_ENTRY(7) \
+#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
+ { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
+ __stringify(counter##_pri##n) }
+
+#define BNXT_TX_STATS_PRI_ENTRY(counter, n) \
+ { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \
+ __stringify(counter##_pri##n) }
+
+#define BNXT_RX_STATS_PRI_ENTRIES(counter) \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 0), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 1), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 2), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 3), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 4), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 5), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 6), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 7)
+
+#define BNXT_TX_STATS_PRI_ENTRIES(counter) \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 0), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 1), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 2), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 3), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 4), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 5), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 7)
+
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -327,8 +355,41 @@ static const struct {
BNXT_TX_STATS_EXT_PFC_ENTRIES,
};
+static const struct {
+ long base_off;
+ char string[ETH_GSTRING_LEN];
+} bnxt_rx_bytes_pri_arr[] = {
+ BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
+};
+
+static const struct {
+ long base_off;
+ char string[ETH_GSTRING_LEN];
+} bnxt_rx_pkts_pri_arr[] = {
+ BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
+};
+
+static const struct {
+ long base_off;
+ char string[ETH_GSTRING_LEN];
+} bnxt_tx_bytes_pri_arr[] = {
+ BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
+};
+
+static const struct {
+ long base_off;
+ char string[ETH_GSTRING_LEN];
+} bnxt_tx_pkts_pri_arr[] = {
+ BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
+};
+
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
+#define BNXT_NUM_STATS_PRI \
+ (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
+ ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
+ ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
+ ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
static int bnxt_get_num_stats(struct bnxt *bp)
{
@@ -339,9 +400,12 @@ static int bnxt_get_num_stats(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
- if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
num_stats += bp->fw_rx_stats_ext_size +
bp->fw_tx_stats_ext_size;
+ if (bp->pri2cos_valid)
+ num_stats += BNXT_NUM_STATS_PRI;
+ }
return num_stats;
}
@@ -369,8 +433,10 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
- if (!bp->bnapi)
- return;
+ if (!bp->bnapi) {
+ j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS;
+ goto skip_ring_stats;
+ }
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
bnxt_sw_func_stats[i].counter = 0;
@@ -395,6 +461,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
buf[j] = bnxt_sw_func_stats[i].counter;
+skip_ring_stats:
if (bp->flags & BNXT_FLAG_PORT_STATS) {
__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
@@ -415,6 +482,32 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
buf[j] = le64_to_cpu(*(tx_port_stats_ext +
bnxt_tx_port_stats_ext_arr[i].offset));
}
+ if (bp->pri2cos_valid) {
+ for (i = 0; i < 8; i++, j++) {
+ long n = bnxt_rx_bytes_pri_arr[i].base_off +
+ bp->pri2cos[i];
+
+ buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ }
+ for (i = 0; i < 8; i++, j++) {
+ long n = bnxt_rx_pkts_pri_arr[i].base_off +
+ bp->pri2cos[i];
+
+ buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ }
+ for (i = 0; i < 8; i++, j++) {
+ long n = bnxt_tx_bytes_pri_arr[i].base_off +
+ bp->pri2cos[i];
+
+ buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ }
+ for (i = 0; i < 8; i++, j++) {
+ long n = bnxt_tx_pkts_pri_arr[i].base_off +
+ bp->pri2cos[i];
+
+ buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ }
+ }
}
}
@@ -493,6 +586,28 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
bnxt_tx_port_stats_ext_arr[i].string);
buf += ETH_GSTRING_LEN;
}
+ if (bp->pri2cos_valid) {
+ for (i = 0; i < 8; i++) {
+ strcpy(buf,
+ bnxt_rx_bytes_pri_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < 8; i++) {
+ strcpy(buf,
+ bnxt_rx_pkts_pri_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < 8; i++) {
+ strcpy(buf,
+ bnxt_tx_bytes_pri_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < 8; i++) {
+ strcpy(buf,
+ bnxt_tx_pkts_pri_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
}
break;
case ETH_SS_TEST:
@@ -663,8 +778,6 @@ static int bnxt_set_channels(struct net_device *dev,
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
- bp->num_stat_ctxs = bp->cp_nr_rings;
-
/* After changing number of rx channels, update NTUPLE feature. */
netdev_update_features(dev);
if (netif_running(dev)) {
@@ -1526,14 +1639,22 @@ static int bnxt_flash_nvram(struct net_device *dev,
rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ netdev_info(dev,
+ "PF does not have admin privileges to flash the device\n");
+ rc = -EACCES;
+ } else if (rc) {
+ rc = -EIO;
+ }
return rc;
}
static int bnxt_firmware_reset(struct net_device *dev,
u16 dir_type)
{
- struct bnxt *bp = netdev_priv(dev);
struct hwrm_fw_reset_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
@@ -1573,7 +1694,15 @@ static int bnxt_firmware_reset(struct net_device *dev,
return -EINVAL;
}
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ netdev_info(dev,
+ "PF does not have admin privileges to reset the device\n");
+ rc = -EACCES;
+ } else if (rc) {
+ rc = -EIO;
+ }
+ return rc;
}
static int bnxt_flash_firmware(struct net_device *dev,
@@ -1780,9 +1909,9 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_nvm_install_update_input install = {0};
const struct firmware *fw;
+ int rc, hwrm_err = 0;
u32 item_len;
u16 index;
- int rc;
bnxt_hwrm_fw_set_time(bp);
@@ -1825,15 +1954,16 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
memcpy(kmem, fw->data, fw->size);
modify.host_src_addr = cpu_to_le64(dma_handle);
- rc = hwrm_send_message(bp, &modify, sizeof(modify),
- FLASH_PACKAGE_TIMEOUT);
+ hwrm_err = hwrm_send_message(bp, &modify,
+ sizeof(modify),
+ FLASH_PACKAGE_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
dma_handle);
}
}
release_firmware(fw);
- if (rc)
- return rc;
+ if (rc || hwrm_err)
+ goto err_exit;
if ((install_type & 0xffff) == 0)
install_type >>= 16;
@@ -1841,12 +1971,10 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
install.install_type = cpu_to_le32(install_type);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
- if (rc) {
- rc = -EOPNOTSUPP;
+ hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (hwrm_err)
goto flash_pkg_exit;
- }
if (resp->error_code) {
u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
@@ -1854,12 +1982,11 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
install.flags |= cpu_to_le16(
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
- rc = _hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
- if (rc) {
- rc = -EOPNOTSUPP;
+ hwrm_err = _hwrm_send_message(bp, &install,
+ sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (hwrm_err)
goto flash_pkg_exit;
- }
}
}
@@ -1870,6 +1997,14 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
}
flash_pkg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
+err_exit:
+ if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ netdev_info(dev,
+ "PF does not have admin privileges to flash the device\n");
+ rc = -EACCES;
+ } else if (hwrm_err) {
+ rc = -EOPNOTSUPP;
+ }
return rc;
}
@@ -2450,17 +2585,37 @@ static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
+{
+ struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_phy_qcaps_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
struct hwrm_port_phy_cfg_input *req)
{
struct bnxt_link_info *link_info = &bp->link_info;
- u16 fw_advertising = link_info->advertising;
+ u16 fw_advertising;
u16 fw_speed;
int rc;
if (!link_info->autoneg)
return 0;
+ rc = bnxt_query_force_speeds(bp, &fw_advertising);
+ if (rc)
+ return rc;
+
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
if (netif_carrier_ok(bp->dev))
fw_speed = bp->link_info.link_speed;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 5dd086059568..f1aaac8e6268 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -194,6 +194,8 @@ struct cmd_nums {
#define HWRM_STAT_CTX_QUERY 0xb2UL
#define HWRM_STAT_CTX_CLR_STATS 0xb3UL
#define HWRM_PORT_QSTATS_EXT 0xb4UL
+ #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
+ #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -213,6 +215,7 @@ struct cmd_nums {
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
#define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_QCAPS 0xf4UL
#define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
#define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
#define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
@@ -239,6 +242,24 @@ struct cmd_nums {
#define HWRM_FW_IPC_MSG 0x110UL
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
#define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
+ #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
+ #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
+ #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
+ #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
+ #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
+ #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
+ #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
+ #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
+ #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
+ #define HWRM_CFA_COUNTER_CFG 0x11cUL
+ #define HWRM_CFA_COUNTER_QCFG 0x11dUL
+ #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
+ #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
+ #define HWRM_CFA_EEM_QCAPS 0x120UL
+ #define HWRM_CFA_EEM_CFG 0x121UL
+ #define HWRM_CFA_EEM_QCFG 0x122UL
+ #define HWRM_CFA_EEM_OP 0x123UL
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
@@ -335,6 +356,8 @@ struct ret_codes {
#define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
#define HWRM_ERR_CODE_NO_BUFFER 0x8UL
#define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
@@ -363,8 +386,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 3
-#define HWRM_VERSION_STR "1.10.0.3"
+#define HWRM_VERSION_RSVD 33
+#define HWRM_VERSION_STR "1.10.0.33"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -411,6 +434,10 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
#define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
#define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -465,14 +492,27 @@ struct hwrm_ver_get_output {
/* eject_cmpl (size:128b/16B) */
struct eject_cmpl {
__le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
+ #define EJECT_CMPL_FLAGS_SFT 6
+ #define EJECT_CMPL_FLAGS_ERROR 0x40UL
__le16 len;
__le32 opaque;
- __le32 v;
- #define EJECT_CMPL_V 0x1UL
+ __le16 v;
+ #define EJECT_CMPL_V 0x1UL
+ #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
+ #define EJECT_CMPL_ERRORS_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
+ __le16 reserved16;
__le32 unused_2;
};
@@ -552,6 +592,10 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -647,6 +691,39 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
+/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
+struct hwrm_async_event_cmpl_reset_notify {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
+};
+
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
@@ -672,6 +749,74 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
};
+/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
+struct hwrm_async_event_cmpl_hw_flow_aged {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_req {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_done {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -867,6 +1012,8 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
#define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
#define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -902,7 +1049,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:640b/80B) */
+/* hwrm_func_qcfg_output (size:704b/88B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -919,6 +1066,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
+ #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1000,7 +1148,11 @@ struct hwrm_func_qcfg_output {
__le16 alloc_sp_tx_rings;
__le16 alloc_stat_ctx;
__le16 alloc_msix;
- u8 unused_2[5];
+ __le16 registered_vfs;
+ u8 unused_1[3];
+ u8 always_1;
+ __le32 reset_addr_poll;
+ u8 unused_2[3];
u8 valid;
};
@@ -1031,6 +1183,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
+ #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1235,6 +1388,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
#define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
#define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1888,7 +2042,8 @@ struct hwrm_func_drv_if_change_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
u8 unused_0[3];
u8 valid;
};
@@ -2864,6 +3019,60 @@ struct hwrm_port_phy_i2c_read_output {
u8 valid;
};
+/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
+struct hwrm_port_phy_mdio_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ __le16 reg_data;
+ u8 cl45_mdio;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
+struct hwrm_port_phy_mdio_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ u8 cl45_mdio;
+ u8 unused_1;
+};
+
+/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reg_data;
+ u8 unused_0[5];
+ u8 valid;
+};
+
/* hwrm_port_led_cfg_input (size:512b/64B) */
struct hwrm_port_led_cfg_input {
__le16 req_type;
@@ -4869,6 +5078,10 @@ struct hwrm_ring_grp_free_output {
u8 unused_0[7];
u8 valid;
};
+#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
+#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
+#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
+#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
struct hwrm_cfa_l2_filter_alloc_input {
@@ -4937,20 +5150,21 @@ struct hwrm_cfa_l2_filter_alloc_input {
u8 unused_3;
__le32 src_id;
u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_4;
__le16 dst_id;
__le16 mirror_vnic_id;
@@ -5108,20 +5322,21 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
u8 l3_addr_type;
u8 t_l3_addr_type;
u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 tunnel_flags;
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
@@ -5326,20 +5541,21 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 pri_hint;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
@@ -5459,20 +5675,21 @@ struct hwrm_cfa_decap_filter_alloc_input {
#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
__be32 tunnel_id;
u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_0;
__le16 unused_1;
u8 src_macaddr[6];
@@ -5559,20 +5776,23 @@ struct hwrm_cfa_flow_alloc_input {
#define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
@@ -5597,20 +5817,21 @@ struct hwrm_cfa_flow_alloc_input {
__be16 l2_rewrite_smac[3];
u8 ip_proto;
u8 tunnel_type;
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
};
/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
@@ -5623,7 +5844,8 @@ struct hwrm_cfa_flow_alloc_output {
u8 unused_0[2];
__le32 flow_id;
__le64 ext_flow_handle;
- u8 unused_1[7];
+ __le32 flow_counter_id;
+ u8 unused_1[3];
u8 valid;
};
@@ -5651,6 +5873,46 @@ struct hwrm_cfa_flow_free_output {
u8 valid;
};
+/* hwrm_cfa_flow_info_input (size:256b/32B) */
+struct hwrm_cfa_flow_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ u8 unused_0[6];
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_info_output (size:448b/56B) */
+struct hwrm_cfa_flow_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ u8 profile;
+ __le16 src_fid;
+ __le16 dst_fid;
+ __le16 l2_ctxt_id;
+ __le64 em_info;
+ __le64 tcam_info;
+ __le64 vfp_tcam_info;
+ __le16 ar_id;
+ __le16 flow_handle;
+ __le32 tunnel_handle;
+ __le16 flow_timer;
+ u8 unused_0[5];
+ u8 valid;
+};
+
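/* Illustrative sketch only: querying a single flow through the new
 * hwrm_cfa_flow_info message.  With firmware that hands out 64-bit flow
 * handles (BNXT_FW_CAP_OVS_64BIT_HANDLE, used in the bnxt_tc.c hunks later
 * in this patch), ext_flow_handle carries the handle and the 16-bit
 * flow_handle is left at zero; the helper itself is hypothetical.
 */
static int bnxt_tc_flow_info_example(struct bnxt *bp,
				     struct bnxt_tc_flow_node *flow_node)
{
	struct hwrm_cfa_flow_info_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_INFO, -1, -1);
	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
		req.ext_flow_handle = flow_node->ext_flow_handle;
	else
		req.flow_handle = flow_node->flow_handle;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}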
/* hwrm_cfa_flow_stats_input (size:640b/80B) */
struct hwrm_cfa_flow_stats_input {
__le16 req_type;
@@ -5757,6 +6019,128 @@ struct hwrm_cfa_vfr_free_output {
u8 valid;
};
+/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */
+struct hwrm_cfa_eem_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ __le32 unused_0;
+ __le32 supported;
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ __le32 max_entries_supported;
+ __le16 key_entry_size;
+ __le16 record_entry_size;
+ __le16 efc_entry_size;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_cfg_input (size:320b/40B) */
+struct hwrm_cfa_eem_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 unused_0;
+ __le32 num_entries;
+ __le32 unused_1;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+};
+
+/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 num_entries;
+};
+
+/* hwrm_cfa_eem_op_input (size:192b/24B) */
+struct hwrm_cfa_eem_op_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
+ __le16 unused_0;
+ __le16 op;
+ #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
+ #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
+ #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
+ #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
+ #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
+};
+
+/* hwrm_cfa_eem_op_output (size:128b/16B) */
+struct hwrm_cfa_eem_op_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
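/* Illustrative sketch only: enabling the extended exact-match (EEM) table
 * on the TX path with the new hwrm_cfa_eem_op message, after the context
 * memory has been configured via hwrm_cfa_eem_cfg.  The wrapper is
 * hypothetical and error handling is elided.
 */
static int bnxt_eem_enable_example(struct bnxt *bp)
{
	struct hwrm_cfa_eem_op_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_EEM_OP, -1, -1);
	req.flags = cpu_to_le32(CFA_EEM_OP_REQ_FLAGS_PATH_TX);
	req.op = cpu_to_le16(CFA_EEM_OP_REQ_OP_EEM_ENABLE);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}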
/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
@@ -5765,12 +6149,13 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
u8 unused_0[7];
};
@@ -5794,12 +6179,13 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5824,12 +6210,13 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -6040,7 +6427,9 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
#define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
u8 host_idx;
- u8 unused_0[5];
+ u8 flags;
+ #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ u8 unused_0[4];
};
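/* Illustrative sketch only: requesting a graceful (driver-coordinated)
 * firmware reset with the new RESET_GRACEFUL flag.  The embedded_proc_type
 * and self-reset selection are deliberately left to the caller; helper
 * names are the ones used elsewhere in this driver.
 */
static int bnxt_fw_reset_graceful_example(struct bnxt *bp)
{
	struct hwrm_fw_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
	/* embedded_proc_type must still be chosen by the caller */
	req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}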
/* hwrm_fw_reset_output (size:128b/16B) */
@@ -6137,6 +6526,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 3962f6fd543c..d80f5c981d90 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -448,16 +448,22 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
struct bnxt_pf_info *pf = &bp->pf;
int i, rc = 0, min = 1;
+ u16 vf_msix = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
- vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
- vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
+ vf_ring_grps = 0;
+ } else {
+ vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+ }
+ vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
+ vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
else
vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
- vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
@@ -476,7 +482,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.min_l2_ctxs = cpu_to_le16(min);
req.min_vnics = cpu_to_le16(min);
req.min_stat_ctx = cpu_to_le16(min);
- req.min_hw_ring_grps = cpu_to_le16(min);
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ req.min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
@@ -500,6 +507,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.max_vnics = cpu_to_le16(vf_vnics);
req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ req.max_msix = cpu_to_le16(vf_msix / num_vfs);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
@@ -525,6 +534,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
hw_resc->max_rsscos_ctxs -= pf->active_vfs;
hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ hw_resc->max_irqs -= vf_msix * n;
rc = pf->active_vfs;
}
@@ -539,19 +550,16 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
u32 rc = 0, mtu, i;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u16 vf_ring_grps, max_stat_ctxs;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
int total_vf_tx_rings = 0;
+ u16 vf_ring_grps;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- max_stat_ctxs = hw_resc->max_stat_ctxs;
-
/* Remaining rings are distributed equally amongst VFs for now */
- vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
- bp->cp_nr_rings) / num_vfs;
- vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+ vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
+ vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
num_vfs;
@@ -644,8 +652,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
- avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
- avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
+ avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
avail_cp = min_t(int, avail_cp, avail_stat);
while (vfs_supported) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 749f63beddd8..c683b5e96b1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -337,18 +337,21 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
-static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
+static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
+ struct bnxt_tc_flow_node *flow_node)
{
struct hwrm_cfa_flow_free_input req = { 0 };
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
- req.flow_handle = flow_handle;
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+ req.ext_flow_handle = flow_node->ext_flow_handle;
+ else
+ req.flow_handle = flow_node->flow_handle;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
- __func__, flow_handle, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
if (rc)
rc = -EIO;
@@ -418,13 +421,14 @@ static bool bits_set(void *key, int len)
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
__le16 ref_flow_handle,
- __le32 tunnel_handle, __le16 *flow_handle)
+ __le32 tunnel_handle,
+ struct bnxt_tc_flow_node *flow_node)
{
- struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_tc_actions *actions = &flow->actions;
struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
struct hwrm_cfa_flow_alloc_input req = { 0 };
+ struct hwrm_cfa_flow_alloc_output *resp;
u16 flow_flags = 0, action_flags = 0;
int rc;
@@ -527,8 +531,23 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
- *flow_handle = resp->flow_handle;
+ if (!rc) {
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
+ /* CFA_FLOW_ALLOC response interpretation:
+ * fw with fw with
+ * 16-bit 64-bit
+ * flow handle flow handle
+ * =========== ===========
+ * flow_handle flow handle flow context id
+ * ext_flow_handle INVALID flow handle
+ * flow_id INVALID flow counter id
+ */
+ flow_node->flow_handle = resp->flow_handle;
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
+ flow_node->ext_flow_handle = resp->ext_flow_handle;
+ flow_node->flow_id = resp->flow_id;
+ }
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
@@ -544,9 +563,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
__le32 ref_decap_handle,
__le32 *decap_filter_handle)
{
- struct hwrm_cfa_decap_filter_alloc_output *resp =
- bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
+ struct hwrm_cfa_decap_filter_alloc_output *resp;
struct ip_tunnel_key *tun_key = &flow->tun_key;
u32 enables = 0;
int rc;
@@ -599,10 +617,12 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
+ if (!rc) {
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
*decap_filter_handle = resp->decap_filter_id;
- else
+ } else {
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
@@ -633,9 +653,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
struct bnxt_tc_l2_key *l2_info,
__le32 *encap_record_handle)
{
- struct hwrm_cfa_encap_record_alloc_output *resp =
- bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_encap_record_alloc_input req = { 0 };
+ struct hwrm_cfa_encap_record_alloc_output *resp;
struct hwrm_cfa_encap_data_vxlan *encap =
(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
@@ -667,10 +686,12 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
+ if (!rc) {
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
*encap_record_handle = resp->encap_record_id;
- else
+ } else {
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
@@ -1224,7 +1245,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
int rc;
/* send HWRM cmd to free the flow-id */
- bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
+ bnxt_hwrm_cfa_flow_free(bp, flow_node);
mutex_lock(&tc_info->lock);
@@ -1246,6 +1267,12 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
return 0;
}
+static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
+ u16 src_fid)
+{
+ flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
+}
+
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
u16 src_fid)
{
@@ -1293,6 +1320,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
bnxt_tc_set_src_fid(bp, flow, src_fid);
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+ bnxt_tc_set_flow_dir(bp, flow, src_fid);
+
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -ENOSPC;
goto free_node;
@@ -1320,7 +1350,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
/* send HWRM cmd to alloc the flow */
rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
- tunnel_handle, &new_node->flow_handle);
+ tunnel_handle, new_node);
if (rc)
goto put_tunnel;
@@ -1336,7 +1366,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
return 0;
hwrm_flow_free:
- bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
+ bnxt_hwrm_cfa_flow_free(bp, new_node);
put_tunnel:
bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
@@ -1397,13 +1427,40 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
return 0;
}
+static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
+ struct bnxt_tc_flow_node *flow_node,
+ __le16 *flow_handle, __le32 *flow_id)
+{
+ u16 handle;
+
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
+ *flow_id = flow_node->flow_id;
+
+ /* If flow_id is used to fetch flow stats then:
+ * 1. lower 12 bits of flow_handle must be set to all 1s.
+ * 2. 15th bit of flow_handle must specify the flow
+ * direction (TX/RX).
+ */
+ if (flow_node->flow.dir == BNXT_DIR_RX)
+ handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
+ CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
+ else
+ handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
+
+ *flow_handle = cpu_to_le16(handle);
+ } else {
+ *flow_handle = flow_node->flow_handle;
+ }
+}
+
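/* Worked example for bnxt_fill_cfa_stats_req() above, assuming the 64-bit
 * handle scheme: the 16-bit flow_handle placed in the stats request is
 * 0x0fff for a TX flow (low 12 bits all ones) and 0x8fff for an RX flow
 * (same mask plus the DIR_RX bit 15), while the flow itself is identified
 * by the 32-bit flow_id returned at alloc time.
 */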
static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
struct bnxt_tc_stats_batch stats_batch[])
{
- struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_flow_stats_input req = { 0 };
+ struct hwrm_cfa_flow_stats_output *resp;
__le16 *req_flow_handles = &req.flow_handle_0;
+ __le32 *req_flow_ids = &req.flow_id_0;
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
@@ -1411,14 +1468,19 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
for (i = 0; i < num_flows; i++) {
struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
- req_flow_handles[i] = flow_node->flow_handle;
+ bnxt_fill_cfa_stats_req(bp, flow_node,
+ &req_flow_handles[i], &req_flow_ids[i]);
}
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
- __le64 *resp_packets = &resp->packet_0;
- __le64 *resp_bytes = &resp->byte_0;
+ __le64 *resp_packets;
+ __le64 *resp_bytes;
+
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
+ resp_packets = &resp->packet_0;
+ resp_bytes = &resp->byte_0;
for (i = 0; i < num_flows; i++) {
stats_batch[i].hw_stats.packets =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index 97e09a880693..8a0968967bc5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -98,6 +98,9 @@ struct bnxt_tc_flow {
/* flow applicable to pkts ingressing on this fid */
u16 src_fid;
+ u8 dir;
+#define BNXT_DIR_RX 1
+#define BNXT_DIR_TX 0
struct bnxt_tc_l2_key l2_key;
struct bnxt_tc_l2_key l2_mask;
struct bnxt_tc_l3_key l3_key;
@@ -170,7 +173,9 @@ struct bnxt_tc_flow_node {
struct bnxt_tc_flow flow;
+ __le64 ext_flow_handle;
__le16 flow_handle;
+ __le32 flow_id;
/* L2 node in l2 hashtable that shares flow's l2 key */
struct bnxt_tc_l2_node *l2_node;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 0a3097baafde..ea45a9b8179e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -48,10 +48,8 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->num_stat_ctxs == max_stat_ctxs)
+ bp->cp_nr_rings == max_stat_ctxs)
return -ENOMEM;
- bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
- BNXT_MIN_ROCE_STAT_CTXS);
}
atomic_set(&ulp->ref_count, 0);
@@ -82,14 +80,9 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
return -EINVAL;
}
- if (ulp_id == BNXT_ROCE_ULP) {
- unsigned int max_stat_ctxs;
+ if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
+ edev->en_ops->bnxt_free_msix(edev, ulp_id);
- max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
- bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
- if (ulp->msix_requested)
- edev->en_ops->bnxt_free_msix(edev, ulp_id);
- }
if (ulp->max_async_event_id)
bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
@@ -218,6 +211,14 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
return 0;
}
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ return BNXT_MIN_ROCE_STAT_CTXS;
+
+ return 0;
+}
+
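/* Rough sketch of how the bnxt_get_avail_stat_ctxs_for_en() helper called
 * from the bnxt_sriov.c hunks earlier in this patch could be composed from
 * the new bnxt_get_ulp_stat_ctxs(): stat contexts left for the Ethernet PF
 * are the hardware maximum minus what the PF's own rings and the RDMA ULP
 * consume.  The in-tree definition lives in bnxt.c/bnxt.h and may differ.
 */
static inline int bnxt_get_avail_stat_ctxs_for_en_sketch(struct bnxt *bp)
{
	return bp->hw_resc.max_stat_ctxs - bp->cp_nr_rings -
	       bnxt_get_ulp_stat_ctxs(bp);
}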
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_fw_msg *fw_msg)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index d9bea37cd211..cd78453d0bf0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -90,6 +90,7 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index bf6de02be396..0184ef6f05a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -199,7 +199,6 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
- bp->num_stat_ctxs = bp->cp_nr_rings;
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d83233ae4a15..510dfc1c236b 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5731,7 +5731,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
if (realdev) {
dev = cnic_from_netdev(realdev);
if (dev) {
- vid |= VLAN_TAG_PRESENT;
+ vid |= VLAN_CFI_MASK; /* make non-zero */
cnic_rcv_netevent(dev->cnic_priv, event, vid);
cnic_put(dev);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d6f090bf644..983245c0867c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1169,7 +1169,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
break;
}
- return 0;
+ return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
@@ -3612,36 +3612,6 @@ static int bcmgenet_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int bcmgenet_suspend(struct device *d)
-{
- struct net_device *dev = dev_get_drvdata(d);
- struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- if (!netif_running(dev))
- return 0;
-
- netif_device_detach(dev);
-
- bcmgenet_netif_stop(dev);
-
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
-
- /* Prepare the device for Wake-on-LAN and switch to the slow clock */
- if (device_may_wakeup(d) && priv->wolopts) {
- ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
- clk_prepare_enable(priv->clk_wol);
- } else if (priv->internal_phy) {
- ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- }
-
- /* Turn off the clocks */
- clk_disable_unprepare(priv->clk);
-
- return ret;
-}
-
static int bcmgenet_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
@@ -3719,6 +3689,39 @@ out_clk_disable:
clk_disable_unprepare(priv->clk);
return ret;
}
+
+static int bcmgenet_suspend(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+
+ bcmgenet_netif_stop(dev);
+
+ if (!device_may_wakeup(d))
+ phy_suspend(dev->phydev);
+
+ /* Prepare the device for Wake-on-LAN and switch to the slow clock */
+ if (device_may_wakeup(d) && priv->wolopts) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+ clk_prepare_enable(priv->clk_wol);
+ } else if (priv->internal_phy) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+ }
+
+ /* Turn off the clocks */
+ clk_disable_unprepare(priv->clk);
+
+ if (ret)
+ bcmgenet_resume(d);
+
+ return ret;
+}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 2fbd027f0148..57582efa362d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,6 +186,8 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ if (!(reg & MPD_EN))
+ return; /* already powered up so skip the rest */
reg &= ~MPD_EN;
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index a6cbaca37e94..aceb9b7b55bd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -226,7 +226,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* capabilities, use that knowledge to also configure the
* Reverse MII interface correctly.
*/
- if (dev->phydev->supported & PHY_1000BT_FEATURES)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ dev->phydev->supported))
port_ctrl = PORT_MODE_EXT_RVMII_50;
else
port_ctrl = PORT_MODE_EXT_RVMII_25;
@@ -317,7 +318,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
return ret;
}
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 432c3b867084..3b1397af81f7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -66,11 +66,6 @@
#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
-
#define BAR_0 0
#define BAR_2 2
@@ -2157,7 +2152,8 @@ static void tg3_phy_start(struct tg3 *tp)
phydev->speed = tp->link_config.speed;
phydev->duplex = tp->link_config.duplex;
phydev->autoneg = tp->link_config.autoneg;
- phydev->advertising = tp->link_config.advertising;
+ ethtool_convert_legacy_u32_to_link_mode(
+ phydev->advertising, tp->link_config.advertising);
}
phy_start(phydev);
@@ -4057,8 +4053,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
do_low_power = false;
if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
struct phy_device *phydev;
- u32 phyid, advertising;
+ u32 phyid;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
@@ -4067,25 +4064,33 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tp->link_config.speed = phydev->speed;
tp->link_config.duplex = phydev->duplex;
tp->link_config.autoneg = phydev->autoneg;
- tp->link_config.advertising = phydev->advertising;
-
- advertising = ADVERTISED_TP |
- ADVERTISED_Pause |
- ADVERTISED_Autoneg |
- ADVERTISED_10baseT_Half;
+ ethtool_convert_link_mode_to_legacy_u32(
+ &tp->link_config.advertising,
+ phydev->advertising);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising);
if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
- if (tg3_flag(tp, WOL_SPEED_100MB))
- advertising |=
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full;
- else
- advertising |= ADVERTISED_10baseT_Full;
+ if (tg3_flag(tp, WOL_SPEED_100MB)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising);
+ } else {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising);
+ }
}
- phydev->advertising = advertising;
-
+ linkmode_copy(phydev->advertising, advertising);
phy_start_aneg(phydev);
phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
@@ -6135,10 +6140,16 @@ static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
}
/* tp->lock must be held */
-static u64 tg3_refclk_read(struct tg3 *tp)
+static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
- u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
- return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+ u64 stamp;
+
+ ptp_read_system_prets(sts);
+ stamp = tr32(TG3_EAV_REF_CLCK_LSB);
+ ptp_read_system_postts(sts);
+ stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+
+ return stamp;
}
/* tp->lock must be held */
@@ -6229,13 +6240,14 @@ static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
u64 ns;
struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
tg3_full_lock(tp, 0);
- ns = tg3_refclk_read(tp);
+ ns = tg3_refclk_read(tp, sts);
ns += tp->ptp_adjust;
tg3_full_unlock(tp);
@@ -6330,7 +6342,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
- .gettime64 = tg3_ptp_gettime,
+ .gettimex64 = tg3_ptp_gettimex,
.settime64 = tg3_ptp_settime,
.enable = tg3_ptp_enable,
};
@@ -16973,32 +16985,6 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
return err;
}
-#ifdef CONFIG_SPARC
-static int tg3_get_macaddr_sparc(struct tg3 *tp)
-{
- struct net_device *dev = tp->dev;
- struct pci_dev *pdev = tp->pdev;
- struct device_node *dp = pci_device_to_OF_node(pdev);
- const unsigned char *addr;
- int len;
-
- addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == ETH_ALEN) {
- memcpy(dev->dev_addr, addr, ETH_ALEN);
- return 0;
- }
- return -ENODEV;
-}
-
-static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
-{
- struct net_device *dev = tp->dev;
-
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
- return 0;
-}
-#endif
-
static int tg3_get_device_address(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
@@ -17006,10 +16992,8 @@ static int tg3_get_device_address(struct tg3 *tp)
int addr_ok = 0;
int err;
-#ifdef CONFIG_SPARC
- if (!tg3_get_macaddr_sparc(tp))
+ if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
return 0;
-#endif
if (tg3_flag(tp, IS_SSB_CORE)) {
err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
@@ -17071,13 +17055,8 @@ static int tg3_get_device_address(struct tg3 *tp)
}
}
- if (!is_valid_ether_addr(&dev->dev_addr[0])) {
-#ifdef CONFIG_SPARC
- if (!tg3_get_default_macaddr_sparc(tp))
- return 0;
-#endif
+ if (!is_valid_ether_addr(&dev->dev_addr[0]))
return -EINVAL;
- }
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 4c816e5a841f..b126926ef7f5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4091,7 +4091,7 @@ static int macb_probe(struct platform_device *pdev)
if (mac) {
ether_addr_copy(bp->dev->dev_addr, mac);
} else {
- err = of_get_nvmem_mac_address(np, bp->dev->dev_addr);
+ err = nvmem_get_mac_address(&pdev->dev, bp->dev->dev_addr);
if (err) {
if (err == -EPROBE_DEFER)
goto err_out_free_netdev;
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 6aeb1045c302..73632b843749 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -277,10 +277,6 @@ static int cavium_ptp_probe(struct pci_dev *pdev,
writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
- if (!clock->ptp_clock) {
- err = -ENODEV;
- goto error_stop;
- }
if (IS_ERR(clock->ptp_clock)) {
err = PTR_ERR(clock->ptp_clock);
goto error_stop;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4b3aecf98f2a..5359c1021f42 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1080,8 +1080,11 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Set the mode of the interface, RGMII/MII. */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
union cvmx_agl_prtx_ctl agl_prtx_ctl;
- int rgmii_mode = (netdev->phydev->supported &
- (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
+ int rgmii_mode =
+ (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ netdev->phydev->supported) |
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ netdev->phydev->supported)) != 0;
agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index e2cdfa75673f..e8001e974411 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -24,7 +24,8 @@ config CHELSIO_T1
---help---
This driver supports Chelsio gigabit and 10-gigabit
Ethernet cards. More information about adapter features and
- performance tuning is in <file:Documentation/networking/cxgb.txt>.
+ performance tuning is in
+ <file:Documentation/networking/device_drivers/chelsio/cxgb.txt>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b16f4b3ef4c5..2d1ca920601e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -404,6 +404,7 @@ struct adapter_params {
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
u8 fw_caps_support; /* 32-bit Port Capabilities */
bool filter2_wr_support; /* FW support for FILTER2_WR */
+ unsigned int viid_smt_extn_support:1; /* FW returns vin and smt index */
/* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is
* used by the Port
@@ -592,6 +593,13 @@ struct port_info {
bool ptp_enable;
struct sched_table *sched_tbl;
u32 eth_flags;
+
+ /* viid and smt fields either returned by the FW
+ * or derived by the driver from the VIID.
+ */
+ u8 vin;
+ u8 vivld;
+ u8 smt_idx;
};
struct dentry;
@@ -1757,7 +1765,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
- unsigned int *rss_size);
+ unsigned int *rss_size, u8 *vivld, u8 *vin);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int viid);
@@ -1783,7 +1791,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, unsigned int naddr,
const u8 **addr, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int idx, const u8 *addr, bool persist, bool add_smt);
+ int idx, const u8 *addr, bool persist, u8 *smt_idx);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index cab492ec8f59..b0ff9fa183f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -378,19 +378,7 @@ static int cim_qcfg_show(struct seq_file *seq, void *v)
QUEREMFLITS_G(p[2]) * 16);
return 0;
}
-
-static int cim_qcfg_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cim_qcfg_show, inode->i_private);
-}
-
-static const struct file_operations cim_qcfg_fops = {
- .owner = THIS_MODULE,
- .open = cim_qcfg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(cim_qcfg);
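/* For reference, DEFINE_SHOW_ATTRIBUTE(cim_qcfg) from <linux/seq_file.h>
 * expands to essentially the boilerplate deleted above.  Note the generated
 * ops are named <name>_fops, which is why the *_debugfs_fops entries in
 * t4_setup_debugfs() are renamed later in this patch:
 */
static int cim_qcfg_open(struct inode *inode, struct file *file)
{
	return single_open(file, cim_qcfg_show, inode->i_private);
}

static const struct file_operations cim_qcfg_fops = {
	.owner   = THIS_MODULE,
	.open    = cim_qcfg_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};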
static int cimq_show(struct seq_file *seq, void *v, int idx)
{
@@ -860,8 +848,7 @@ static int tx_rate_show(struct seq_file *seq, void *v)
}
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate);
+DEFINE_SHOW_ATTRIBUTE(tx_rate);
static int cctrl_tbl_show(struct seq_file *seq, void *v)
{
@@ -893,8 +880,7 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
kfree(incr);
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl);
+DEFINE_SHOW_ATTRIBUTE(cctrl_tbl);
/* Format a value in a unit that differs from the value's native unit by the
* given factor.
@@ -955,8 +941,7 @@ static int clk_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(clk);
+DEFINE_SHOW_ATTRIBUTE(clk);
/* Firmware Device Log dump. */
static const char * const devlog_level_strings[] = {
@@ -1990,22 +1975,10 @@ static int sensors_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(sensors);
+DEFINE_SHOW_ATTRIBUTE(sensors);
#if IS_ENABLED(CONFIG_IPV6)
-static int clip_tbl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clip_tbl_show, inode->i_private);
-}
-
-static const struct file_operations clip_tbl_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = clip_tbl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(clip_tbl);
#endif
/*RSS Table.
@@ -2208,8 +2181,7 @@ static int rss_config_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(rss_config);
+DEFINE_SHOW_ATTRIBUTE(rss_config);
/* RSS Secret Key.
*/
@@ -2628,19 +2600,7 @@ static int resources_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int resources_open(struct inode *inode, struct file *file)
-{
- return single_open(file, resources_show, inode->i_private);
-}
-
-static const struct file_operations resources_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = resources_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SHOW_ATTRIBUTE(resources);
/**
* ethqset2pinfo - return port_info of an Ethernet Queue Set
@@ -3233,8 +3193,7 @@ static int tid_info_show(struct seq_file *seq, void *v)
t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+DEFINE_SHOW_ATTRIBUTE(tid_info);
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
@@ -3364,21 +3323,9 @@ static int meminfo_show(struct seq_file *seq, void *v)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(meminfo);
-static int meminfo_open(struct inode *inode, struct file *file)
-{
- return single_open(file, meminfo_show, inode->i_private);
-}
-
-static const struct file_operations meminfo_fops = {
- .owner = THIS_MODULE,
- .open = meminfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int chcr_show(struct seq_file *seq, void *v)
+static int chcr_stats_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
@@ -3399,20 +3346,7 @@ static int chcr_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.ipsec_cnt));
return 0;
}
-
-
-static int chcr_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, chcr_show, inode->i_private);
-}
-
-static const struct file_operations chcr_stats_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = chcr_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(chcr_stats);
#define PRINT_ADAP_STATS(string, value) \
seq_printf(seq, "%-25s %-20llu\n", (string), \
@@ -3573,8 +3507,7 @@ static int tp_stats_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats);
+DEFINE_SHOW_ATTRIBUTE(tp_stats);
/* Add an array of Debug FS files.
*/
@@ -3603,7 +3536,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "cim_pif_la", &cim_pif_la_fops, 0400, 0 },
{ "cim_ma_la", &cim_ma_la_fops, 0400, 0 },
{ "cim_qcfg", &cim_qcfg_fops, 0400, 0 },
- { "clk", &clk_debugfs_fops, 0400, 0 },
+ { "clk", &clk_fops, 0400, 0 },
{ "devlog", &devlog_fops, 0400, 0 },
{ "mboxlog", &mboxlog_fops, 0400, 0 },
{ "mbox0", &mbox_debugfs_fops, 0600, 0 },
@@ -3621,11 +3554,11 @@ int t4_setup_debugfs(struct adapter *adap)
{ "l2t", &t4_l2t_fops, 0400, 0},
{ "mps_tcam", &mps_tcam_debugfs_fops, 0400, 0 },
{ "rss", &rss_debugfs_fops, 0400, 0 },
- { "rss_config", &rss_config_debugfs_fops, 0400, 0 },
+ { "rss_config", &rss_config_fops, 0400, 0 },
{ "rss_key", &rss_key_debugfs_fops, 0400, 0 },
{ "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
{ "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
- { "resources", &resources_debugfs_fops, 0400, 0 },
+ { "resources", &resources_fops, 0400, 0 },
#ifdef CONFIG_CHELSIO_T4_DCB
{ "dcb_info", &dcb_info_debugfs_fops, 0400, 0 },
#endif
@@ -3644,18 +3577,18 @@ int t4_setup_debugfs(struct adapter *adap)
{ "obq_ncsi", &cim_obq_fops, 0400, 5 },
{ "tp_la", &tp_la_fops, 0400, 0 },
{ "ulprx_la", &ulprx_la_fops, 0400, 0 },
- { "sensors", &sensors_debugfs_fops, 0400, 0 },
+ { "sensors", &sensors_fops, 0400, 0 },
{ "pm_stats", &pm_stats_debugfs_fops, 0400, 0 },
- { "tx_rate", &tx_rate_debugfs_fops, 0400, 0 },
- { "cctrl", &cctrl_tbl_debugfs_fops, 0400, 0 },
+ { "tx_rate", &tx_rate_fops, 0400, 0 },
+ { "cctrl", &cctrl_tbl_fops, 0400, 0 },
#if IS_ENABLED(CONFIG_IPV6)
- { "clip_tbl", &clip_tbl_debugfs_fops, 0400, 0 },
+ { "clip_tbl", &clip_tbl_fops, 0400, 0 },
#endif
- { "tids", &tid_info_debugfs_fops, 0400, 0},
+ { "tids", &tid_info_fops, 0400, 0},
{ "blocked_fl", &blocked_fl_fops, 0600, 0 },
{ "meminfo", &meminfo_fops, 0400, 0 },
- { "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
- { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 },
+ { "crypto", &chcr_stats_fops, 0400, 0 },
+ { "tp_stats", &tp_stats_fops, 0400, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index 23f43a0f8950..ba95e13d52da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -37,19 +37,6 @@
#include <linux/export.h>
-#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
-static int name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, name##_show, inode->i_private); \
-} \
-static const struct file_operations name##_debugfs_fops = { \
- .owner = THIS_MODULE, \
- .open = name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release \
-}
-
struct t4_debugfs_entry {
const char *name;
const struct file_operations *ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d49db46254cd..6ba9099ca7fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -453,7 +453,7 @@ static int link_start(struct net_device *dev)
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
- true);
+ &pi->smt_idx);
if (ret >= 0) {
pi->xact_addr_filt = ret;
ret = 0;
@@ -1585,28 +1585,6 @@ unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
/**
- * cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
- * @chip: chip type
- * @viid: VI id of the given port
- *
- * Return the SMT index for this VI.
- */
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
-{
- /* In T4/T5, SMT contains 256 SMAC entries organized in
- * 128 rows of 2 entries each.
- * In T6, SMT contains 256 SMAC entries in 256 rows.
- * TODO: The below code needs to be updated when we add support
- * for 256 VFs.
- */
- if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
- return ((viid & 0x7f) << 1);
- else
- return (viid & 0x7f);
-}
-EXPORT_SYMBOL(cxgb4_tp_smt_idx);
-
-/**
* cxgb4_port_chan - get the HW channel of a port
* @dev: the net device for the port
*
@@ -2280,8 +2258,6 @@ static int cxgb_up(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
update_clip(adap);
#endif
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adap->mac_hlist);
return err;
irq_err:
@@ -2303,6 +2279,7 @@ static void cxgb_down(struct adapter *adapter)
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
+
adapter->flags &= ~FULL_INIT_DONE;
}
@@ -2669,7 +2646,7 @@ static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
vf < nvfs; vf++) {
- macaddr[5] = adap->pf * 16 + vf;
+ macaddr[5] = adap->pf * nvfs + vf;
ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
}
}
@@ -2863,7 +2840,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return -EADDRNOTAVAIL;
ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
- pi->xact_addr_filt, addr->sa_data, true, true);
+ pi->xact_addr_filt, addr->sa_data, true,
+ &pi->smt_idx);
if (ret < 0)
return ret;
@@ -4467,6 +4445,15 @@ static int adap_init0(struct adapter *adap)
adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
}
+ /* Check if FW supports returning vin and smt index.
+ * If this is not supported, the driver will derive
+ * these values from the viid.
+ */
+ params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
+
/*
* Get device capabilities so we can determine what resources we need
* to manage.
@@ -4777,14 +4764,26 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
for_each_port(adap, i) {
- struct port_info *p = adap2pinfo(adap, i);
+ struct port_info *pi = adap2pinfo(adap, i);
+ u8 vivld = 0, vin = 0;
- ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
- NULL, NULL);
+ ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
+ NULL, NULL, &vivld, &vin);
if (ret < 0)
return PCI_ERS_RESULT_DISCONNECT;
- p->viid = ret;
- p->xact_addr_filt = -1;
+ pi->viid = ret;
+ pi->xact_addr_filt = -1;
+ /* If fw supports returning the VIN as part of FW_VI_CMD,
+ * save the returned values.
+ */
+ if (adap->params.viid_smt_extn_support) {
+ pi->vivld = vivld;
+ pi->vin = vin;
+ } else {
+ /* Retrieve the values from VIID */
+ pi->vivld = FW_VIID_VIVLD_G(pi->viid);
+ pi->vin = FW_VIID_VIN_G(pi->viid);
+ }
}
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
@@ -5621,6 +5620,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(is_t5(adapter->params.chip) ? STATMODE_V(0) :
T6_STATMODE_V(0)));
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
for_each_port(adapter, i) {
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_ETH_QSETS);
@@ -5899,6 +5901,7 @@ fw_attach_fail:
static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ struct hash_mac_addr *entry, *tmp;
if (!adapter) {
pci_release_regions(pdev);
@@ -5948,6 +5951,12 @@ static void remove_one(struct pci_dev *pdev)
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
free_some_resources(adapter);
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
#if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 99022c0898b5..4852febbfec3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -495,14 +495,11 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
- u32 viid = cxgb4_port_viid(dev);
- u32 vf = FW_VIID_VIN_G(viid);
- u32 pf = FW_VIID_PFN_G(viid);
- u32 vld = FW_VIID_VIVLD_G(viid);
-
- ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
- FT_VNID_ID_PF_V(pf) |
- FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
+ struct port_info *pi = (struct port_info *)netdev_priv(dev);
+
+ ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) |
+ FT_VNID_ID_PF_V(adap->pf) |
+ FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift;
}
return ntuple;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cb523949c812..e8c34292a0ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -5880,7 +5880,6 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
{
int i, ofst = idx * 4;
u32 data_reg, mask_reg, cfg;
- u32 multitrc = TRCMULTIFILTER_F;
if (!enable) {
t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
@@ -5900,7 +5899,6 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
* maximum packet capture size of 9600 bytes is recommended.
* Also in this mode, only trace0 can be enabled and running.
*/
- multitrc = 0;
if (tp->snap_len > 9600 || idx)
return -EINVAL;
}
@@ -7141,21 +7139,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size)
{
unsigned int page_shift = fls(page_size) - 1;
- unsigned int sge_hps = page_shift - 10;
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
- t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
- HOSTPAGESIZEPF0_V(sge_hps) |
- HOSTPAGESIZEPF1_V(sge_hps) |
- HOSTPAGESIZEPF2_V(sge_hps) |
- HOSTPAGESIZEPF3_V(sge_hps) |
- HOSTPAGESIZEPF4_V(sge_hps) |
- HOSTPAGESIZEPF5_V(sge_hps) |
- HOSTPAGESIZEPF6_V(sge_hps) |
- HOSTPAGESIZEPF7_V(sge_hps));
-
if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
@@ -7488,7 +7475,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
*/
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
- unsigned int *rss_size)
+ unsigned int *rss_size, u8 *vivld, u8 *vin)
{
int ret;
struct fw_vi_cmd c;
@@ -7523,6 +7510,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
if (rss_size)
*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+
+ if (vivld)
+ *vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
+
+ if (vin)
+ *vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
+
return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
}
@@ -7980,7 +7974,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
* MAC value.
*/
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int idx, const u8 *addr, bool persist, bool add_smt)
+ int idx, const u8 *addr, bool persist, u8 *smt_idx)
{
int ret, mode;
struct fw_vi_mac_cmd c;
@@ -7989,7 +7983,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
- mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+ mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
@@ -8006,6 +8000,23 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
+ if (smt_idx) {
+ if (adap->params.viid_smt_extn_support) {
+ *smt_idx = FW_VI_MAC_CMD_SMTID_G
+ (be32_to_cpu(c.op_to_viid));
+ } else {
+ /* In T4/T5, SMT contains 256 SMAC entries
+ * organized in 128 rows of 2 entries each.
+ * In T6, SMT contains 256 SMAC entries in
+ * 256 rows.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
+ CHELSIO_T5)
+ *smt_idx = (viid & FW_VIID_VIN_M) << 1;
+ else
+ *smt_idx = (viid & FW_VIID_VIN_M);
+ }
+ }
}
return ret;
}
@@ -8593,7 +8604,7 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
{
unsigned int fw_caps = pi->adapter->params.fw_caps_support;
struct fw_port_cmd port_cmd;
- unsigned int action, link_ok, speed, mtu;
+ unsigned int action, link_ok, mtu;
fw_port_cap32_t linkattr;
int ret;
@@ -8627,7 +8638,6 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
mtu = FW_PORT_CMD_MTU32_G(
be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
}
- speed = fwcap_to_speed(linkattr);
*link_okp = link_ok;
*speedp = fwcap_to_speed(linkattr);
@@ -9374,6 +9384,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
enum fw_port_type port_type;
int mdio_addr;
fw_port_cap32_t pcaps, acaps;
+ u8 vivld = 0, vin = 0;
int ret;
/* If we haven't yet determined whether we're talking to Firmware
@@ -9428,7 +9439,8 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
acaps = be32_to_cpu(cmd.u.info32.acaps32);
}
- ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
+ ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
+ &vivld, &vin);
if (ret < 0)
return ret;
@@ -9437,6 +9449,18 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
pi->lport = port;
pi->rss_size = rss_size;
+ /* If fw supports returning the VIN as part of FW_VI_CMD,
+ * save the returned values.
+ */
+ if (adapter->params.viid_smt_extn_support) {
+ pi->vivld = vivld;
+ pi->vin = vin;
+ } else {
+ /* Retrieve the values from VIID */
+ pi->vivld = FW_VIID_VIVLD_G(pi->viid);
+ pi->vin = FW_VIID_VIN_G(pi->viid);
+ }
+
pi->port_type = port_type;
pi->mdio_addr = mdio_addr;
pi->mod_type = FW_PORT_MOD_TYPE_NA;
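The "derive it from the VIID" fallback used in the hunks above works because the VI identifier packs its fields at fixed bit positions. A rough sketch, assuming the usual FW_VIID encoding from t4fw_api.h (VIN in bits 6:0, VIVLD in bit 7, PFN in the bits above that):

/* Illustrative only; bit positions assumed from the FW_VIID_* macros. */
u8 vin   = FW_VIID_VIN_G(viid);		/* (viid >> 0) & 0x7f */
u8 vivld = FW_VIID_VIVLD_G(viid);	/* (viid >> 7) & 0x1  */

/* e.g. viid = 0x48c gives vin = 0x0c, vivld = 1; the legacy SMT index
 * computed in t4_change_mac() is then vin << 1 = 0x18 on T4/T5 and
 * vin = 0x0c on T6.
 */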
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60df66f4d21c..bf7325f6d553 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -217,6 +217,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 57584ab32043..1d9b3e1e5f94 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1253,6 +1253,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20,
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
+ FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
};
/*
@@ -2109,6 +2110,19 @@ struct fw_vi_cmd {
#define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S)
#define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U)
+#define FW_VI_CMD_VFVLD_S 24
+#define FW_VI_CMD_VFVLD_M 0x1
+#define FW_VI_CMD_VFVLD_V(x) ((x) << FW_VI_CMD_VFVLD_S)
+#define FW_VI_CMD_VFVLD_G(x) \
+ (((x) >> FW_VI_CMD_VFVLD_S) & FW_VI_CMD_VFVLD_M)
+#define FW_VI_CMD_VFVLD_F FW_VI_CMD_VFVLD_V(1U)
+
+#define FW_VI_CMD_VIN_S 16
+#define FW_VI_CMD_VIN_M 0xff
+#define FW_VI_CMD_VIN_V(x) ((x) << FW_VI_CMD_VIN_S)
+#define FW_VI_CMD_VIN_G(x) \
+ (((x) >> FW_VI_CMD_VIN_S) & FW_VI_CMD_VIN_M)
+
#define FW_VI_CMD_VIID_S 0
#define FW_VI_CMD_VIID_M 0xfff
#define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S)
@@ -2182,6 +2196,12 @@ struct fw_vi_mac_cmd {
} u;
};
+#define FW_VI_MAC_CMD_SMTID_S 12
+#define FW_VI_MAC_CMD_SMTID_M 0xff
+#define FW_VI_MAC_CMD_SMTID_V(x) ((x) << FW_VI_MAC_CMD_SMTID_S)
+#define FW_VI_MAC_CMD_SMTID_G(x) \
+ (((x) >> FW_VI_MAC_CMD_SMTID_S) & FW_VI_MAC_CMD_SMTID_M)
+
#define FW_VI_MAC_CMD_VIID_S 0
#define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ff84791a0ff8..2fab87e86561 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -722,6 +722,7 @@ static int adapter_up(struct adapter *adapter)
if (adapter->flags & USING_MSIX)
name_msix_vecs(adapter);
+
adapter->flags |= FULL_INIT_DONE;
}
@@ -747,8 +748,6 @@ static int adapter_up(struct adapter *adapter)
enable_rx(adapter);
t4vf_sge_start(adapter);
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adapter->mac_hlist);
return 0;
}
@@ -2324,19 +2323,7 @@ static int resources_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int resources_open(struct inode *inode, struct file *file)
-{
- return single_open(file, resources_show, inode->i_private);
-}
-
-static const struct file_operations resources_proc_fops = {
- .owner = THIS_MODULE,
- .open = resources_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(resources);
/*
* Show Virtual Interfaces.
@@ -2420,7 +2407,7 @@ static struct cxgb4vf_debugfs_entry debugfs_files[] = {
{ "mboxlog", 0444, &mboxlog_fops },
{ "sge_qinfo", 0444, &sge_qinfo_debugfs_fops },
{ "sge_qstats", 0444, &sge_qstats_proc_fops },
- { "resources", 0444, &resources_proc_fops },
+ { "resources", 0444, &resources_fops },
{ "interfaces", 0444, &interfaces_proc_fops },
};
@@ -3036,6 +3023,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
if (err)
goto err_unmap_bar;
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
/*
* Allocate our "adapter ports" and stitch everything together.
*/
@@ -3287,6 +3277,7 @@ err_disable_device:
static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ struct hash_mac_addr *entry, *tmp;
/*
* Tear down driver state associated with device.
@@ -3337,6 +3328,11 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
kfree(adapter->mbox_log);
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
kfree(adapter);
}
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index ec0b545197e2..e9a0213b08c4 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -23,7 +23,7 @@ config CS89x0
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
- <file:Documentation/networking/cs89x0.txt>.
+ <file:Documentation/networking/device_drivers/cirrus/cs89x0.txt>.
To compile this driver as a module, choose M here. The module
will be called cs89x0.
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f42f7a6e1559..ebd5c2cf1efe 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -241,7 +241,7 @@ static int enic_set_ringparam(struct net_device *netdev,
}
enic_init_vnic_resources(enic);
if (running) {
- err = dev_open(netdev);
+ err = dev_open(netdev, NULL);
if (err)
goto err_out;
}
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 1003201b5d80..264e9b413e94 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -113,7 +113,7 @@ config DE4X5
These include the DE425, DE434, DE435, DE450 and DE500 models. If
you have a network card of this type, say Y. More specific
information is contained in
- <file:Documentation/networking/de4x5.txt>.
+ <file:Documentation/networking/device_drivers/dec/de4x5.txt>.
To compile this driver as a module, choose M here. The module will
be called de4x5.
@@ -137,7 +137,7 @@ config DM9102
This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
Davicom (<http://www.davicom.com.tw/>). If you have such a network
(Ethernet) card, say Y. Some information is contained in the file
- <file:Documentation/networking/dmfe.txt>.
+ <file:Documentation/networking/device_drivers/dec/dmfe.txt>.
To compile this driver as a module, choose M here. The module will
be called dmfe.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index f0536b16b3c3..d8d423f22c4f 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1881,7 +1881,7 @@ Compile command:
gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
-Read Documentation/networking/dl2k.txt for details.
+Read Documentation/networking/device_drivers/dlink/dl2k.txt for details.
*/
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c5ad7a4f4d83..852f5bfe5f6d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -796,7 +796,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
u16 vlan_tag;
vlan_tag = skb_vlan_tag_get(skb);
- vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan_prio = skb_vlan_tag_get_prio(skb);
/* If vlan priority provided by OS is NOT in available bmap */
if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
@@ -1049,30 +1049,35 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
struct be_wrb_params
*wrb_params)
{
+ bool insert_vlan = false;
u16 vlan_tag = 0;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return skb;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ insert_vlan = true;
+ }
if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
- if (!vlan_tag)
+ if (!insert_vlan) {
vlan_tag = adapter->pvid;
+ insert_vlan = true;
+ }
/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
* skip VLAN insertion
*/
BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
}
- if (vlan_tag) {
+ if (insert_vlan) {
skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
vlan_tag);
if (unlikely(!skb))
return skb;
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
}
/* Insert the outer VLAN, if any */
@@ -4950,7 +4955,7 @@ fw_exit:
}
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
- u16 flags)
+ u16 flags, struct netlink_ext_ack *extack)
{
struct be_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6e0f47f2c8a3..f53090cde041 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -51,9 +51,9 @@
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
+#include <linux/phy_fixed.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>
-
#include "fman.h"
#include "fman_port.h"
#include "mac.h"
@@ -2475,6 +2475,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
static int dpaa_phy_init(struct net_device *net_dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
@@ -2491,7 +2492,9 @@ static int dpaa_phy_init(struct net_device *net_dev)
}
/* Remove any features not supported by the controller */
- phy_dev->supported &= mac_dev->if_support;
+ ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+
phy_support_asym_pause(phy_dev);
mac_dev->phy_dev = phy_dev;
@@ -2613,6 +2616,7 @@ static const struct net_device_ops dpaa_ops = {
.ndo_stop = dpaa_eth_stop,
.ndo_tx_timeout = dpaa_tx_timeout,
.ndo_get_stats64 = dpaa_get_stats64,
+ .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 13d6e2272ece..62497119c85f 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -529,6 +529,75 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
return 0;
}
+static int dpaa_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct qman_portal *portal;
+ u32 period;
+ u8 thresh;
+
+ portal = qman_get_affine_portal(smp_processor_id());
+ qman_portal_get_iperiod(portal, &period);
+ qman_dqrr_get_ithresh(portal, &thresh);
+
+ c->rx_coalesce_usecs = period;
+ c->rx_max_coalesced_frames = thresh;
+ c->use_adaptive_rx_coalesce = false;
+
+ return 0;
+}
+
+static int dpaa_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ const cpumask_t *cpus = qman_affine_cpus();
+ bool needs_revert[NR_CPUS] = {false};
+ struct qman_portal *portal;
+ u32 period, prev_period;
+ u8 thresh, prev_thresh;
+ int cpu, res;
+
+ if (c->use_adaptive_rx_coalesce)
+ return -EINVAL;
+
+ period = c->rx_coalesce_usecs;
+ thresh = c->rx_max_coalesced_frames;
+
+ /* save previous values */
+ portal = qman_get_affine_portal(smp_processor_id());
+ qman_portal_get_iperiod(portal, &prev_period);
+ qman_dqrr_get_ithresh(portal, &prev_thresh);
+
+ /* set new values */
+ for_each_cpu(cpu, cpus) {
+ portal = qman_get_affine_portal(cpu);
+ res = qman_portal_set_iperiod(portal, period);
+ if (res)
+ goto revert_values;
+ res = qman_dqrr_set_ithresh(portal, thresh);
+ if (res) {
+ qman_portal_set_iperiod(portal, prev_period);
+ goto revert_values;
+ }
+ needs_revert[cpu] = true;
+ }
+
+ return 0;
+
+revert_values:
+ /* restore previous values */
+ for_each_cpu(cpu, cpus) {
+ if (!needs_revert[cpu])
+ continue;
+ portal = qman_get_affine_portal(cpu);
+ /* restoring the previous values will not fail, ignore return value */
+ qman_portal_set_iperiod(portal, prev_period);
+ qman_dqrr_set_ithresh(portal, prev_thresh);
+ }
+
+ return res;
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
@@ -545,4 +614,6 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_rxnfc = dpaa_get_rxnfc,
.set_rxnfc = dpaa_set_rxnfc,
.get_ts_info = dpaa_get_ts_info,
+ .get_coalesce = dpaa_get_coalesce,
+ .set_coalesce = dpaa_set_coalesce,
};
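With these callbacks wired up, the interrupt coalescing of the affine QMan portals becomes visible to standard tooling: an invocation along the lines of "ethtool -C <iface> rx-usecs 32 rx-frames 8" (illustrative values, not from the patch) programs the portal interrupt period and DQRR interrupt threshold handled above, and "ethtool -c <iface>" reads them back.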
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 88f7acce38dc..1ca9a18139ec 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -13,7 +13,8 @@
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
-
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <net/sock.h>
#include "dpaa2-eth.h"
@@ -86,7 +87,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
skb_free_frag(sg_vaddr);
if (dpaa2_sg_is_final(&sgt[i]))
@@ -144,7 +145,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
@@ -199,12 +200,148 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
return skb;
}
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ skb_free_frag(vaddr);
+ }
+}
+
+static void xdp_release_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
+{
+ int err;
+
+ ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
+ if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
+ return;
+
+ while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+ ch->xdp.drop_bufs,
+ ch->xdp.drop_cnt)) == -EBUSY)
+ cpu_relax();
+
+ if (err) {
+ free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+ ch->buf_count -= ch->xdp.drop_cnt;
+ }
+
+ ch->xdp.drop_cnt = 0;
+}
+
+static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
+{
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_faead *faead;
+ u32 ctrl, frc;
+ int i, err;
+
+ /* Mark the egress frame hardware annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
+ /* Instruct hardware to release the FD buffer directly into
+ * the buffer pool once transmission is completed, instead of
+ * sending a Tx confirmation frame to us
+ */
+ ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
+ faead = dpaa2_get_faead(buf_start, false);
+ faead->ctrl = cpu_to_le32(ctrl);
+ faead->conf_fqid = 0;
+
+ fq = &priv->fq[queue_id];
+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, 0,
+ fq->tx_qdbin, fd);
+ if (err != -EBUSY)
+ break;
+ }
+
+ return err;
+}
+
+static u32 run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 xdp_act = XDP_PASS;
+ int err;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ rcu_read_lock();
+
+ xdp_prog = READ_ONCE(ch->xdp.prog);
+ if (!xdp_prog)
+ goto out;
+
+ xdp.data = vaddr + dpaa2_fd_get_offset(fd);
+ xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp_set_data_meta_invalid(&xdp);
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ /* xdp.data pointer may have changed */
+ dpaa2_fd_set_offset(fd, xdp.data - vaddr);
+ dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
+ if (err) {
+ xdp_release_buf(priv, ch, addr);
+ percpu_stats->tx_errors++;
+ ch->stats.xdp_tx_err++;
+ } else {
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.xdp_tx++;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ /* fall through */
+ case XDP_DROP:
+ xdp_release_buf(priv, ch, addr);
+ ch->stats.xdp_drop++;
+ break;
+ }
+
+out:
+ rcu_read_unlock();
+ return xdp_act;
+}
+
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi,
- u16 queue_id)
+ struct dpaa2_eth_fq *fq)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd);
@@ -216,12 +353,14 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_fas *fas;
void *buf_data;
u32 status = 0;
+ u32 xdp_act;
/* Tracing point */
trace_dpaa2_rx_fd(priv->net_dev, fd);
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
fas = dpaa2_get_fas(vaddr, false);
prefetch(fas);
@@ -232,8 +371,21 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
+ xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ return;
+ }
+
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
+ WARN_ON(priv->xdp_prog);
+
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
skb_free_frag(vaddr);
percpu_extras->rx_sg_frames++;
@@ -267,12 +419,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
}
skb->protocol = eth_type_trans(skb, priv->net_dev);
- skb_record_rx_queue(skb, queue_id);
+ skb_record_rx_queue(skb, fq->flowid);
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- napi_gro_receive(napi, skb);
+ napi_gro_receive(&ch->napi, skb);
return;
@@ -289,7 +441,7 @@ err_frame_format:
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
static int consume_frames(struct dpaa2_eth_channel *ch,
- enum dpaa2_eth_fq_type *type)
+ struct dpaa2_eth_fq **src)
{
struct dpaa2_eth_priv *priv = ch->priv;
struct dpaa2_eth_fq *fq = NULL;
@@ -312,7 +464,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
fd = dpaa2_dq_fd(dq);
fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
- fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
+ fq->consume(priv, ch, fd, fq);
cleaned++;
} while (!is_last);
@@ -320,13 +472,12 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
return 0;
fq->stats.frames += cleaned;
- ch->stats.frames += cleaned;
/* A dequeue operation only pulls frames from a single queue
- * into the store. Return the frame queue type as an out param.
+ * into the store. Return the frame queue as an out param.
*/
- if (type)
- *type = fq->type;
+ if (src)
+ *src = fq;
return cleaned;
}
@@ -571,8 +722,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct dpaa2_eth_fq *fq;
+ struct netdev_queue *nq;
u16 queue_mapping;
unsigned int needed_headroom;
+ u32 fd_len;
int err, i;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
@@ -644,8 +797,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
/* Clean up everything, including freeing the skb */
free_tx_fd(priv, &fd);
} else {
+ fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+ percpu_stats->tx_bytes += fd_len;
+
+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
+ netdev_tx_sent_queue(nq, fd_len);
}
return NETDEV_TX_OK;
@@ -661,11 +818,11 @@ err_alloc_headroom:
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch __always_unused,
const struct dpaa2_fd *fd,
- struct napi_struct *napi __always_unused,
- u16 queue_id __always_unused)
+ struct dpaa2_eth_fq *fq)
{
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
+ u32 fd_len = dpaa2_fd_get_len(fd);
u32 fd_errors;
/* Tracing point */
@@ -673,7 +830,10 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
percpu_extras->tx_conf_frames++;
- percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+ percpu_extras->tx_conf_bytes += fd_len;
+
+ fq->dq_frames++;
+ fq->dq_bytes += fd_len;
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
@@ -735,23 +895,6 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
return 0;
}
-/* Free buffers acquired from the buffer pool or which were meant to
- * be released in the pool
- */
-static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
-{
- struct device *dev = priv->net_dev->dev.parent;
- void *vaddr;
- int i;
-
- for (i = 0; i < count; i++) {
- vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- skb_free_frag(vaddr);
- }
-}
-
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
@@ -775,7 +918,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
buf = PTR_ALIGN(buf, priv->rx_buf_align);
addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
@@ -934,8 +1077,9 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_channel *ch;
struct dpaa2_eth_priv *priv;
int rx_cleaned = 0, txconf_cleaned = 0;
- enum dpaa2_eth_fq_type type = 0;
- int store_cleaned;
+ struct dpaa2_eth_fq *fq, *txc_fq = NULL;
+ struct netdev_queue *nq;
+ int store_cleaned, work_done;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -949,18 +1093,25 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
/* Refill pool if appropriate */
refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch, &type);
- if (type == DPAA2_RX_FQ)
+ store_cleaned = consume_frames(ch, &fq);
+ if (!store_cleaned)
+ break;
+ if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
- else
+ } else {
txconf_cleaned += store_cleaned;
+ /* We have a single Tx conf FQ on this channel */
+ txc_fq = fq;
+ }
/* If we either consumed the whole NAPI budget with Rx frames
* or we reached the Tx confirmations threshold, we're done.
*/
if (rx_cleaned >= budget ||
- txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI)
- return budget;
+ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+ work_done = budget;
+ goto out;
+ }
} while (store_cleaned);
/* We didn't consume the entire budget, so finish napi and
@@ -974,7 +1125,18 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
ch->nctx.desired_cpu);
- return max(rx_cleaned, 1);
+ work_done = max(rx_cleaned, 1);
+
+out:
+ if (txc_fq) {
+ nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
+ netdev_tx_completed_queue(nq, txc_fq->dq_frames,
+ txc_fq->dq_bytes);
+ txc_fq->dq_frames = 0;
+ txc_fq->dq_bytes = 0;
+ }
+
+ return work_done;
}
static void enable_ch_napi(struct dpaa2_eth_priv *priv)
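The netdev_tx_sent_queue()/netdev_tx_completed_queue() pair introduced in this file is the standard byte queue limits (BQL) accounting: bytes are charged to a Tx queue when a frame is handed to hardware and credited back when its Tx confirmation is processed, which is what the new per-FQ dq_frames/dq_bytes counters feed. A minimal sketch of the generic contract, not the driver's exact code:

/* transmit path, after the frame was successfully enqueued to hardware */
struct netdev_queue *nq = netdev_get_tx_queue(net_dev, queue_mapping);
netdev_tx_sent_queue(nq, fd_len);

/* NAPI poll, after Tx confirmations for that queue have been consumed */
netdev_tx_completed_queue(nq, frames_completed, bytes_completed);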
@@ -1400,6 +1562,174 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EINVAL;
}
+static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
+{
+ int mfl, linear_mfl;
+
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
+ dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
+
+ if (mfl > linear_mfl) {
+ netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
+ linear_mfl - VLAN_ETH_HLEN);
+ return false;
+ }
+
+ return true;
+}
+
+static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+{
+ int mfl, err;
+
+ /* We enforce a maximum Rx frame length based on MTU only if we have
+ * an XDP program attached (in order to avoid Rx S/G frames).
+ * Otherwise, we accept all incoming frames as long as they are not
+ * larger than the maximum frame size supported by the hardware.
+ */
+ if (has_xdp)
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ else
+ mfl = DPAA2_ETH_MFL;
+
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (!priv->xdp_prog)
+ goto out;
+
+ if (!xdp_mtu_valid(priv, new_mtu))
+ return -EINVAL;
+
+ err = set_rx_mfl(priv, new_mtu, true);
+ if (err)
+ return err;
+
+out:
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+{
+ struct dpni_buffer_layout buf_layout = {0};
+ int err;
+
+ err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
+ return err;
+ }
+
+ /* Reserve extra headroom for XDP header size changes */
+ buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
+ (has_xdp ? XDP_PACKET_HEADROOM : 0);
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_eth_channel *ch;
+ struct bpf_prog *old;
+ bool up, need_update;
+ int i, err;
+
+ if (prog && !xdp_mtu_valid(priv, dev->mtu))
+ return -EINVAL;
+
+ if (prog) {
+ prog = bpf_prog_add(prog, priv->num_channels);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+
+ up = netif_running(dev);
+ need_update = (!!priv->xdp_prog != !!prog);
+
+ if (up)
+ dpaa2_eth_stop(dev);
+
+ /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
+ * Also, when switching between xdp/non-xdp modes we need to reconfigure
+ * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
+ * so we are sure no old format buffers will be used from now on.
+ */
+ if (need_update) {
+ err = set_rx_mfl(priv, dev->mtu, !!prog);
+ if (err)
+ goto out_err;
+ err = update_rx_buffer_headroom(priv, !!prog);
+ if (err)
+ goto out_err;
+ }
+
+ old = xchg(&priv->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ old = xchg(&ch->xdp.prog, prog);
+ if (old)
+ bpf_prog_put(old);
+ }
+
+ if (up) {
+ err = dpaa2_eth_open(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+
+out_err:
+ if (prog)
+ bpf_prog_sub(prog, priv->num_channels);
+ if (up)
+ dpaa2_eth_open(dev);
+
+ return err;
+}
+
+static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return setup_xdp(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct net_device_ops dpaa2_eth_ops = {
.ndo_open = dpaa2_eth_open,
.ndo_start_xmit = dpaa2_eth_tx,
@@ -1409,6 +1739,8 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
.ndo_set_features = dpaa2_eth_set_features,
.ndo_do_ioctl = dpaa2_eth_ioctl,
+ .ndo_change_mtu = dpaa2_eth_change_mtu,
+ .ndo_bpf = dpaa2_eth_xdp,
};
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -1434,8 +1766,11 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
FSL_MC_POOL_DPCON, &dpcon);
if (err) {
- dev_info(dev, "Not enough DPCONs, will go on as-is\n");
- return NULL;
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ return ERR_PTR(err);
}
err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
@@ -1493,8 +1828,10 @@ alloc_channel(struct dpaa2_eth_priv *priv)
return NULL;
channel->dpcon = setup_dpcon(priv);
- if (!channel->dpcon)
+ if (IS_ERR_OR_NULL(channel->dpcon)) {
+ err = PTR_ERR(channel->dpcon);
goto err_setup;
+ }
err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
&attr);
@@ -1513,7 +1850,7 @@ err_get_attr:
free_dpcon(priv, channel->dpcon);
err_setup:
kfree(channel);
- return NULL;
+ return ERR_PTR(err);
}
static void free_channel(struct dpaa2_eth_priv *priv,
@@ -1547,10 +1884,11 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
for_each_online_cpu(i) {
/* Try to allocate a channel */
channel = alloc_channel(priv);
- if (!channel) {
- dev_info(dev,
- "No affine channel for cpu %d and above\n", i);
- err = -ENODEV;
+ if (IS_ERR_OR_NULL(channel)) {
+ err = PTR_ERR(channel);
+ if (err != -EPROBE_DEFER)
+ dev_info(dev,
+ "No affine channel for cpu %d and above\n", i);
goto err_alloc_ch;
}
@@ -1597,7 +1935,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
/* Stop if we already have enough channels to accommodate all
* RX and TX conf queues
*/
- if (priv->num_channels == dpaa2_eth_queue_count(priv))
+ if (priv->num_channels == priv->dpni_attrs.num_queues)
break;
}
@@ -1608,9 +1946,12 @@ err_set_cdan:
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
+ if (err == -EPROBE_DEFER)
+ return err;
+
if (cpumask_empty(&priv->dpio_cpumask)) {
dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
- return err;
+ return -ENODEV;
}
dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
@@ -1732,7 +2073,10 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
&dpbp_dev);
if (err) {
- dev_err(dev, "DPBP device allocation failed\n");
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
return err;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 452a8e9c4f0e..69c965de192b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -139,7 +139,9 @@ struct dpaa2_faead {
};
#define DPAA2_FAEAD_A2V 0x20000000
+#define DPAA2_FAEAD_A4V 0x08000000
#define DPAA2_FAEAD_UPDV 0x00001000
+#define DPAA2_FAEAD_EBDDV 0x00002000
#define DPAA2_FAEAD_UPD 0x00000010
/* Accessors for the hardware annotation fields that we use */
@@ -243,12 +245,14 @@ struct dpaa2_eth_fq_stats {
struct dpaa2_eth_ch_stats {
/* Volatile dequeues retried due to portal busy */
__u64 dequeue_portal_busy;
- /* Number of CDANs; useful to estimate avg NAPI len */
- __u64 cdan;
- /* Number of frames received on queues from this channel */
- __u64 frames;
/* Pull errors */
__u64 pull_err;
+ /* Number of CDANs; useful to estimate avg NAPI len */
+ __u64 cdan;
+ /* XDP counters */
+ __u64 xdp_drop;
+ __u64 xdp_tx;
+ __u64 xdp_tx_err;
};
/* Maximum number of queues associated with a DPNI */
@@ -271,17 +275,24 @@ struct dpaa2_eth_fq {
u32 tx_qdbin;
u16 flowid;
int target_cpu;
+ u32 dq_frames;
+ u32 dq_bytes;
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
void (*consume)(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi,
- u16 queue_id);
+ struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats;
};
+struct dpaa2_eth_ch_xdp {
+ struct bpf_prog *prog;
+ u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
+ int drop_cnt;
+};
+
struct dpaa2_eth_channel {
struct dpaa2_io_notification_ctx nctx;
struct fsl_mc_device *dpcon;
@@ -293,6 +304,7 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_priv *priv;
int buf_count;
struct dpaa2_eth_ch_stats stats;
+ struct dpaa2_eth_ch_xdp xdp;
};
struct dpaa2_eth_dist_fields {
@@ -352,6 +364,7 @@ struct dpaa2_eth_priv {
u64 rx_hash_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
+ struct bpf_prog *xdp_prog;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
@@ -434,9 +447,10 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
DPAA2_ETH_RX_HWA_SIZE;
}
+/* We have exactly one {Rx, Tx conf} queue per channel */
static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
{
- return priv->dpni_attrs.num_queues;
+ return priv->num_channels;
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 26bd5a2bd8ed..a7389e722c49 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -45,6 +45,15 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] dequeue portal busy",
"[drv] channel pull errors",
"[drv] cdan",
+ "[drv] xdp drop",
+ "[drv] xdp tx",
+ "[drv] xdp tx errors",
+ /* FQ stats */
+ "[qbman] rx pending frames",
+ "[qbman] rx pending bytes",
+ "[qbman] tx conf pending frames",
+ "[qbman] tx conf pending bytes",
+ "[qbman] buffer count",
};
#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
@@ -174,8 +183,10 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
int j, k, err;
int num_cnt;
union dpni_statistics dpni_stats;
- u64 cdan = 0;
- u64 portal_busy = 0, pull_err = 0;
+ u32 fcnt, bcnt;
+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+ u32 buf_cnt;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_drv_stats *extras;
struct dpaa2_eth_ch_stats *ch_stats;
@@ -212,16 +223,43 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
i += j;
- for (j = 0; j < priv->num_channels; j++) {
- ch_stats = &priv->channel[j]->stats;
- cdan += ch_stats->cdan;
- portal_busy += ch_stats->dequeue_portal_busy;
- pull_err += ch_stats->pull_err;
+ /* Per-channel stats */
+ for (k = 0; k < priv->num_channels; k++) {
+ ch_stats = &priv->channel[k]->stats;
+ for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
+ *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
+ i += j;
+
+ for (j = 0; j < priv->num_fqs; j++) {
+ /* Print FQ instantaneous counts */
+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
+ &fcnt, &bcnt);
+ if (err) {
+ netdev_warn(net_dev, "FQ query error %d", err);
+ return;
+ }
- *(data + i++) = portal_busy;
- *(data + i++) = pull_err;
- *(data + i++) = cdan;
+ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
+ fcnt_tx_total += fcnt;
+ bcnt_tx_total += bcnt;
+ } else {
+ fcnt_rx_total += fcnt;
+ bcnt_rx_total += bcnt;
+ }
+ }
+
+ *(data + i++) = fcnt_rx_total;
+ *(data + i++) = bcnt_rx_total;
+ *(data + i++) = fcnt_tx_total;
+ *(data + i++) = bcnt_tx_total;
+
+ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
+ return;
+ }
+ *(data + i++) = buf_cnt;
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 84b942b1eccc..9b150db3b510 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -140,7 +140,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
if (err) {
- dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
goto err_exit;
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index bf80855dd0dd..f79e57f735b3 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -531,7 +531,6 @@ struct fec_enet_private {
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
- int mii_timeout;
uint phy_speed;
phy_interface_t phy_interface;
struct device_node *phy_node;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 6db69ba30dcd..ae0f88bce9aa 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1714,12 +1714,6 @@ static void fec_enet_adjust_link(struct net_device *ndev)
struct phy_device *phy_dev = ndev->phydev;
int status_change = 0;
- /* Prevent a state halted on mii error */
- if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
- phy_dev->state = PHY_RESUMING;
- return;
- }
-
/*
* If the netdev is down, or is going down, we're not interested
* in link state events, so just mark our idea of the link as down
@@ -1779,7 +1773,6 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ret < 0)
return ret;
- fep->mii_timeout = 0;
reinit_completion(&fep->mdio_done);
/* start a read op */
@@ -1791,7 +1784,6 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
time_left = wait_for_completion_timeout(&fep->mdio_done,
usecs_to_jiffies(FEC_MII_TIMEOUT));
if (time_left == 0) {
- fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO read timeout\n");
ret = -ETIMEDOUT;
goto out;
@@ -1820,7 +1812,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
else
ret = 0;
- fep->mii_timeout = 0;
reinit_completion(&fep->mdio_done);
/* start a write op */
@@ -1833,7 +1824,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
time_left = wait_for_completion_timeout(&fep->mdio_done,
usecs_to_jiffies(FEC_MII_TIMEOUT));
if (time_left == 0) {
- fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO write timeout\n");
ret = -ETIMEDOUT;
}
@@ -2001,8 +1991,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
return -ENOENT;
}
- fep->mii_timeout = 0;
-
/*
* Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
*
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index d79e4e009d63..71f4205f14e7 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -393,7 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
*/
/* get local capabilities */
- lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
/* get link partner capabilities */
rmt_adv = 0;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 82722d05fedb..88a396fd242f 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -473,7 +473,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
- if (strcmp(tbi->type, "tbi-phy") == 0) {
+ if (of_node_is_type(tbi, "tbi-phy")) {
dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n",
tbi);
break;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3c8da1a18ba0..45fcc96be90e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -500,6 +500,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
+ .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -720,7 +721,7 @@ static int gfar_of_group_count(struct device_node *np)
int num = 0;
for_each_available_child_of_node(np, child)
- if (!of_node_cmp(child->name, "queue-group"))
+ if (of_node_name_eq(child, "queue-group"))
num++;
return num;
@@ -838,7 +839,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
/* Parse and initialize group specific information */
if (priv->mode == MQ_MG_MODE) {
for_each_available_child_of_node(np, child) {
- if (of_node_cmp(child->name, "queue-group"))
+ if (!of_node_name_eq(child, "queue-group"))
continue;
err = gfar_parse_group(child, priv, model);
@@ -1784,14 +1785,20 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
- uint gigabit_support =
- priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
struct phy_device *phydev;
struct ethtool_eee edata;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
+
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1809,8 +1816,8 @@ static int init_phy(struct net_device *dev)
gfar_configure_serdes(dev);
/* Remove any features not supported by the controller */
- phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
- phydev->advertising = phydev->supported;
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
/* Add support for flow control */
phy_support_asym_pause(phydev);
@@ -3656,7 +3663,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
val |= MACCFG1_TX_FLOW;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 0d76e15cd6dd..241325c35cb4 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1134,11 +1134,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
prio = vlan_tci_prio(rule);
prio_mask = vlan_tci_priom(rule);
- if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
- vlan |= RQFPR_CFI;
- vlan_mask |= RQFPR_CFI;
- } else if (cfi != VLAN_TAG_PRESENT &&
- cfi_mask == VLAN_TAG_PRESENT) {
+ if (cfi_mask) {
+ if (cfi)
+ vlan |= RQFPR_CFI;
vlan_mask |= RQFPR_CFI;
}
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 32e02700feaa..c3d539e209ed 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -30,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/workqueue.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -1742,12 +1743,7 @@ static int init_phy(struct net_device *dev)
if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
uec_configure_serdes(dev);
- phy_set_max_speed(phydev, SPEED_100);
-
- if (priv->max_speed == SPEED_1000)
- phydev->supported |= ADVERTISED_1000baseT_Full;
-
- phydev->advertising = phydev->supported;
+ phy_set_max_speed(phydev, priv->max_speed);
priv->phydev = phydev;
@@ -3681,6 +3677,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 25152715396b..fee4664c9189 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -118,6 +118,7 @@ config HNS3_ENET
tristate "Hisilicon HNS3 Ethernet Device Support"
default m
depends on 64BIT && PCI
+ depends on INET
---help---
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 6242249c9f4c..5748d3f722f6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1163,6 +1163,7 @@ static void hns_nic_adjust_link(struct net_device *ndev)
*/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct phy_device *phy_dev = h->phy_dev;
int ret;
@@ -1180,8 +1181,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (unlikely(ret))
return -ENODEV;
- phy_dev->supported &= h->if_support;
- phy_dev->advertising = phy_dev->supported;
+ ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+ linkmode_copy(phy_dev->advertising, phy_dev->supported);
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 774beda040a1..8e9b95871d30 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -624,7 +624,7 @@ static void hns_nic_self_test(struct net_device *ndev,
clear_bit(NIC_STATE_TESTING, &priv->state);
if (if_running)
- (void)dev_open(ndev);
+ (void)dev_open(ndev, NULL);
}
/* Online tests aren't run; pass by default */
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 002534f12b66..d01bf536eb86 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -9,6 +9,6 @@ obj-$(CONFIG_HNS3) += hns3vf/
obj-$(CONFIG_HNS3) += hnae3.o
obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
+hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 038326cfda93..691d12174902 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -36,6 +36,10 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+ HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */
+ HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
+ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
+ HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
};
/* below are per-VF mac-vlan subcodes */
@@ -85,6 +89,12 @@ struct hclge_mbx_pf_to_vf_cmd {
u16 msg[8];
};
+struct hclge_vf_rst_cmd {
+ u8 dest_vfid;
+ u8 vf_rst;
+ u8 rsv[22];
+};
+
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
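A small consistency note on the new struct hclge_vf_rst_cmd above (an inference, not stated in the patch): 1 byte dest_vfid + 1 byte vf_rst + 22 reserved bytes = 24 bytes, which appears to pad the structure to the 24-byte data area of an hclge command descriptor (six __le32 words), matching the sizing convention of the other *_cmd structures in these headers.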
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 055b40606dbc..36eab37d8a40 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -52,6 +52,7 @@
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_FD_B 0x6
+#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,6 +66,9 @@
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+#define hnae3_dev_gro_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
+
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -124,14 +128,23 @@ enum hnae3_reset_notify_type {
enum hnae3_reset_type {
HNAE3_VF_RESET,
+ HNAE3_VF_FUNC_RESET,
+ HNAE3_VF_PF_FUNC_RESET,
HNAE3_VF_FULL_RESET,
+ HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
+ HNAE3_UNKNOWN_RESET,
HNAE3_NONE_RESET,
};
+enum hnae3_flr_state {
+ HNAE3_FLR_DOWN,
+ HNAE3_FLR_DONE,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -162,6 +175,7 @@ struct hnae3_client_ops {
int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
+ enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
};
#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -197,6 +211,10 @@ struct hnae3_ae_dev {
* Enable the hardware
* stop()
* Disable the hardware
+ * start_client()
+ * Inform the hclge that client has been started
+ * stop_client()
+ * Inform the hclge that client has been stopped
* get_status()
* Get the carrier state of the back channel of the handle, 1 for ok, 0 for
* non-ok
@@ -292,17 +310,22 @@ struct hnae3_ae_dev {
* Set vlan filter config of vf
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
+ * set_gro_en
+ * Enable/disable HW GRO
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
-
+ void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
+ void (*flr_done)(struct hnae3_ae_dev *ae_dev);
int (*init_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
void (*uninit_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
int (*start)(struct hnae3_handle *handle);
void (*stop)(struct hnae3_handle *handle);
+ int (*client_start)(struct hnae3_handle *handle);
+ void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex);
@@ -403,6 +426,8 @@ struct hnae3_ae_ops {
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+ void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type);
void (*get_channels)(struct hnae3_handle *handle,
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
@@ -429,7 +454,14 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
- pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
+ int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
+ pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
+ bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
+ bool (*ae_dev_resetting)(struct hnae3_handle *handle);
+ unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
+ int (*set_gro_en)(struct hnae3_handle *handle, int enable);
+ u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
+ void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
};
struct hnae3_dcb_ops {
@@ -488,6 +520,14 @@ struct hnae3_roce_private_info {
void __iomem *roce_io_base;
int base_vector;
int num_vectors;
+
+ /* The below attributes defined for RoCE client, hnae3 gives
+ * initial values to them, and RoCE client can modify and use
+ * them.
+ */
+ unsigned long reset_state;
+ unsigned long instance_state;
+ unsigned long state;
};
struct hnae3_unic_private_info {
@@ -520,9 +560,6 @@ struct hnae3_handle {
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
u64 flags; /* Indicate the capabilities for this handle*/
- unsigned long last_reset_time;
- enum hnae3_reset_type reset_level;
-
union {
struct net_device *netdev; /* first member */
struct hnae3_knic_private_info kinfo;
@@ -533,6 +570,7 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
u8 netdev_flags;
+ struct dentry *hnae3_dbgfs;
};
#define hnae3_set_field(origin, mask, shift, val) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index ea5f8a84070d..b6fabbbdfd5b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getets)
return h->kinfo.dcb_ops->ieee_getets(h, ets);
@@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setets)
return h->kinfo.dcb_ops->ieee_setets(h, ets);
@@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getpfc)
return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
@@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setpfc)
return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
new file mode 100644
index 000000000000..0de543faa5b1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+
+#define HNS3_DBG_READ_LEN 256
+
+static struct dentry *hns3_dbgfs_root;
+
+static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf)
+{
+ struct hns3_nic_priv *priv = h->priv;
+ struct hns3_nic_ring_data *ring_data;
+ struct hns3_enet_ring *ring;
+ u32 base_add_l, base_add_h;
+ u32 queue_num, queue_max;
+ u32 value, i = 0;
+ int cnt;
+
+ if (!priv->ring_data) {
+ dev_err(&h->pdev->dev, "ring_data is NULL\n");
+ return -EFAULT;
+ }
+
+ queue_max = h->kinfo.num_tqps;
+ cnt = kstrtouint(&cmd_buf[11], 0, &queue_num);
+ if (cnt)
+ queue_num = 0;
+ else
+ queue_max = queue_num + 1;
+
+ dev_info(&h->pdev->dev, "queue info\n");
+
+ if (queue_num >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev,
+ "Queue number(%u) is out of range(%u)\n", queue_num,
+ h->kinfo.num_tqps - 1);
+ return -EINVAL;
+ }
+
+ ring_data = priv->ring_data;
+ for (i = queue_num; i < queue_max; i++) {
+ /* Each iteration must check whether the instance is being reset,
+ * to avoid referencing freed memory, and the loop body must
+ * complete within 100ms.
+ */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+
+ ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_L_REG);
+ dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
+ base_add_h, base_add_l);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_NUM_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_LEN_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_TAIL_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_FBDNUM_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
+
+ ring = ring_data[i].ring;
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_L_REG);
+ dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
+ base_add_h, base_add_l);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TC_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
+ value);
+ }
+
+ return 0;
+}
+
+static int hns3_dbg_queue_map(struct hnae3_handle *h)
+{
+ struct hns3_nic_priv *priv = h->priv;
+ struct hns3_nic_ring_data *ring_data;
+ int i;
+
+ if (!h->ae_algo->ops->get_global_queue_id)
+ return -EOPNOTSUPP;
+
+ dev_info(&h->pdev->dev, "map info for queue id and vector id\n");
+ dev_info(&h->pdev->dev,
+ "local queue id | global queue id | vector id\n");
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ u16 global_qid;
+
+ global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
+ ring_data = &priv->ring_data[i];
+ if (!ring_data || !ring_data->ring ||
+ !ring_data->ring->tqp_vector)
+ continue;
+
+ dev_info(&h->pdev->dev,
+ " %4d %4d %4d\n",
+ i, global_qid,
+ ring_data->ring->tqp_vector->vector_irq);
+ }
+
+ return 0;
+}
+
+static int hns3_dbg_bd_info(struct hnae3_handle *h, char *cmd_buf)
+{
+ struct hns3_nic_priv *priv = h->priv;
+ struct hns3_nic_ring_data *ring_data;
+ struct hns3_desc *rx_desc, *tx_desc;
+ struct device *dev = &h->pdev->dev;
+ struct hns3_enet_ring *ring;
+ u32 tx_index, rx_index;
+ u32 q_num, value;
+ int cnt;
+
+ cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index);
+ if (cnt == 2) {
+ rx_index = tx_index;
+ } else if (cnt != 1) {
+ dev_err(dev, "bd info: bad command string, cnt=%d\n", cnt);
+ return -EINVAL;
+ }
+
+ if (q_num >= h->kinfo.num_tqps) {
+ dev_err(dev, "Queue number(%u) is out of range(%u)\n", q_num,
+ h->kinfo.num_tqps - 1);
+ return -EINVAL;
+ }
+
+ ring_data = priv->ring_data;
+ ring = ring_data[q_num].ring;
+ value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+ tx_index = (cnt == 1) ? value : tx_index;
+
+ if (tx_index >= ring->desc_num) {
+ dev_err(dev, "bd index (%u) is out of range(%u)\n", tx_index,
+ ring->desc_num - 1);
+ return -EINVAL;
+ }
+
+ tx_desc = &ring->desc[tx_index];
+ dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
+ dev_info(dev, "(TX) addr: 0x%llx\n", tx_desc->addr);
+ dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
+ dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
+ dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
+ dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
+ dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
+ dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
+ dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag);
+ dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv);
+ dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
+ dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
+ dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
+ dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
+ dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen);
+ dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
+ dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
+
+ ring = ring_data[q_num + h->kinfo.num_tqps].ring;
+ value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
+ rx_index = (cnt == 1) ? value : tx_index;
+ rx_desc = &ring->desc[rx_index];
+
+ dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
+ dev_info(dev, "(RX)addr: 0x%llx\n", rx_desc->addr);
+ dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
+ dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
+ dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
+ dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id);
+ dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag);
+ dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb);
+ dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag);
+ dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info);
+
+ return 0;
+}
+
+static void hns3_dbg_help(struct hnae3_handle *h)
+{
+#define HNS3_DBG_BUF_LEN 256
+
+ char printf_buf[HNS3_DBG_BUF_LEN];
+
+ dev_info(&h->pdev->dev, "available commands\n");
+ dev_info(&h->pdev->dev, "queue info [number]\n");
+ dev_info(&h->pdev->dev, "queue map\n");
+ dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
+ dev_info(&h->pdev->dev, "dump fd tcam\n");
+ dev_info(&h->pdev->dev, "dump tc\n");
+ dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
+ dev_info(&h->pdev->dev, "dump tm\n");
+ dev_info(&h->pdev->dev, "dump qos pause cfg\n");
+ dev_info(&h->pdev->dev, "dump qos pri map\n");
+ dev_info(&h->pdev->dev, "dump qos buf cfg\n");
+ dev_info(&h->pdev->dev, "dump mng tbl\n");
+
+ memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
+ strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
+ HNS3_DBG_BUF_LEN - 1);
+ strncat(printf_buf + strlen(printf_buf),
+ " [igu egu <prt_id>] [rpu <tc_queue_num>]",
+ HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
+ strncat(printf_buf + strlen(printf_buf),
+ " [rtc] [ppp] [rcb] [tqp <q_num>]]\n",
+ HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
+ dev_info(&h->pdev->dev, "%s", printf_buf);
+
+ memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
+ strncat(printf_buf, "dump reg dcb [port_id] [pri_id] [pg_id]",
+ HNS3_DBG_BUF_LEN - 1);
+ strncat(printf_buf + strlen(printf_buf), " [rq_id] [nq_id] [qset_id]\n",
+ HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
+ dev_info(&h->pdev->dev, "%s", printf_buf);
+}
+
+static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int uncopy_bytes;
+ char *buf;
+ int len;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count < HNS3_DBG_READ_LEN)
+ return -ENOSPC;
+
+ buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
+ uncopy_bytes = copy_to_user(buffer, buf, len);
+
+ kfree(buf);
+
+ if (uncopy_bytes)
+ return -EFAULT;
+
+ return (*ppos = len);
+}
+
+static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct hnae3_handle *handle = filp->private_data;
+ struct hns3_nic_priv *priv = handle->priv;
+ char *cmd_buf, *cmd_buf_tmp;
+ int uncopied_bytes;
+ int ret = 0;
+
+ if (*ppos != 0)
+ return 0;
+
+ /* Bail out if the instance is being reset. */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+
+ uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
+ if (uncopied_bytes) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+
+ cmd_buf[count] = '\0';
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ if (strncmp(cmd_buf, "help", 4) == 0)
+ hns3_dbg_help(handle);
+ else if (strncmp(cmd_buf, "queue info", 10) == 0)
+ ret = hns3_dbg_queue_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "queue map", 9) == 0)
+ ret = hns3_dbg_queue_map(handle);
+ else if (strncmp(cmd_buf, "bd info", 7) == 0)
+ ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (handle->ae_algo->ops->dbg_run_cmd)
+ ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+
+ if (ret)
+ hns3_dbg_help(handle);
+
+ kfree(cmd_buf);
+ cmd_buf = NULL;
+
+ return count;
+}
+
+static const struct file_operations hns3_dbg_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hns3_dbg_cmd_read,
+ .write = hns3_dbg_cmd_write,
+};
+
+void hns3_dbg_init(struct hnae3_handle *handle)
+{
+ const char *name = pci_name(handle->pdev);
+ struct dentry *pfile;
+
+ handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
+ if (!handle->hnae3_dbgfs)
+ return;
+
+ pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
+ &hns3_dbg_cmd_fops);
+ if (!pfile) {
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+ dev_warn(&handle->pdev->dev, "create file for %s fail\n",
+ name);
+ }
+}
+
+void hns3_dbg_uninit(struct hnae3_handle *handle)
+{
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+}
+
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+ hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+ if (!hns3_dbgfs_root) {
+ pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
+ return;
+ }
+}
+
+void hns3_dbg_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hns3_dbgfs_root);
+ hns3_dbgfs_root = NULL;
+}
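For illustration only, a minimal user-space sketch of driving the new debugfs "cmd" node added above. The path is an assumption (default debugfs mount at /sys/kernel/debug, the directory name registered via hns3_dbg_register_debugfs(), and a placeholder PCI device name); hns3_dbg_cmd_write() reports its results through dev_info(), so the output lands in the kernel log rather than being returned on read.

	/* Hypothetical helper, not part of this patch: send "queue info 0" to the
	 * hns3 debugfs command node. All path components are assumptions; replace
	 * "0000:7d:00.0" with a real adapter's PCI name.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/hns3/0000:7d:00.0/cmd";
		const char *cmd = "queue info 0\n";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* results are printed via dev_info(); check dmesg afterwards */
		if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
			perror("write");
		close(fd);
		return 0;
	}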
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 20fcf0d1c2ce..d3b9aaf96c1c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,6 +15,7 @@
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
+#include <net/tcp.h>
#include <net/vxlan.h>
#include "hnae3.h"
@@ -239,7 +240,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
- tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}
@@ -312,6 +312,24 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
return min_t(u16, rss_size, max_rss_size);
}
+static void hns3_tqp_enable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg |= BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_tqp_disable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -334,6 +352,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
+ /* enable rcb */
+ for (j = 0; j < h->kinfo.num_tqps; j++)
+ hns3_tqp_enable(h->kinfo.tqp[j]);
+
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret)
@@ -344,6 +366,9 @@ static int hns3_nic_net_up(struct net_device *netdev)
return 0;
out_start_err:
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
+
for (j = i - 1; j >= 0; j--)
hns3_vector_disable(&priv->tqp_vector[j]);
@@ -359,6 +384,9 @@ static int hns3_nic_net_open(struct net_device *netdev)
struct hnae3_knic_private_info *kinfo;
int i, ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
netif_carrier_off(netdev);
ret = hns3_nic_set_real_num_queue(netdev);
@@ -378,23 +406,27 @@ static int hns3_nic_net_open(struct net_device *netdev)
kinfo->prio_tc[i]);
}
- priv->ae_handle->last_reset_time = jiffies;
+ if (h->ae_algo->ops->set_timer_task)
+ h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
+
return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
int i;
- if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
- return;
-
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
+ /* disable rcb */
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -408,6 +440,15 @@ static void hns3_nic_net_down(struct net_device *netdev)
static int hns3_nic_net_stop(struct net_device *netdev)
{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return 0;
+
+ if (h->ae_algo->ops->set_timer_task)
+ h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
+
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
@@ -1312,6 +1353,15 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+ if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+ if (features & NETIF_F_GRO_HW)
+ ret = h->ae_algo->ops->set_gro_en(h, true);
+ else
+ ret = h->ae_algo->ops->set_gro_en(h, false);
+ if (ret)
+ return ret;
+ }
+
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1530,18 +1580,11 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- bool if_running = netif_running(netdev);
int ret;
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
- /* if this was called with netdev up then bring netdevice down */
- if (if_running) {
- (void)hns3_nic_net_stop(netdev);
- msleep(100);
- }
-
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1549,10 +1592,6 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
else
netdev->mtu = new_mtu;
- /* if the netdev was running earlier, bring it up again */
- if (if_running && hns3_nic_net_open(netdev))
- ret = -EINVAL;
-
return ret;
}
@@ -1615,10 +1654,9 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
priv->tx_timeout_count++;
- if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
- return;
-
- /* request the reset */
+ /* request the reset, and let the hclge determine
+ * which reset level should be performed
+ */
if (h->ae_algo->ops->reset_event)
h->ae_algo->ops->reset_event(h->pdev, h);
}
@@ -1682,8 +1720,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
static void hns3_get_dev_capability(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev)
{
- if (pdev->revision >= 0x21)
+ if (pdev->revision >= 0x21) {
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
+ }
}
/* hns3_probe - Device initialization routine
@@ -1795,8 +1835,8 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NONE;
}
- if (ae_dev->ops->process_hw_error)
- ret = ae_dev->ops->process_hw_error(ae_dev);
+ if (ae_dev->ops->handle_hw_ras_error)
+ ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
else
return PCI_ERS_RESULT_NONE;
@@ -1819,9 +1859,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr prepare\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
+ ae_dev->ops->flr_prepare(ae_dev);
+}
+
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr done\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
+ ae_dev->ops->flr_done(ae_dev);
+}
+
static const struct pci_error_handlers hns3_err_handler = {
.error_detected = hns3_error_detected,
.slot_reset = hns3_slot_reset,
+ .reset_prepare = hns3_reset_prepare,
+ .reset_done = hns3_reset_done,
};
static struct pci_driver hns3_driver = {
@@ -1875,7 +1935,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
if (pdev->revision >= 0x21) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_GRO_HW;
+ netdev->features |= NETIF_F_GRO_HW;
if (!(h->flags & HNAE3_SUPPORT_VF)) {
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2253,6 +2315,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
+ /* We MUST enable hardware checksum before enabling hardware GRO */
+ if (skb_shinfo(skb)->gso_size) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
/* check if hardware has done checksum */
if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
@@ -2296,6 +2364,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
+ if (skb_has_frag_list(skb))
+ napi_gro_flush(&ring->tqp_vector->napi, false);
+
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
@@ -2329,12 +2400,166 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+ unsigned char *va)
+{
+#define HNS3_NEED_ADD_FRAG 1
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb;
+
+ ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+ skb = ring->skb;
+ if (unlikely(!skb)) {
+ netdev_err(netdev, "alloc rx skb fail\n");
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -ENOMEM;
+ }
+
+ prefetchw(skb->data);
+
+ ring->pending_buf = 1;
+ ring->frag_num = 0;
+ ring->tail_skb = NULL;
+ if (length <= HNS3_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* We can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* This page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+ return 0;
+ }
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.seg_pkt_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ __skb_put(skb, ring->pull_len);
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
+ desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ return HNS3_NEED_ADD_FRAG;
+}
+
+static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
+ struct sk_buff **out_skb, bool pending)
+{
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *head_skb = *out_skb;
+ struct sk_buff *new_skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *pre_desc;
+ u32 bd_base_info;
+ int pre_bd;
+
+ /* if there is a pending BD, next_to_clean has already been moved
+ * past it, so the FE bit must be read from the previous descriptor
+ */
+ if (pending) {
+ pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
+ ring->desc_num;
+ pre_desc = &ring->desc[pre_bd];
+ bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
+ } else {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ }
+
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ return -ENXIO;
+
+ if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
+ HNS3_RX_HEAD_SIZE);
+ if (unlikely(!new_skb)) {
+ netdev_err(ring->tqp->handle->kinfo.netdev,
+ "alloc rx skb frag fail\n");
+ return -ENXIO;
+ }
+ ring->frag_num = 0;
+
+ if (ring->tail_skb) {
+ ring->tail_skb->next = new_skb;
+ ring->tail_skb = new_skb;
+ } else {
+ skb_shinfo(skb)->frag_list = new_skb;
+ ring->tail_skb = new_skb;
+ }
+ }
+
+ if (ring->tail_skb) {
+ head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->data_len += le16_to_cpu(desc->rx.size);
+ head_skb->len += le16_to_cpu(desc->rx.size);
+ skb = ring->tail_skb;
+ }
+
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+ ring->pending_buf++;
+ }
+
+ return 0;
+}
+
+static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info)
+{
+ u16 gro_count;
+ u32 l3_type;
+
+ gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
+ /* if there is no HW GRO, do not set gro params */
+ if (!gro_count)
+ return;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = gro_count;
+
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ if (l3_type == HNS3_L3_TYPE_IPV4)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ else if (l3_type == HNS3_L3_TYPE_IPV6)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ return;
+
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
+ if (skb_shinfo(skb)->gso_size)
+ tcp_gro_complete(skb);
+}
+
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
struct sk_buff *skb)
{
- struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
struct hnae3_handle *handle = ring->tqp->handle;
enum pkt_hash_types rss_type;
+ struct hns3_desc *desc;
+ int last_bd;
+
+ /* By the time the driver handles the RSS type, ring->next_to_clean
+ * already points to the first descriptor of the next packet, so step
+ * back one descriptor here.
+ */
+ last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
+ desc = &ring->desc[last_bd];
if (le32_to_cpu(desc->rx.rss_hash))
rss_type = handle->kinfo.rss_type;
@@ -2345,18 +2570,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb, int *out_bnum)
+ struct sk_buff **out_skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
- struct sk_buff *skb;
- unsigned char *va;
u32 bd_base_info;
- int pull_len;
u32 l234info;
int length;
- int bnum;
+ int ret;
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
@@ -2368,9 +2591,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
/* Check valid BD */
if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
- return -EFAULT;
+ return -ENXIO;
- va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+ if (!skb)
+ ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* Prefetch first cache line of first page
* Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -2379,62 +2603,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
* lines. In such a case, single fetch would suffice to cache in the
* relevant part of the header.
*/
- prefetch(va);
+ prefetch(ring->va);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(ring->va + L1_CACHE_BYTES);
#endif
- skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
- HNS3_RX_HEAD_SIZE);
- if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- return -ENOMEM;
- }
-
- prefetchw(skb->data);
-
- bnum = 1;
- if (length <= HNS3_RX_HEAD_SIZE) {
- memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+ if (!skb) {
+ ret = hns3_alloc_skb(ring, length, ring->va);
+ *out_skb = skb = ring->skb;
- /* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
- desc_cb->reuse_flag = 1;
- else /* This page cannot be reused so discard it */
- put_page(desc_cb->priv);
+ if (ret < 0) /* alloc buffer fail */
+ return ret;
+ if (ret > 0) { /* need add frag */
+ ret = hns3_add_frag(ring, desc, &skb, false);
+ if (ret)
+ return ret;
- ring_ptr_move_fw(ring, next_to_clean);
+ /* As the head data may be changed when hardware GRO is
+ * enabled, copy it in only after the rest of the packet
+ * has been received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
+ }
} else {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
-
- memcpy(__skb_put(skb, pull_len), va,
- ALIGN(pull_len, sizeof(long)));
-
- hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
+ ret = hns3_add_frag(ring, desc, &skb, true);
+ if (ret)
+ return ret;
- while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
- desc = &ring->desc[ring->next_to_clean];
- desc_cb = &ring->desc_cb[ring->next_to_clean];
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
- bnum++;
- }
+ /* As the head data may be changed when hardware GRO is
+ * enabled, copy it in only after the rest of the packet
+ * has been received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
}
- *out_bnum = bnum;
-
l234info = le32_to_cpu(desc->rx.l234_info);
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2484,7 +2688,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
+ /* This is needed in order to enable forwarding support */
+ hns3_set_gro_param(skb, l234info, bd_base_info);
+
hns3_rx_checksum(ring, skb, desc);
+ *out_skb = skb;
hns3_set_rx_skb_rss_type(ring, skb);
return 0;
@@ -2497,9 +2705,9 @@ int hns3_clean_rx_ring(
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = NULL;
- int num, bnum = 0;
+ int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ struct sk_buff *skb = ring->skb;
+ int num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
@@ -2513,24 +2721,32 @@ int hns3_clean_rx_ring(
hns3_nic_alloc_rx_buffers(ring,
clean_count + unused_count);
clean_count = 0;
- unused_count = hns3_desc_unused(ring);
+ unused_count = hns3_desc_unused(ring) -
+ ring->pending_buf;
}
/* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb, &bnum);
+ err = hns3_handle_rx_bd(ring, &skb);
if (unlikely(!skb)) /* This fault cannot be repaired */
goto out;
- recv_bds += bnum;
- clean_count += bnum;
- if (unlikely(err)) { /* Do jump the err */
- recv_pkts++;
+ if (err == -ENXIO) { /* Do not get FE for the packet */
+ goto out;
+ } else if (unlikely(err)) { /* Do jump the err */
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
continue;
}
/* Do update ip stack process */
skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
recv_pkts++;
}
@@ -2644,10 +2860,10 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
bool rx_update, tx_update;
- if (tqp_vector->int_adapt_down > 0) {
- tqp_vector->int_adapt_down--;
+ /* update param every 1000ms */
+ if (time_before(jiffies,
+ tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
return;
- }
if (rx_group->coal.gl_adapt_enable) {
rx_update = hns3_get_new_int_gl(rx_group);
@@ -2664,11 +2880,11 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
}
tqp_vector->last_jiffies = jiffies;
- tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
}
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
+ struct hns3_nic_priv *priv = netdev_priv(napi->dev);
struct hns3_enet_ring *ring;
int rx_pkt_total = 0;
@@ -2677,6 +2893,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
int rx_budget;
+ if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ napi_complete(napi);
+ return 0;
+ }
+
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
@@ -2701,9 +2922,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- napi_complete(napi);
- hns3_update_new_int_gl(tqp_vector);
- hns3_mask_vector_irq(tqp_vector, 1);
+ if (napi_complete(napi) &&
+ likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ hns3_update_new_int_gl(tqp_vector);
+ hns3_mask_vector_irq(tqp_vector, 1);
+ }
return rx_pkt_total;
}
@@ -2783,9 +3006,10 @@ err_free_chain:
cur_chain = head->next;
while (cur_chain) {
chain = cur_chain->next;
- devm_kfree(&pdev->dev, chain);
+ devm_kfree(&pdev->dev, cur_chain);
cur_chain = chain;
}
+ head->next = NULL;
return -ENOMEM;
}
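The err_free_chain fix above (freeing cur_chain rather than its successor, then clearing head->next) is the usual teardown for a singly linked list: capture the next pointer before releasing the current node so the walk never touches freed memory, and leave the list head empty so a later cleanup pass has nothing stale to follow. A generic sketch of that pattern, with a hypothetical node type and assuming <linux/slab.h> for kfree():

	/* Illustrative only: free every node of a singly linked list safely. */
	struct node {
		struct node *next;
	};

	static void free_list(struct node **headp)
	{
		struct node *cur = *headp;

		while (cur) {
			struct node *next = cur->next;	/* read the link first */

			kfree(cur);			/* then release the node */
			cur = next;
		}
		*headp = NULL;				/* nothing stale left behind */
	}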
@@ -2876,7 +3100,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
ret = hns3_get_vector_ring_chain(tqp_vector,
&vector_ring_chain);
if (ret)
- return ret;
+ goto map_ring_fail;
ret = h->ae_algo->ops->map_ring_to_vector(h,
tqp_vector->vector_irq, &vector_ring_chain);
@@ -2901,6 +3125,8 @@ map_ring_fail:
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
+#define HNS3_VECTOR_PF_MAX_NUM 64
+
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_vector_info *vector;
@@ -2913,6 +3139,8 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
/* RSS size, cpu online and vector_num should be the same */
/* Should consider 2p/4p later */
vector_num = min_t(u16, num_online_cpus(), tqp_num);
+ vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
+
vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
GFP_KERNEL);
if (!vector)
@@ -2970,12 +3198,12 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
- if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
- (void)irq_set_affinity_hint(
- priv->tqp_vector[i].vector_irq,
- NULL);
- free_irq(priv->tqp_vector[i].vector_irq,
- &priv->tqp_vector[i]);
+ if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
+ irq_set_affinity_notifier(tqp_vector->vector_irq,
+ NULL);
+ irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
+ free_irq(tqp_vector->vector_irq, tqp_vector);
+ tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
}
priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -3319,6 +3547,22 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+static int hns3_client_start(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_start)
+ return 0;
+
+ return handle->ae_algo->ops->client_start(handle);
+}
+
+static void hns3_client_stop(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_stop)
+ return;
+
+ handle->ae_algo->ops->client_stop(handle);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -3337,7 +3581,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
handle->kinfo.netdev = netdev;
@@ -3357,11 +3600,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- if (handle->flags & HNAE3_SUPPORT_VF)
- handle->reset_level = HNAE3_VF_RESET;
- else
- handle->reset_level = HNAE3_FUNC_RESET;
-
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3392,10 +3630,20 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_reg_netdev_fail;
}
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto out_reg_netdev_fail;
+ }
+
hns3_dcbnl_setup(handle);
- /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
- netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ hns3_dbg_init(handle);
+
+ /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
+ netdev->max_mtu = HNS3_MAX_MTU;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
@@ -3418,11 +3666,18 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ hns3_client_stop(handle);
+
hns3_remove_hw_addr(netdev);
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ goto out_netdev_free;
+ }
+
hns3_del_all_fd_rules(netdev, true);
hns3_force_clear_all_rx_ring(handle);
@@ -3441,8 +3696,11 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_put_ring_config(priv);
+ hns3_dbg_uninit(handle);
+
priv->ring_data = NULL;
+out_netdev_free:
free_netdev(netdev);
}
@@ -3708,8 +3966,22 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ /* it is cumbersome for hardware to pick-and-choose entries for deletion
+ * from table space. Hence, for function reset software intervention is
+ * required to delete the entries
+ */
+ if (hns3_dev_ongoing_func_reset(ae_dev)) {
+ hns3_remove_hw_addr(ndev);
+ hns3_del_all_fd_rules(ndev, false);
+ }
if (!netif_running(ndev))
return 0;
@@ -3720,6 +3992,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
int ret = 0;
if (netif_running(kinfo->netdev)) {
@@ -3729,9 +4002,10 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
"hns net up fail, ret=%d!\n", ret);
return ret;
}
- handle->last_reset_time = jiffies;
}
+ clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+
return ret;
}
@@ -3771,28 +4045,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ ret = hns3_nic_alloc_vector_data(priv);
+ if (ret)
+ return ret;
+
hns3_restore_coal(priv);
ret = hns3_nic_init_vector_data(priv);
if (ret)
- return ret;
+ goto err_dealloc_vector;
ret = hns3_init_all_ring(priv);
- if (ret) {
- hns3_nic_uninit_vector_data(priv);
- priv->ring_data = NULL;
- }
+ if (ret)
+ goto err_uninit_vector;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ return ret;
+
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ priv->ring_data = NULL;
+err_dealloc_vector:
+ hns3_nic_dealloc_vector_data(priv);
return ret;
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ return 0;
+ }
+
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
@@ -3803,18 +4093,15 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
+ ret = hns3_nic_dealloc_vector_data(priv);
+ if (ret)
+ netdev_err(netdev, "dealloc vector error\n");
+
ret = hns3_uninit_all_ring(priv);
if (ret)
netdev_err(netdev, "uninit ring error\n");
- /* it is cumbersome for hardware to pick-and-choose entries for deletion
- * from table space. Hence, for function reset software intervention is
- * required to delete the entries
- */
- if (hns3_dev_ongoing_func_reset(ae_dev)) {
- hns3_remove_hw_addr(netdev);
- hns3_del_all_fd_rules(netdev, false);
- }
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
}
@@ -3980,15 +4267,23 @@ static int __init hns3_init_module(void)
INIT_LIST_HEAD(&client.node);
+ hns3_dbg_register_debugfs(hns3_driver_name);
+
ret = hnae3_register_client(&client);
if (ret)
- return ret;
+ goto err_reg_client;
ret = pci_register_driver(&hns3_driver);
if (ret)
- hnae3_unregister_client(&client);
+ goto err_reg_driver;
return ret;
+
+err_reg_driver:
+ hnae3_unregister_client(&client);
+err_reg_client:
+ hns3_dbg_unregister_debugfs();
+ return ret;
}
module_init(hns3_init_module);
@@ -4000,6 +4295,7 @@ static void __exit hns3_exit_module(void)
{
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
+ hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d3636d088aa3..e55995e93bb0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -15,7 +15,7 @@ extern const char hns3_driver_version[];
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
- HNS3_NIC_STATE_REINITING,
+ HNS3_NIC_STATE_INITED,
HNS3_NIC_STATE_DOWN,
HNS3_NIC_STATE_DISABLED,
HNS3_NIC_STATE_REMOVING,
@@ -47,7 +47,7 @@ enum hns3_nic_state {
#define HNS3_RING_PREFETCH_EN_REG 0x0007C
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
-#define HNS3_RING_RX_VM_REG 0x00090
+#define HNS3_RING_EN_REG 0x00090
#define HNS3_RING_T0_BE_RST 0x00094
#define HNS3_RING_COULD_BE_RST 0x00098
#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
@@ -76,7 +76,10 @@ enum hns3_nic_state {
#define HNS3_RING_MAX_PENDING 32768
#define HNS3_RING_MIN_PENDING 8
#define HNS3_RING_BD_MULTIPLE 8
-#define HNS3_MAX_MTU 9728
+/* max frame size of mac */
+#define HNS3_MAC_MAX_FRAME 9728
+#define HNS3_MAX_MTU \
+ (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
@@ -109,6 +112,10 @@ enum hns3_nic_state {
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
+#define HNS3_RXD_GRO_COUNT_S 24
+#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
+#define HNS3_RXD_GRO_FIXID_B 30
+#define HNS3_RXD_GRO_ECN_B 31
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
@@ -135,9 +142,8 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
-#define HNS3_RXD_HDL_S 16
-#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S)
-#define HNS3_RXD_HSIND_B 31
+#define HNS3_RXD_GRO_SIZE_S 16
+#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
@@ -194,6 +200,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_RING_EN_B 0
+
enum hns3_pkt_l3t_type {
HNS3_L3T_NONE,
HNS3_L3T_IPV6,
@@ -399,11 +407,19 @@ struct hns3_enet_ring {
*/
int next_to_clean;
+ int pull_len; /* head length for current packet */
+ u32 frag_num;
+ unsigned char *va; /* first buffer address for current packet */
+
u32 flag; /* ring attribute */
int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
+
+ int pending_buf;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
};
struct hns_queue;
@@ -460,8 +476,6 @@ enum hns3_link_mode_bits {
#define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40
-#define HNS3_INT_ADAPT_DOWN_START 100
-
struct hns3_enet_coalesce {
u16 int_gl;
u8 gl_adapt_enable;
@@ -496,8 +510,6 @@ struct hns3_enet_tqp_vector {
char name[HNAE3_INT_NAME_LEN];
- /* when 0 should adjust interrupt coalesce parameter */
- u8 int_adapt_down;
unsigned long last_jiffies;
} ____cacheline_internodealigned_in_smp;
@@ -577,6 +589,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring)
return ring->next_to_use == ring->next_to_clean;
}
+static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
u8 __iomem *reg_addr = READ_ONCE(base);
@@ -586,7 +603,21 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
{
- return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+ return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_FLR_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
+ ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
+}
+
+#define hns3_read_dev(a, reg) \
+ hns3_read_reg((a)->io_base, (reg))
+
+static inline bool hns3_nic_resetting(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}
#define hns3_write_dev(a, reg, value) \
@@ -648,4 +679,8 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle);
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif
+void hns3_dbg_init(struct hnae3_handle *handle);
+void hns3_dbg_uninit(struct hnae3_handle *handle);
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
+void hns3_dbg_unregister_debugfs(void);
#endif
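A quick arithmetic check on the reworked MTU macro above, using the standard values from the kernel headers (ETH_HLEN = 14, ETH_FCS_LEN = 4, VLAN_HLEN = 4): HNS3_MAX_MTU = 9728 - (14 + 4 + 2 * 4) = 9702, which matches the figure quoted in the updated "MTU range" comment in hns3_client_init() in hns3_enet.c.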
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a4762c2b8ba1..e678b6939da3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -291,6 +291,11 @@ static void hns3_self_test(struct net_device *ndev,
int test_index = 0;
u32 i;
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!");
+ return;
+ }
+
/* Only do offline selftest, or pass by default */
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
@@ -530,6 +535,11 @@ static void hns3_get_ringparam(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!");
+ return;
+ }
+
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
@@ -760,6 +770,9 @@ static int hns3_set_ringparam(struct net_device *ndev,
u32 old_desc_num, new_desc_num;
int ret;
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
@@ -808,7 +821,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
if (if_running)
- ret = dev_open(ndev);
+ ret = dev_open(ndev, NULL);
return ret;
}
@@ -872,6 +885,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
struct hnae3_handle *h = priv->ae_handle;
u16 queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (queue >= queue_num) {
netdev_err(netdev,
"Invalid queue value %d! Queue max id=%d\n",
@@ -1033,6 +1049,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
int ret;
int i;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
ret = hns3_check_coalesce_para(netdev, cmd);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 580e81743681..fffe8c1c45d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -6,6 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 690f62ed87dc..8af0cef5609b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hdev->hw.cmq.crq.next_to_use = 0;
hclge_cmd_init_regs(&hdev->hw);
- clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if there is new reset pending, because the higher level
+ * reset may happen when lower level reset is being processed.
+ */
+ if ((hclge_is_reset_pending(hdev))) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
+
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 872cd4bdd70d..f23042b24c09 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -86,11 +86,24 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
+ HCLGE_OPC_DFX_BD_NUM = 0x0043,
+ HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
+ HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
+ HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
+ HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
+ HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
+ HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
+ HCLGE_OPC_DFX_NCSI_REG = 0x004A,
+ HCLGE_OPC_DFX_RTC_REG = 0x004B,
+ HCLGE_OPC_DFX_PPP_REG = 0x004C,
+ HCLGE_OPC_DFX_RCB_REG = 0x004D,
+ HCLGE_OPC_DFX_TQP_REG = 0x004E,
+ HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+ HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
- HCLGE_OPC_QUERY_AN_RESULT = 0x0306,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
@@ -126,6 +139,16 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
+ HCLGE_OPC_QSET_DFX_STS = 0x0844,
+ HCLGE_OPC_PRI_DFX_STS = 0x0845,
+ HCLGE_OPC_PG_DFX_STS = 0x0846,
+ HCLGE_OPC_PORT_DFX_STS = 0x0847,
+ HCLGE_OPC_SCH_NQ_CNT = 0x0848,
+ HCLGE_OPC_SCH_RQ_CNT = 0x0849,
+ HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
+ HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
+ HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
@@ -142,6 +165,7 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
HCLGE_OPC_QUERY_RX_STATUS = 0x0B13,
@@ -152,6 +176,7 @@ enum hclge_opcode_type {
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
@@ -210,27 +235,34 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+ /* SFP command */
+ HCLGE_OPC_SFP_GET_SPEED = 0x7104,
+
/* Error INT commands */
+ HCLGE_MAC_COMMON_INT_EN = 0x030E,
HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
- HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d,
- HCLGE_TM_SCH_ECC_ERR_RINT_CE = 0x082f,
- HCLGE_TM_SCH_ECC_ERR_RINT_NFE = 0x0830,
- HCLGE_TM_SCH_ECC_ERR_RINT_FE = 0x0831,
- HCLGE_TM_SCH_MBIT_ECC_INFO_CMD = 0x0833,
+ HCLGE_SSU_ECC_INT_CMD = 0x0989,
+ HCLGE_SSU_COMMON_INT_CMD = 0x098C,
+ HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
+ HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
+ HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
HCLGE_COMMON_ECC_INT_CFG = 0x1505,
- HCLGE_IGU_EGU_TNL_INT_QUERY = 0x1802,
+ HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
+ HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+ HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
+ HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
- HCLGE_IGU_EGU_TNL_INT_CLR = 0x1804,
- HCLGE_IGU_COMMON_INT_QUERY = 0x1805,
HCLGE_IGU_COMMON_INT_EN = 0x1806,
- HCLGE_IGU_COMMON_INT_CLR = 0x1807,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
- HCLGE_TM_QCN_MEM_INT_INFO_CMD = 0x1A17,
HCLGE_PPP_CMD0_INT_CMD = 0x2100,
HCLGE_PPP_CMD1_INT_CMD = 0x2101,
- HCLGE_NCSI_INT_QUERY = 0x2400,
+ HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
HCLGE_NCSI_INT_EN = 0x2401,
- HCLGE_NCSI_INT_CLR = 0x2402,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -388,7 +420,9 @@ struct hclge_pf_res_cmd {
#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
__le16 pf_intr_vector_number;
__le16 pf_own_fun_number;
- __le32 rsv[3];
+ __le16 tx_buf_size;
+ __le16 dv_buf_size;
+ __le32 rsv[2];
};
#define HCLGE_CFG_OFFSET_S 0
@@ -542,20 +576,6 @@ struct hclge_config_mac_speed_dup_cmd {
u8 rsv[22];
};
-#define HCLGE_QUERY_SPEED_S 3
-#define HCLGE_QUERY_AN_B 0
-#define HCLGE_QUERY_DUPLEX_B 2
-
-#define HCLGE_QUERY_SPEED_M GENMASK(4, 0)
-#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B)
-#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B)
-
-struct hclge_query_an_speed_dup_cmd {
- u8 an_syn_dup_speed;
- u8 pause;
- u8 rsv[23];
-};
-
#define HCLGE_RING_ID_MASK GENMASK(9, 0)
#define HCLGE_TQP_ENABLE_B 0
@@ -572,6 +592,11 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
+struct hclge_sfp_speed_cmd {
+ __le32 sfp_speed;
+ u32 rsv[5];
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
@@ -746,6 +771,24 @@ struct hclge_cfg_tx_queue_pointer_cmd {
u8 rsv[14];
};
+#pragma pack(1)
+struct hclge_mac_ethertype_idx_rd_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ u8 mac_add[6];
+ __le16 index;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ __le16 rev0;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rev1[2];
+};
+
+#pragma pack()
+
#define HCLGE_TSO_MSS_MIN_S 0
#define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0)
@@ -758,6 +801,12 @@ struct hclge_cfg_tso_status_cmd {
u8 rsv[20];
};
+#define HCLGE_GRO_EN_B 0
+struct hclge_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGE_TSO_MSS_MIN 256
#define HCLGE_TSO_MSS_MAX 9668
@@ -792,6 +841,7 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
+#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x200 /* 512 byte */
#define HCLGE_TYPE_CRQ 0
#define HCLGE_TYPE_CSQ 1
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index e72f724123d7..f6323b2501dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -35,7 +35,9 @@ static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
}
}
- return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+ hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+
+ return 0;
}
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
@@ -70,25 +72,61 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
return 0;
}
+static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
+ u8 *prio_tc)
+{
+ int i;
+
+ if (num_tc > hdev->tc_max) {
+ dev_err(&hdev->pdev->dev,
+ "tc num checking failed, %u > tc_max(%u)\n",
+ num_tc, hdev->tc_max);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ if (prio_tc[i] >= num_tc) {
+ dev_err(&hdev->pdev->dev,
+ "prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
+ i, prio_tc[i], num_tc);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ if (num_tc > hdev->vport[i].alloc_tqps) {
+ dev_err(&hdev->pdev->dev,
+				"vport(%u) allocated tqp checking failed, %u > tqp(%u)\n",
+ i, num_tc, hdev->vport[i].alloc_tqps);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
u8 *tc, bool *changed)
{
bool has_ets_tc = false;
u32 total_ets_bw = 0;
u8 max_tc = 0;
+ int ret;
u8 i;
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (ets->prio_tc[i] >= hdev->tc_max ||
- i >= hdev->tc_max)
- return -EINVAL;
-
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
*changed = true;
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
+ }
+ ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -184,9 +222,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
return ret;
- ret = hclge_tm_schd_info_update(hdev, num_tc);
- if (ret)
- return ret;
+ hclge_tm_schd_info_update(hdev, num_tc);
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -305,20 +341,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
return -EINVAL;
- if (tc > hdev->tc_max) {
- dev_err(&hdev->pdev->dev,
- "setup tc failed, tc(%u) > tc_max(%u)\n",
- tc, hdev->tc_max);
- return -EINVAL;
- }
-
- ret = hclge_tm_schd_info_update(hdev, tc);
+ ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
if (ret)
- return ret;
+ return -EINVAL;
- ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
- if (ret)
- return ret;
+ hclge_tm_schd_info_update(hdev, tc);
+ hclge_tm_prio_tc_info_update(hdev, prio_tc);
ret = hclge_tm_init_hw(hdev);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
new file mode 100644
index 000000000000..26d80504c730
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/device.h>
+
+#include "hclge_debugfs.h"
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_tm.h"
+#include "hnae3.h"
+
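+/* Each HCLGE_OPC_DFX_BD_NUM descriptor carries six BD counts, so the
+ * per-module count is picked out of the four-descriptor reply with
+ * desc[offset / 6].data[offset % 6].
+ */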
+static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
+{
+ struct hclge_desc desc[4];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 4);
+ if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ dev_err(&hdev->pdev->dev,
+ "get dfx bdnum fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ return (int)desc[offset / 6].data[offset % 6];
+}
+
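+/* Build a chain of @bd_num read descriptors for @cmd (the register index
+ * goes in data[0] of the first descriptor, HCLGE_CMD_FLAG_NEXT links the
+ * rest) and send it over the command queue.
+ */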
+static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src,
+ int index, int bd_num,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc = desc_src;
+ int ret, i;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ desc->data[0] = cpu_to_le32(index);
+
+ for (i = 1; i < bd_num; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "read reg cmd send fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
+ struct hclge_dbg_dfx_message *dfx_message,
+ char *cmd_buf, int msg_num, int offset,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc_src;
+ struct hclge_desc *desc;
+ int bd_num, buf_len;
+ int ret, i;
+ int index;
+ int max;
+
+ ret = kstrtouint(cmd_buf, 10, &index);
+ index = (ret != 0) ? 0 : index;
+
+ bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
+ if (bd_num <= 0)
+ return;
+
+ buf_len = sizeof(struct hclge_desc) * bd_num;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
+ return;
+ }
+
+ desc = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd);
+ if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ kfree(desc_src);
+ return;
+ }
+
+ max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num;
+
+ desc = desc_src;
+ for (i = 0; i < max; i++) {
+		if (i > 0 && (i % 6) == 0)
+			desc++;
+
+ if (dfx_message->flag)
+ dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
+ dfx_message->message, desc->data[i % 6]);
+
+ dfx_message++;
+ }
+
+ kfree(desc_src);
+}
+
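+/* Expects six ids in cmd_buf ("<port> <pri> <pg> <rq> <nq> <qset>") and
+ * dumps the matching scheduler/DCB DFX status words, decoding the bitmap
+ * fields from data[1] of each reply.
+ */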
+static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_dbg_bitmap_cmd *bitmap;
+ int rq_id, pri_id, qset_id;
+ int port_id, nq_id, pg_id;
+ struct hclge_desc desc[2];
+	int cnt, ret;
+
+ cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
+ &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
+ if (cnt != 6) {
+ dev_err(&hdev->pdev->dev,
+ "dump dcb: bad command parameter, cnt=%d\n", cnt);
+ return;
+ }
+
+ ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
+ HCLGE_OPC_QSET_DFX_STS);
+ if (ret)
+ return;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
+ dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
+ dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
+ if (ret)
+ return;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
+ dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
+ if (ret)
+ return;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
+ dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_PORT_DFX_STS);
+ if (ret)
+ return;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
+ if (ret)
+ return;
+
+ dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
+ if (ret)
+ return;
+
+ dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
+ if (ret)
+ return;
+
+ dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]);
+ dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]);
+ dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]);
+ dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]);
+ dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]);
+ dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_TM_INTERNAL_CNT);
+ if (ret)
+ return;
+
+ dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_TM_INTERNAL_STS_1);
+ if (ret)
+ return;
+
+ dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]);
+ dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]);
+ dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]);
+ dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
+}
+
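+/* Dispatch "dump reg <module> [index]": the fixed offsets into cmd_buf
+ * (&cmd_buf[9], &cmd_buf[13], ...) skip the "dump reg " prefix and the
+ * module name so that only the optional index is left for parsing.
+ */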
+static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf)
+{
+ int msg_num;
+
+ if (strncmp(&cmd_buf[9], "bios common", 11) == 0) {
+ msg_num = sizeof(hclge_dbg_bios_common_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
+ &cmd_buf[21], msg_num,
+ HCLGE_DBG_DFX_BIOS_OFFSET,
+ HCLGE_OPC_DFX_BIOS_COMMON_REG);
+ } else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_ssu_reg_0) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_SSU_0_OFFSET,
+ HCLGE_OPC_DFX_SSU_REG_0);
+
+ msg_num = sizeof(hclge_dbg_ssu_reg_1) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_SSU_1_OFFSET,
+ HCLGE_OPC_DFX_SSU_REG_1);
+
+ msg_num = sizeof(hclge_dbg_ssu_reg_2) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_SSU_2_OFFSET,
+ HCLGE_OPC_DFX_SSU_REG_2);
+ } else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) {
+ msg_num = sizeof(hclge_dbg_igu_egu_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
+ &cmd_buf[17], msg_num,
+ HCLGE_DBG_DFX_IGU_OFFSET,
+ HCLGE_OPC_DFX_IGU_EGU_REG);
+ } else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_rpu_reg_0) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_RPU_0_OFFSET,
+ HCLGE_OPC_DFX_RPU_REG_0);
+
+ msg_num = sizeof(hclge_dbg_rpu_reg_1) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_RPU_1_OFFSET,
+ HCLGE_OPC_DFX_RPU_REG_1);
+ } else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) {
+ msg_num = sizeof(hclge_dbg_ncsi_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
+ &cmd_buf[14], msg_num,
+ HCLGE_DBG_DFX_NCSI_OFFSET,
+ HCLGE_OPC_DFX_NCSI_REG);
+ } else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_rtc_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_RTC_OFFSET,
+ HCLGE_OPC_DFX_RTC_REG);
+ } else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_ppp_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_PPP_OFFSET,
+ HCLGE_OPC_DFX_PPP_REG);
+ } else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_rcb_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_RCB_OFFSET,
+ HCLGE_OPC_DFX_RCB_REG);
+ } else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_tqp_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_TQP_OFFSET,
+ HCLGE_OPC_DFX_TQP_REG);
+ } else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) {
+ hclge_dbg_dump_dcb(hdev, &cmd_buf[13]);
+ } else {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return;
+ }
+}
+
+static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
+ char *title_buf, char *true_buf,
+ char *false_buf)
+{
+	dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
+		 flag ? true_buf : false_buf);
+}
+
+static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
+{
+ struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_desc desc;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
+ return;
+ }
+
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "dump tc\n");
+ dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
+ ets_weight->weight_offset);
+
+ for (i = 0; i < HNAE3_MAX_TC; i++)
+ hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
+ "tc", "no sp mode", "sp mode");
+}
+
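+/* Dump the PG/port shaping parameters, the PG/PRI/QS scheduling modes and
+ * the BP-to-qset mapping, one firmware query per table.
+ */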
+static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
+{
+ struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
+ struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+ struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
+ pg_shap_cfg_cmd->pg_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
+ pg_shap_cfg_cmd->pg_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PORT_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
+ port_shap_cfg_cmd->port_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
+
+ cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
+
+ cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+
+ cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
+		 bp_to_qs_map_cmd->tc_id);
+	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
+		 bp_to_qs_map_cmd->qs_group_id);
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
+ bp_to_qs_map_cmd->qs_bit_map);
+ return;
+
+err_tm_pg_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
+ cmd, ret);
+}
+
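+/* Dump the TM mapping tables (PG->PRI, QS->PRI, NQ->QS), the DWRR weights
+ * and the priority shaping parameters, then the PG level via
+ * hclge_dbg_dump_tm_pg().
+ */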
+static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
+{
+ struct hclge_priority_weight_cmd *priority_weight;
+ struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
+ struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
+ struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ struct hclge_pg_weight_cmd *pg_weight;
+ struct hclge_qs_weight_cmd *qs_weight;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump tm\n");
+	dev_info(&hdev->pdev->dev, "PG_TO_PRI pg_id: %u\n",
+ pg_to_pri_map->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
+ pg_to_pri_map->pri_bit_map);
+
+ cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
+ qs_to_pri_map->qs_id);
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
+ qs_to_pri_map->priority);
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
+ qs_to_pri_map->link_vld);
+
+ cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
+ nq_to_qs_map->qset_id);
+
+ cmd = HCLGE_OPC_TM_PG_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
+ dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_QS_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
+ dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_PRI_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
+ shap_cfg_cmd->pri_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
+ shap_cfg_cmd->pri_shapping_para);
+
+ hclge_dbg_dump_tm_pg(hdev);
+
+ return;
+
+err_tm_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
+ cmd, ret);
+}
+
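+/* For the queue id given in cmd_buf, walk the queue -> qset -> priority
+ * -> TC mapping and then read out the 32 BP-to-qset bitmap groups for
+ * that TC.
+ */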
+static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
+{
+ struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+ struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_tqp_tx_queue_tc_cmd *tc;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int queue_id, group_id;
+	u32 qset_mapping[32];
+ int tc_id, qset_id;
+ int pri_id, ret;
+ u32 i;
+
+ ret = kstrtouint(&cmd_buf[12], 10, &queue_id);
+ queue_id = (ret != 0) ? 0 : queue_id;
+
+ cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+ nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+ qset_id = nq_to_qs_map->qset_id & 0x3FF;
+
+ cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ map->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+ pri_id = map->priority;
+
+ cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
+ tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ tc->queue_id = cpu_to_le16(queue_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+ tc_id = tc->tc_id & 0x7;
+
+ dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
+ dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
+ queue_id, qset_id, pri_id, tc_id);
+
+ cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+ bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+ for (group_id = 0; group_id < 32; group_id++) {
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ bp_to_qs_map_cmd->tc_id = tc_id;
+ bp_to_qs_map_cmd->qs_group_id = group_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+
+		qset_mapping[group_id] = bp_to_qs_map_cmd->qs_bit_map;
+ }
+
+	dev_info(&hdev->pdev->dev, "index | tm bp qset mapping:\n");
+
+ i = 0;
+ for (group_id = 0; group_id < 4; group_id++) {
+ dev_info(&hdev->pdev->dev,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+			 group_id * 256, qset_mapping[i + 7],
+			 qset_mapping[i + 6], qset_mapping[i + 5],
+			 qset_mapping[i + 4], qset_mapping[i + 3],
+			 qset_mapping[i + 2], qset_mapping[i + 1],
+			 qset_mapping[i]);
+ i += 8;
+ }
+
+ return;
+
+err_tm_map_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n",
+ cmd, ret);
+}
+
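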
+static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+		dev_err(&hdev->pdev->dev, "dump qos pause cfg fail, status is %d.\n",
+ ret);
+ return;
+ }
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
+ dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
+ pause_param->pause_trans_gap);
+ dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
+ pause_param->pause_trans_time);
+}
+
+static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
+{
+ struct hclge_qos_pri_map_cmd *pri_map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "dump qos pri map fail, status is %d.\n", ret);
+ return;
+ }
+
+ pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump qos pri map\n");
+ dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+ dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
+ dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
+ dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
+ dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
+ dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
+ dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
+ dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
+ dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
+}
+
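+/* Dump the TX/RX packet buffer allocation and the RX private/shared
+ * waterlines; the per-TC watermarks come back four TCs per descriptor,
+ * hence the two-descriptor reads.
+ */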
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
+ struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_rx_com_wl *rx_com_wl;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
+
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM; i++)
+ dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
+ tx_buf_cmd->tx_pkt_buff[i]);
+
+ cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM; i++)
+ dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
+ rx_buf_cmd->buf_num[i]);
+
+ dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
+ rx_buf_cmd->shared_buf);
+
+ cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+
+ cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ rx_com_thrd->com_thrd[i].high,
+ rx_com_thrd->com_thrd[i].low);
+
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ rx_com_thrd->com_thrd[i].high,
+ rx_com_thrd->com_thrd[i].low);
+
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev, "\n");
+ dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+
+ return;
+
+err_qos_cmd_send:
+ dev_err(&hdev->pdev->dev,
+ "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret);
+}
+
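+/* Print the manager (ethertype/MAC) table: read each of the
+ * HCLGE_DBG_MNG_TBL_MAX entries by index and skip the ones the firmware
+ * reports as empty (resp_code == 0).
+ */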
+static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
+{
+ struct hclge_mac_ethertype_idx_rd_cmd *req0;
+ char printf_buf[HCLGE_DBG_BUF_LEN];
+ struct hclge_desc desc;
+ int ret, i;
+
+ dev_info(&hdev->pdev->dev, "mng tab:\n");
+ memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
+ strncat(printf_buf,
+ "entry|mac_addr |mask|ether|mask|vlan|mask",
+ HCLGE_DBG_BUF_LEN - 1);
+	strncat(printf_buf,
+		"|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
+		HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);
+
+ dev_info(&hdev->pdev->dev, "%s", printf_buf);
+
+ for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
+ true);
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0->index = cpu_to_le16(i);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "call hclge_cmd_send fail, ret = %d\n", ret);
+ return;
+ }
+
+ if (!req0->resp_code)
+ continue;
+
+ memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
+ snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
+ "%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
+ req0->index, req0->mac_add[0], req0->mac_add[1],
+ req0->mac_add[2], req0->mac_add[3], req0->mac_add[4],
+ req0->mac_add[5]);
+
+ snprintf(printf_buf + strlen(printf_buf),
+ HCLGE_DBG_BUF_LEN - strlen(printf_buf),
+ "%x |%04x |%x |%04x|%x |%02x |%02x |",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ req0->ethter_type,
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
+
+ snprintf(printf_buf + strlen(printf_buf),
+ HCLGE_DBG_BUF_LEN - strlen(printf_buf),
+ "%d |%d |%02d |%04d|%x\n",
+ !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ req0->egress_port & HCLGE_DBG_MNG_PF_ID,
+ (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ req0->egress_queue,
+ !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));
+
+ dev_info(&hdev->pdev->dev, "%s", printf_buf);
+ }
+}
+
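+/* Read back one flow director TCAM entry (the x or y half selected by
+ * @sel_x) using three chained descriptors and print the raw key words.
+ */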
+static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
+ bool sel_x, u32 loc)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int ret, i;
+ u32 *req;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+ req1->stage = stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ req1->index = cpu_to_le32(loc);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ return;
+
+ dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
+ sel_x ? "x" : "y", loc);
+
+ req = (u32 *)req1->tcam_data;
+ for (i = 0; i < 2; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ req = (u32 *)req2->tcam_data;
+ for (i = 0; i < 6; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ req = (u32 *)req3->tcam_data;
+ for (i = 0; i < 5; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+}
+
+static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
+{
+ u32 i;
+
+ for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
+ hclge_dbg_fd_tcam_read(hdev, 0, true, i);
+ hclge_dbg_fd_tcam_read(hdev, 0, false, i);
+ }
+}
+
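+/* Entry point for the debugfs command string on the PF: match the command
+ * by prefix, longer prefixes before their shorter substrings ("dump tm
+ * map" before "dump tm").
+ */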
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
+ hclge_dbg_fd_tcam(hdev);
+ } else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
+ hclge_dbg_dump_tc(hdev);
+ } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) {
+ hclge_dbg_dump_tm_map(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
+ hclge_dbg_dump_tm(hdev);
+ } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
+ hclge_dbg_dump_qos_pause_cfg(hdev);
+ } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
+ hclge_dbg_dump_qos_pri_map(hdev);
+ } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
+ hclge_dbg_dump_qos_buf_cfg(hdev);
+ } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
+ hclge_dbg_dump_mng_table(hdev);
+ } else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
+ hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
+ } else {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
new file mode 100644
index 000000000000..d055fda41775
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -0,0 +1,713 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEBUGFS_H
+#define __HCLGE_DEBUGFS_H
+
+#define HCLGE_DBG_BUF_LEN 256
+#define HCLGE_DBG_MNG_TBL_MAX 64
+
+#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0)
+#define HCLGE_DBG_MNG_MAC_MASK_B BIT(1)
+#define HCLGE_DBG_MNG_ETHER_MASK_B BIT(2)
+#define HCLGE_DBG_MNG_E_TYPE_B BIT(11)
+#define HCLGE_DBG_MNG_DROP_B BIT(13)
+#define HCLGE_DBG_MNG_VLAN_TAG 0x0FFF
+#define HCLGE_DBG_MNG_PF_ID 0x0007
+#define HCLGE_DBG_MNG_VF_ID 0x00FF
+
+/* Get DFX BD number offset */
+#define HCLGE_DBG_DFX_BIOS_OFFSET 1
+#define HCLGE_DBG_DFX_SSU_0_OFFSET 2
+#define HCLGE_DBG_DFX_SSU_1_OFFSET 3
+#define HCLGE_DBG_DFX_IGU_OFFSET 4
+#define HCLGE_DBG_DFX_RPU_0_OFFSET 5
+
+#define HCLGE_DBG_DFX_RPU_1_OFFSET 6
+#define HCLGE_DBG_DFX_NCSI_OFFSET 7
+#define HCLGE_DBG_DFX_RTC_OFFSET 8
+#define HCLGE_DBG_DFX_PPP_OFFSET 9
+#define HCLGE_DBG_DFX_RCB_OFFSET 10
+#define HCLGE_DBG_DFX_TQP_OFFSET 11
+
+#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
+
+#pragma pack(1)
+
+struct hclge_qos_pri_map_cmd {
+ u8 pri0_tc : 4,
+ pri1_tc : 4;
+ u8 pri2_tc : 4,
+ pri3_tc : 4;
+ u8 pri4_tc : 4,
+ pri5_tc : 4;
+ u8 pri6_tc : 4,
+ pri7_tc : 4;
+ u8 vlan_pri : 4,
+ rev : 4;
+};
+
+struct hclge_dbg_bitmap_cmd {
+ union {
+ u8 bitmap;
+ struct {
+ u8 bit0 : 1,
+ bit1 : 1,
+ bit2 : 1,
+ bit3 : 1,
+ bit4 : 1,
+ bit5 : 1,
+ bit6 : 1,
+ bit7 : 1;
+ };
+ };
+};
+
+struct hclge_dbg_dfx_message {
+ int flag;
+ char message[60];
+};
+
+#pragma pack()
+
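+/* Register-name tables for the DFX dumps; entries flagged false are
+ * reserved words that hclge_dbg_dump_reg_common() skips when printing.
+ */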
+static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+ {false, "Reserved"},
+ {true, "BP_CPU_STATE"},
+ {true, "DFX_MSIX_INFO_NIC_0"},
+ {true, "DFX_MSIX_INFO_NIC_1"},
+ {true, "DFX_MSIX_INFO_NIC_2"},
+ {true, "DFX_MSIX_INFO_NIC_3"},
+
+ {true, "DFX_MSIX_INFO_ROC_0"},
+ {true, "DFX_MSIX_INFO_ROC_1"},
+ {true, "DFX_MSIX_INFO_ROC_2"},
+ {true, "DFX_MSIX_INFO_ROC_3"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+ {false, "Reserved"},
+ {true, "SSU_ETS_PORT_STATUS"},
+ {true, "SSU_ETS_TCG_STATUS"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {true, "SSU_BP_STATUS_0"},
+
+ {true, "SSU_BP_STATUS_1"},
+ {true, "SSU_BP_STATUS_2"},
+ {true, "SSU_BP_STATUS_3"},
+ {true, "SSU_BP_STATUS_4"},
+ {true, "SSU_BP_STATUS_5"},
+ {true, "SSU_MAC_TX_PFC_IND"},
+
+ {true, "MAC_SSU_RX_PFC_IND"},
+ {true, "BTMP_AGEING_ST_B0"},
+ {true, "BTMP_AGEING_ST_B1"},
+ {true, "BTMP_AGEING_ST_B2"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "FULL_DROP_NUM"},
+ {true, "PART_DROP_NUM"},
+ {true, "PPP_KEY_DROP_NUM"},
+ {true, "PPP_RLT_DROP_NUM"},
+ {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
+ {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
+
+ {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
+ {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK0"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK1"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK2"},
+ {true, "SSU_MB_RD_RLT_DROP_CNT"},
+
+ {true, "SSU_PPP_MAC_KEY_NUM_L"},
+ {true, "SSU_PPP_MAC_KEY_NUM_H"},
+ {true, "SSU_PPP_HOST_KEY_NUM_L"},
+ {true, "SSU_PPP_HOST_KEY_NUM_H"},
+ {true, "PPP_SSU_MAC_RLT_NUM_L"},
+ {true, "PPP_SSU_MAC_RLT_NUM_H"},
+
+ {true, "PPP_SSU_HOST_RLT_NUM_L"},
+ {true, "PPP_SSU_HOST_RLT_NUM_H"},
+ {true, "NCSI_RX_PACKET_IN_CNT_L"},
+ {true, "NCSI_RX_PACKET_IN_CNT_H"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_L"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_H"},
+
+ {true, "SSU_KEY_DROP_NUM"},
+ {true, "MB_UNCOPY_NUM"},
+ {true, "RX_OQ_DROP_PKT_CNT"},
+ {true, "TX_OQ_DROP_PKT_CNT"},
+ {true, "BANK_UNBALANCE_DROP_CNT"},
+ {true, "BANK_UNBALANCE_RX_DROP_CNT"},
+
+ {true, "NIC_L2_ERR_DROP_PKT_CNT"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT"},
+ {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "RX_OQ_GLB_DROP_PKT_CNT"},
+ {false, "Reserved"},
+
+ {true, "LO_PRI_UNICAST_CUR_CNT"},
+ {true, "HI_PRI_MULTICAST_CUR_CNT"},
+ {true, "LO_PRI_MULTICAST_CUR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+ {true, "prt_id"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
+
+ {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
+ {true, "PACKET_CURR_BUFFER_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "RX_PACKET_IN_CNT_L"},
+ {true, "RX_PACKET_IN_CNT_H"},
+ {true, "RX_PACKET_OUT_CNT_L"},
+ {true, "RX_PACKET_OUT_CNT_H"},
+ {true, "TX_PACKET_IN_CNT_L"},
+ {true, "TX_PACKET_IN_CNT_H"},
+
+ {true, "TX_PACKET_OUT_CNT_L"},
+ {true, "TX_PACKET_OUT_CNT_H"},
+ {true, "ROC_RX_PACKET_IN_CNT_L"},
+ {true, "ROC_RX_PACKET_IN_CNT_H"},
+ {true, "ROC_TX_PACKET_OUT_CNT_L"},
+ {true, "ROC_TX_PACKET_OUT_CNT_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_0_L"},
+ {true, "RX_PACKET_TC_IN_CNT_0_H"},
+ {true, "RX_PACKET_TC_IN_CNT_1_L"},
+ {true, "RX_PACKET_TC_IN_CNT_1_H"},
+ {true, "RX_PACKET_TC_IN_CNT_2_L"},
+ {true, "RX_PACKET_TC_IN_CNT_2_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_3_L"},
+ {true, "RX_PACKET_TC_IN_CNT_3_H"},
+ {true, "RX_PACKET_TC_IN_CNT_4_L"},
+ {true, "RX_PACKET_TC_IN_CNT_4_H"},
+ {true, "RX_PACKET_TC_IN_CNT_5_L"},
+ {true, "RX_PACKET_TC_IN_CNT_5_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_6_L"},
+ {true, "RX_PACKET_TC_IN_CNT_6_H"},
+ {true, "RX_PACKET_TC_IN_CNT_7_L"},
+ {true, "RX_PACKET_TC_IN_CNT_7_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_7_H"},
+ {true, "TX_PACKET_TC_IN_CNT_0_L"},
+ {true, "TX_PACKET_TC_IN_CNT_0_H"},
+ {true, "TX_PACKET_TC_IN_CNT_1_L"},
+ {true, "TX_PACKET_TC_IN_CNT_1_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_2_L"},
+ {true, "TX_PACKET_TC_IN_CNT_2_H"},
+ {true, "TX_PACKET_TC_IN_CNT_3_L"},
+ {true, "TX_PACKET_TC_IN_CNT_3_H"},
+ {true, "TX_PACKET_TC_IN_CNT_4_L"},
+ {true, "TX_PACKET_TC_IN_CNT_4_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_5_L"},
+ {true, "TX_PACKET_TC_IN_CNT_5_H"},
+ {true, "TX_PACKET_TC_IN_CNT_6_L"},
+ {true, "TX_PACKET_TC_IN_CNT_6_H"},
+ {true, "TX_PACKET_TC_IN_CNT_7_L"},
+ {true, "TX_PACKET_TC_IN_CNT_7_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_0_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_3_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_6_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+ {true, "OQ_INDEX"},
+ {true, "QUEUE_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+ {true, "prt_id"},
+ {true, "IGU_RX_ERR_PKT"},
+ {true, "IGU_RX_NO_SOF_PKT"},
+ {true, "EGU_TX_1588_SHORT_PKT"},
+ {true, "EGU_TX_1588_PKT"},
+ {true, "EGU_TX_ERR_PKT"},
+
+ {true, "IGU_RX_OUT_L2_PKT"},
+ {true, "IGU_RX_OUT_L3_PKT"},
+ {true, "IGU_RX_OUT_L4_PKT"},
+ {true, "IGU_RX_IN_L2_PKT"},
+ {true, "IGU_RX_IN_L3_PKT"},
+ {true, "IGU_RX_IN_L4_PKT"},
+
+ {true, "IGU_RX_EL3E_PKT"},
+ {true, "IGU_RX_EL4E_PKT"},
+ {true, "IGU_RX_L3E_PKT"},
+ {true, "IGU_RX_L4E_PKT"},
+ {true, "IGU_RX_ROCEE_PKT"},
+ {true, "IGU_RX_OUT_UDP0_PKT"},
+
+ {true, "IGU_RX_IN_UDP0_PKT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "IGU_RX_OVERSIZE_PKT_L"},
+ {true, "IGU_RX_OVERSIZE_PKT_H"},
+ {true, "IGU_RX_UNDERSIZE_PKT_L"},
+ {true, "IGU_RX_UNDERSIZE_PKT_H"},
+ {true, "IGU_RX_OUT_ALL_PKT_L"},
+ {true, "IGU_RX_OUT_ALL_PKT_H"},
+
+ {true, "IGU_TX_OUT_ALL_PKT_L"},
+ {true, "IGU_TX_OUT_ALL_PKT_H"},
+ {true, "IGU_RX_UNI_PKT_L"},
+ {true, "IGU_RX_UNI_PKT_H"},
+ {true, "IGU_RX_MULTI_PKT_L"},
+ {true, "IGU_RX_MULTI_PKT_H"},
+
+ {true, "IGU_RX_BROAD_PKT_L"},
+ {true, "IGU_RX_BROAD_PKT_H"},
+ {true, "EGU_TX_OUT_ALL_PKT_L"},
+ {true, "EGU_TX_OUT_ALL_PKT_H"},
+ {true, "EGU_TX_UNI_PKT_L"},
+ {true, "EGU_TX_UNI_PKT_H"},
+
+ {true, "EGU_TX_MULTI_PKT_L"},
+ {true, "EGU_TX_MULTI_PKT_H"},
+ {true, "EGU_TX_BROAD_PKT_L"},
+ {true, "EGU_TX_BROAD_PKT_H"},
+ {true, "IGU_TX_KEY_NUM_L"},
+ {true, "IGU_TX_KEY_NUM_H"},
+
+ {true, "IGU_RX_NON_TUN_PKT_L"},
+ {true, "IGU_RX_NON_TUN_PKT_H"},
+ {true, "IGU_RX_TUN_PKT_L"},
+ {true, "IGU_RX_TUN_PKT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+ {true, "tc_queue_num"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "RPU_RX_PKT_DROP_CNT"},
+ {true, "BUF_WAIT_TIMEOUT"},
+ {true, "BUF_WAIT_TIMEOUT_QID"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+ {false, "Reserved"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+
+ {true, "FIFO_DFX_ST5"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+ {false, "Reserved"},
+ {true, "NCSI_EGU_TX_FIFO_STS"},
+ {true, "NCSI_PAUSE_STATUS"},
+ {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
+
+ {true, "NCSI_RX_CTRL_PKT_CNT"},
+ {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_PKT_CNT"},
+ {true, "NCSI_RX_FCS_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
+
+ {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_CNT"},
+ {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_PKT_CNT"},
+ {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
+
+ {true, "NCSI_TX_PT_PKT_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "NCSI_MAC_RX_OCTETS_OK"},
+ {true, "NCSI_MAC_RX_OCTETS_BAD"},
+ {true, "NCSI_MAC_RX_UC_PKTS"},
+ {true, "NCSI_MAC_RX_MC_PKTS"},
+ {true, "NCSI_MAC_RX_BC_PKTS"},
+ {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
+
+ {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
+
+ {true, "NCSI_MAC_RX_FCS_ERRORS"},
+ {true, "NCSI_MAC_RX_LONG_ERRORS"},
+ {true, "NCSI_MAC_RX_JABBER_ERRORS"},
+ {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
+
+ {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
+ {true, "NCSI_MAC_TX_OCTETS_OK"},
+ {true, "NCSI_MAC_TX_OCTETS_BAD"},
+ {true, "NCSI_MAC_TX_UC_PKTS"},
+ {true, "NCSI_MAC_TX_MC_PKTS"},
+ {true, "NCSI_MAC_TX_BC_PKTS"},
+
+ {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
+
+ {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
+ {true, "NCSI_MAC_TX_UNDERRUN"},
+ {true, "NCSI_MAC_TX_CRC_ERROR"},
+ {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
+ {true, "NCSI_MAC_RX_PAD_PKTS"},
+ {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+ {false, "Reserved"},
+ {true, "LGE_IGU_AFIFO_DFX_0"},
+ {true, "LGE_IGU_AFIFO_DFX_1"},
+ {true, "LGE_IGU_AFIFO_DFX_2"},
+ {true, "LGE_IGU_AFIFO_DFX_3"},
+ {true, "LGE_IGU_AFIFO_DFX_4"},
+
+ {true, "LGE_IGU_AFIFO_DFX_5"},
+ {true, "LGE_IGU_AFIFO_DFX_6"},
+ {true, "LGE_IGU_AFIFO_DFX_7"},
+ {true, "LGE_EGU_AFIFO_DFX_0"},
+ {true, "LGE_EGU_AFIFO_DFX_1"},
+ {true, "LGE_EGU_AFIFO_DFX_2"},
+
+ {true, "LGE_EGU_AFIFO_DFX_3"},
+ {true, "LGE_EGU_AFIFO_DFX_4"},
+ {true, "LGE_EGU_AFIFO_DFX_5"},
+ {true, "LGE_EGU_AFIFO_DFX_6"},
+ {true, "LGE_EGU_AFIFO_DFX_7"},
+ {true, "CGE_IGU_AFIFO_DFX_0"},
+
+ {true, "CGE_IGU_AFIFO_DFX_1"},
+ {true, "CGE_EGU_AFIFO_DFX_0"},
+ {true, "CGE_EGU_AFIFO_DFX_1"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+ {false, "Reserved"},
+ {true, "DROP_FROM_PRT_PKT_CNT"},
+ {true, "DROP_FROM_HOST_PKT_CNT"},
+ {true, "DROP_TX_VLAN_PROC_CNT"},
+ {true, "DROP_MNG_CNT"},
+ {true, "DROP_FD_CNT"},
+
+ {true, "DROP_NO_DST_CNT"},
+ {true, "DROP_MC_MBID_FULL_CNT"},
+ {true, "DROP_SC_FILTERED"},
+ {true, "PPP_MC_DROP_PKT_CNT"},
+ {true, "DROP_PT_CNT"},
+ {true, "DROP_MAC_ANTI_SPOOF_CNT"},
+
+ {true, "DROP_IG_VFV_CNT"},
+ {true, "DROP_IG_PRTV_CNT"},
+ {true, "DROP_CNM_PFC_PAUSE_CNT"},
+ {true, "DROP_TORUS_TC_CNT"},
+ {true, "DROP_TORUS_LPBK_CNT"},
+ {true, "PPP_HFS_STS"},
+
+ {true, "PPP_MC_RSLT_STS"},
+ {true, "PPP_P3U_STS"},
+ {true, "PPP_RSLT_DESCR_STS"},
+ {true, "PPP_UMV_STS_0"},
+ {true, "PPP_UMV_STS_1"},
+ {true, "PPP_VFV_STS"},
+
+ {true, "PPP_GRO_KEY_CNT"},
+ {true, "PPP_GRO_INFO_CNT"},
+ {true, "PPP_GRO_DROP_CNT"},
+ {true, "PPP_GRO_OUT_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
+
+ {true, "PPP_GRO_INFO_MATCH_CNT"},
+ {true, "PPP_GRO_FREE_ENTRY_CNT"},
+ {true, "PPP_GRO_INNER_DFX_SIGNAL"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "GET_RX_PKT_CNT_L"},
+ {true, "GET_RX_PKT_CNT_H"},
+ {true, "GET_TX_PKT_CNT_L"},
+ {true, "GET_TX_PKT_CNT_H"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
+
+ {true, "SEND_MC_FROM_PRT_CNT_L"},
+ {true, "SEND_MC_FROM_PRT_CNT_H"},
+ {true, "SEND_MC_FROM_HOST_CNT_L"},
+ {true, "SEND_MC_FROM_HOST_CNT_H"},
+ {true, "SSU_MC_RD_CNT_L"},
+ {true, "SSU_MC_RD_CNT_H"},
+
+ {true, "SSU_MC_DROP_CNT_L"},
+ {true, "SSU_MC_DROP_CNT_H"},
+ {true, "SSU_MC_RD_PKT_CNT_L"},
+ {true, "SSU_MC_RD_PKT_CNT_H"},
+ {true, "PPP_MC_2HOST_PKT_CNT_L"},
+ {true, "PPP_MC_2HOST_PKT_CNT_H"},
+
+ {true, "PPP_MC_2PRT_PKT_CNT_L"},
+ {true, "PPP_MC_2PRT_PKT_CNT_H"},
+ {true, "NTSNOS_PKT_CNT_L"},
+ {true, "NTSNOS_PKT_CNT_H"},
+ {true, "NTUP_PKT_CNT_L"},
+ {true, "NTUP_PKT_CNT_H"},
+
+ {true, "NTLCL_PKT_CNT_L"},
+ {true, "NTLCL_PKT_CNT_H"},
+ {true, "NTTGT_PKT_CNT_L"},
+ {true, "NTTGT_PKT_CNT_H"},
+ {true, "RTNS_PKT_CNT_L"},
+ {true, "RTNS_PKT_CNT_H"},
+
+ {true, "RTLPBK_PKT_CNT_L"},
+ {true, "RTLPBK_PKT_CNT_H"},
+ {true, "NR_PKT_CNT_L"},
+ {true, "NR_PKT_CNT_H"},
+ {true, "RR_PKT_CNT_L"},
+ {true, "RR_PKT_CNT_H"},
+
+ {true, "MNG_TBL_HIT_CNT_L"},
+ {true, "MNG_TBL_HIT_CNT_H"},
+ {true, "FD_TBL_HIT_CNT_L"},
+ {true, "FD_TBL_HIT_CNT_H"},
+ {true, "FD_LKUP_CNT_L"},
+ {true, "FD_LKUP_CNT_H"},
+
+ {true, "BC_HIT_CNT_L"},
+ {true, "BC_HIT_CNT_H"},
+ {true, "UM_TBL_UC_HIT_CNT_L"},
+ {true, "UM_TBL_UC_HIT_CNT_H"},
+ {true, "UM_TBL_MC_HIT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_CNT_H"},
+
+ {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
+ {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
+ {true, "MTA_TBL_HIT_CNT_L"},
+ {true, "MTA_TBL_HIT_CNT_H"},
+ {true, "FWD_BONDING_HIT_CNT_L"},
+ {true, "FWD_BONDING_HIT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_CNT_H"},
+ {true, "GET_TUNL_PKT_CNT_L"},
+ {true, "GET_TUNL_PKT_CNT_H"},
+ {true, "GET_BMC_PKT_CNT_L"},
+ {true, "GET_BMC_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
+ {true, "PPP_MC_2BMC_PKT_CNT_L"},
+ {true, "PPP_MC_2BMC_PKT_CNT_H"},
+ {true, "VLAN_MIRR_CNT_L"},
+ {true, "VLAN_MIRR_CNT_H"},
+
+ {true, "IG_MIRR_CNT_L"},
+ {true, "IG_MIRR_CNT_H"},
+ {true, "EG_MIRR_CNT_L"},
+ {true, "EG_MIRR_CNT_H"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
+
+ {true, "LAN_PAIR_CNT_L"},
+ {true, "LAN_PAIR_CNT_H"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
+ {true, "MTA_TBL_HIT_PKT_CNT_L"},
+ {true, "MTA_TBL_HIT_PKT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+ {false, "Reserved"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "FSM_DFX_ST2"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+ {true, "FIFO_DFX_ST5"},
+ {true, "FIFO_DFX_ST6"},
+ {true, "FIFO_DFX_ST7"},
+
+ {true, "FIFO_DFX_ST8"},
+ {true, "FIFO_DFX_ST9"},
+ {true, "FIFO_DFX_ST10"},
+ {true, "FIFO_DFX_ST11"},
+ {true, "Q_CREDIT_VLD_0"},
+ {true, "Q_CREDIT_VLD_1"},
+
+ {true, "Q_CREDIT_VLD_2"},
+ {true, "Q_CREDIT_VLD_3"},
+ {true, "Q_CREDIT_VLD_4"},
+ {true, "Q_CREDIT_VLD_5"},
+ {true, "Q_CREDIT_VLD_6"},
+ {true, "Q_CREDIT_VLD_7"},
+
+ {true, "Q_CREDIT_VLD_8"},
+ {true, "Q_CREDIT_VLD_9"},
+ {true, "Q_CREDIT_VLD_10"},
+ {true, "Q_CREDIT_VLD_11"},
+ {true, "Q_CREDIT_VLD_12"},
+ {true, "Q_CREDIT_VLD_13"},
+
+ {true, "Q_CREDIT_VLD_14"},
+ {true, "Q_CREDIT_VLD_15"},
+ {true, "Q_CREDIT_VLD_16"},
+ {true, "Q_CREDIT_VLD_17"},
+ {true, "Q_CREDIT_VLD_18"},
+ {true, "Q_CREDIT_VLD_19"},
+
+ {true, "Q_CREDIT_VLD_20"},
+ {true, "Q_CREDIT_VLD_21"},
+ {true, "Q_CREDIT_VLD_22"},
+ {true, "Q_CREDIT_VLD_23"},
+ {true, "Q_CREDIT_VLD_24"},
+ {true, "Q_CREDIT_VLD_25"},
+
+ {true, "Q_CREDIT_VLD_26"},
+ {true, "Q_CREDIT_VLD_27"},
+ {true, "Q_CREDIT_VLD_28"},
+ {true, "Q_CREDIT_VLD_29"},
+ {true, "Q_CREDIT_VLD_30"},
+ {true, "Q_CREDIT_VLD_31"},
+
+ {true, "GRO_BD_SERR_CNT"},
+ {true, "GRO_CONTEXT_SERR_CNT"},
+ {true, "RX_STASH_CFG_SERR_CNT"},
+ {true, "AXI_RD_FBD_SERR_CNT"},
+ {true, "GRO_BD_MERR_CNT"},
+ {true, "GRO_CONTEXT_MERR_CNT"},
+
+ {true, "RX_STASH_CFG_MERR_CNT"},
+ {true, "AXI_RD_FBD_MERR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+ {true, "q_num"},
+ {true, "RCB_CFG_RX_RING_TAIL"},
+ {true, "RCB_CFG_RX_RING_HEAD"},
+ {true, "RCB_CFG_RX_RING_FBDNUM"},
+ {true, "RCB_CFG_RX_RING_OFFSET"},
+ {true, "RCB_CFG_RX_RING_FBDOFFSET"},
+
+ {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
+ {true, "RCB_CFG_TX_RING_TAIL"},
+ {true, "RCB_CFG_TX_RING_HEAD"},
+ {true, "RCB_CFG_TX_RING_FBDNUM"},
+ {true, "RCB_CFG_TX_RING_OFFSET"},
+ {true, "RCB_CFG_TX_RING_EBDNUM"},
+};
+
+#endif
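
Note: each entry in the hclge_dbg_*_reg tables above pairs a validity flag with a register name; entries flagged false are "Reserved" placeholders that keep the table index in step with the DFX register dump. Below is a minimal, illustrative sketch of how such a flag/name table could be walked when printing a register dump; the struct layout and the dump loop are assumptions for illustration, not the driver's actual debugfs code.

/* Illustrative only: assumed shape of a flag/name table and a dump loop. */
#include <stdbool.h>
#include <stdio.h>

struct dbg_dfx_message {
	bool flag;              /* true: register slot is valid */
	char message[64];       /* name printed for the user */
};

static void dump_dfx_regs(const struct dbg_dfx_message *tbl, int n,
			  const unsigned int *vals)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].flag)       /* skip "Reserved" slots */
			continue;
		printf("%-32s: 0x%08x\n", tbl[i].message, vals[i]);
	}
}

int main(void)
{
	static const struct dbg_dfx_message tbl[] = {
		{ true,  "GET_RX_PKT_CNT_L" },
		{ false, "Reserved" },
	};
	unsigned int vals[2] = { 0x2a, 0x0 };

	dump_dfx_regs(tbl, 2, vals);
	return 0;
}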
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 123c37e653f3..d0f654123b9b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -4,78 +4,39 @@
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" },
{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" },
{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" },
{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" },
{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" },
{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" },
{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" },
{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" },
{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
- { /* sentinel */ }
-};
-
-static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" },
+ { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" },
{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" },
{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" },
{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" },
{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" },
{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" },
{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" },
{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" },
{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
- { /* sentinel */ }
-};
-
-static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" },
- { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" },
- { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" },
- { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" },
- { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" },
- { .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
+ { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
+ { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" },
- { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" },
- { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" },
{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
@@ -85,15 +46,19 @@ static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_igu_com_err_int[] = {
+static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
+ { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_int[] = {
{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
- { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" },
{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" },
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
+static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "rx_buf_overflow" },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
@@ -104,51 +69,11 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
};
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
- { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" },
{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_ppp_mpf_int0[] = {
- { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" },
- { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" },
- { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" },
- { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" },
- { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" },
- { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" },
- { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" },
- { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" },
- { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" },
- { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" },
- { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" },
- { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" },
- { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" },
- { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" },
- { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" },
- { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" },
- { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" },
- { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" },
- { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" },
- { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" },
- { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" },
- { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" },
- { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" },
- { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" },
- { .int_msk = BIT(27),
- .msg = "flow_director_ad_mem0_ecc_1bit_err" },
- { .int_msk = BIT(28),
- .msg = "flow_director_ad_mem1_ecc_1bit_err" },
- { .int_msk = BIT(29),
- .msg = "rx_vlan_tag_memory_ecc_1bit_err" },
- { .int_msk = BIT(30),
- .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" },
- { /* sentinel */ }
-};
-
-static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
+static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
@@ -187,23 +112,13 @@ static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_ppp_pf_int[] = {
- { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" },
+static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
+ { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_ppp_mpf_int2[] = {
- { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" },
- { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" },
- { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" },
- { /* sentinel */ }
-};
-
-static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
+static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
@@ -213,145 +128,248 @@ static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
{ /* sentinel */ }
};
-struct hclge_tm_sch_ecc_info {
- const char *name;
-};
-
-static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = {
- {
- { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" },
- { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" },
- },
- {
- { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" },
- { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" },
- { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" },
- { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" },
- { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" },
- },
- {
- { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" },
- { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" },
- { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" },
- { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" },
- { .name = "NIC_QUEUE_CTRL:QCLEN TAB" },
- },
- {
- { .name = "RAM_CFG_CTRL:CSHAP TAB" },
- { .name = "RAM_CFG_CTRL:PSHAP TAB" },
- },
- {
- { .name = "SHAPER_CTRL:PSHAP TAB" },
- },
- {
- { .name = "MSCH_CTRL" },
- },
- {
- { .name = "TOP_CTRL" },
- },
-};
-
-static const struct hclge_hw_error hclge_tm_sch_err_int[] = {
- { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" },
+static const struct hclge_hw_error hclge_tm_sch_rint[] = {
{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" },
- { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" },
- { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" },
- { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" },
- { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
+ { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
+ { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" },
+ { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" },
+ { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" },
+ { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" },
+ { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" },
+ { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" },
+ { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" },
+ { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" },
{ .int_msk = BIT(12),
- .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_port_shap_offset_fifo_wr_err" },
{ .int_msk = BIT(13),
- .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" },
+ .msg = "tm_sch_port_shap_offset_fifo_rd_err" },
{ .int_msk = BIT(14),
- .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" },
{ .int_msk = BIT(15),
- .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" },
+ .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" },
{ .int_msk = BIT(16),
- .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" },
{ .int_msk = BIT(17),
- .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" },
+ .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" },
{ .int_msk = BIT(18),
- .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" },
{ .int_msk = BIT(19),
- .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" },
+ .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" },
{ .int_msk = BIT(20),
- .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" },
{ .int_msk = BIT(21),
- .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" },
- { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" },
- { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" },
- { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" },
- { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" },
- { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" },
- { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" },
- { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" },
- { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" },
- { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" },
- { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" },
+ .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" },
+ { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" },
+ { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" },
+ { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" },
+ { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" },
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" },
+ { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" },
+ { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" },
+ { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" },
+ { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" },
+ { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
+ { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
+ { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
+ { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
+ { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" },
+ { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" },
+ { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" },
+ { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" },
+ { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" },
+ { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" },
+ { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" },
+ { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" },
+ { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" },
+ { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" },
+ { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" },
+ { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" },
+ { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" },
+ { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" },
+ { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" },
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = {
- { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" },
+static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" },
{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" },
{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" },
{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" },
{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" },
{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" },
{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" },
{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" },
{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" },
{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(20), .msg = "qcn_gp3_barral_mem_ecc_1bit_err" },
{ .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
{ /* sentinel */ }
};
-static void hclge_log_error(struct device *dev,
- const struct hclge_hw_error *err_list,
+static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
+ { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
+ { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
+ { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
+ { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
+ { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
+ { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
+ { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
+ { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
+ { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
+ { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
+ { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
+ { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
+ { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
+ { .int_msk = BIT(26), .msg = "rd_bus_err" },
+ { .int_msk = BIT(27), .msg = "wr_bus_err" },
+ { .int_msk = BIT(28), .msg = "reg_search_miss" },
+ { .int_msk = BIT(29), .msg = "rx_q_search_miss" },
+ { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
+ { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
+ { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
+ { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
+ { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
+ { .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
+ { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
+ { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
+ { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
+ { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
+ { .int_msk = BIT(5), .msg = "buf_wait_timeout" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
+ { .int_msk = BIT(0), .msg = "buf_sum_err" },
+ { .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
+ { .int_msk = BIT(2), .msg = "ppp_mbid_err" },
+ { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
+ { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
+ { .int_msk = BIT(5), .msg = "cks_edit_position_err" },
+ { .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
+ { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
+ { .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
+ { .int_msk = BIT(9), .msg = "vlan_num_in_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
+ { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
+ { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
+ { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
+ { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
+ { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
+ { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
+ { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
+ { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
+ { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
+ { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
+ { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
+ { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
+ { .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
+ { .int_msk = BIT(1), .msg = "ig_host_inf_int" },
+ { .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
+ { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
+ { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
+ { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
+ { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
+ { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
+ { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
+ { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
+ { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
+ { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
+ { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
+ { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
+ { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
+ { .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
+ { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
+ { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
+ { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
+ { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
+ { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
+ { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
+ { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
+ { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
+ { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
+ { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
+ { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
+ { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
+ { .int_msk = BIT(9), .msg = "low_water_line_err_port" },
+ { .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
+ { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
+ { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
+ { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
+ { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" },
+ { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" },
+ { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" },
+ { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" },
+ { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" },
+ { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" },
+ { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" },
+ { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" },
+ { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" },
+ { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" },
+ { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" },
+ { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" },
+ { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" },
+ { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" },
+ { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" },
+ { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" },
+ { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" },
+ { /* sentinel */ }
+};
+
+static void hclge_log_error(struct device *dev, char *reg,
+ const struct hclge_hw_error *err,
u32 err_sts)
{
- const struct hclge_hw_error *err;
- int i = 0;
-
- while (err_list[i].msg) {
- err = &err_list[i];
- if (!(err->int_msk & err_sts)) {
- i++;
- continue;
- }
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg, err_sts);
- i++;
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s %s found [error status=0x%x]\n",
+ reg, err->msg, err_sts);
+ err++;
}
}
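
The reworked hclge_log_error() above walks a sentinel-terminated {int_msk, msg} table and prints one warning per status bit that is set, instead of indexing with a counter as before. A small self-contained sketch of the same pattern, assuming the table is closed by a zeroed sentinel entry (msg == NULL), which is how the /* sentinel */ entries above read:

/* Sketch of the sentinel-terminated mask/message walk. */
#include <stdio.h>

struct hw_error {
	unsigned int int_msk;   /* bit mask for this error */
	const char *msg;        /* NULL terminates the table */
};

static const struct hw_error demo_tbl[] = {
	{ 1u << 1, "itcm0_ecc_mbit_err" },
	{ 1u << 3, "itcm1_ecc_mbit_err" },
	{ 0, NULL }             /* sentinel */
};

static void log_errors(const char *reg, const struct hw_error *err,
		       unsigned int sts)
{
	for (; err->msg; err++)
		if (err->int_msk & sts)
			printf("%s %s found [error status=0x%x]\n",
			       reg, err->msg, sts);
}

int main(void)
{
	log_errors("IMP_TCM_ECC_INT_STS", demo_tbl, (1u << 1) | (1u << 3));
	return 0;
}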
@@ -391,96 +409,44 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
return ret;
}
-/* hclge_cmd_clear_error: clear the error status
- * @hdev: pointer to struct hclge_dev
- * @desc: descriptor for describing the command
- * @desc_src: prefilled descriptor from the previous command for reusing
- * @cmd: command opcode
- * @flag: flag for extended command structure
- *
- * This function clear the error status in the hw register/s using command
- */
-static int hclge_cmd_clear_error(struct hclge_dev *hdev,
- struct hclge_desc *desc,
- struct hclge_desc *desc_src,
- u32 cmd, u16 flag)
-{
- struct device *dev = &hdev->pdev->dev;
- int num = 1;
- int ret, i;
-
- if (cmd) {
- hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- if (flag) {
- desc[0].flag |= cpu_to_le16(flag);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
- num = 2;
- }
- if (desc_src) {
- for (i = 0; i < 6; i++) {
- desc[0].data[i] = desc_src[0].data[i];
- if (flag)
- desc[1].data[i] = desc_src[1].data[i];
- }
- }
- } else {
- hclge_cmd_reuse_desc(&desc[0], false);
- if (flag) {
- desc[0].flag |= cpu_to_le16(flag);
- hclge_cmd_reuse_desc(&desc[1], false);
- num = 2;
- }
- }
- ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
- if (ret)
- dev_err(dev, "clear error cmd failed (%d)\n", ret);
-
- return ret;
-}
-
-static int hclge_enable_common_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
int ret;
+ /* configure common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
if (en) {
- /* enable COMMON error interrupts */
desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
- desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN |
+ HCLGE_MSIX_SRAM_ECC_ERR_INT_EN);
desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
- } else {
- /* disable COMMON error interrupts */
- desc[0].data[0] = 0;
- desc[0].data[2] = 0;
- desc[0].data[3] = 0;
- desc[0].data[4] = 0;
- desc[0].data[5] = 0;
}
+
desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
- desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK);
+ desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK |
+ HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
if (ret)
dev_err(dev,
- "failed(%d) to enable/disable COMMON err interrupts\n",
- ret);
+ "fail(%d) to configure common err interrupts\n", ret);
return ret;
}
-static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
@@ -489,74 +455,65 @@ static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
if (hdev->pdev->revision < 0x21)
return 0;
- /* enable/disable NCSI error interrupts */
+ /* configure NCSI error interrupts */
hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
- else
- desc.data[0] = 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(dev,
- "failed(%d) to enable/disable NCSI error interrupts\n",
- ret);
+ "fail(%d) to configure NCSI error interrupts\n", ret);
return ret;
}
-static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
int ret;
- /* enable/disable error interrupts */
+ /* configure IGU,EGU error interrupts */
hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
- else
- desc.data[0] = 0;
+
desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(dev,
- "failed(%d) to enable/disable IGU common interrupts\n",
- ret);
+ "fail(%d) to configure IGU common interrupts\n", ret);
return ret;
}
hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
- else
- desc.data[0] = 0;
+
desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(dev,
- "failed(%d) to enable/disable IGU-EGU TNL interrupts\n",
- ret);
+ "fail(%d) to configure IGU-EGU TNL interrupts\n", ret);
return ret;
}
- ret = hclge_enable_ncsi_error(hdev, en);
- if (ret)
- dev_err(dev, "fail(%d) to en/disable err int\n", ret);
+ ret = hclge_config_ncsi_hw_err_int(hdev, en);
return ret;
}
-static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
+static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
int ret;
- /* enable/disable PPP error interrupts */
+ /* configure PPP error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
@@ -567,24 +524,24 @@ static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
desc[0].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
- } else {
- desc[0].data[0] = 0;
- desc[0].data[1] = 0;
+ desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN);
}
+
desc[1].data[0] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
desc[1].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
+ if (hdev->pdev->revision >= 0x21)
+ desc[1].data[2] =
+ cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
if (en) {
desc[0].data[0] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
desc[0].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
- } else {
- desc[0].data[0] = 0;
- desc[0].data[1] = 0;
}
+
desc[1].data[0] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
desc[1].data[1] =
@@ -593,498 +550,863 @@ static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
if (ret)
- dev_err(dev,
- "failed(%d) to enable/disable PPP error interrupts\n",
- ret);
+ dev_err(dev, "fail(%d) to configure PPP error intr\n", ret);
return ret;
}
-static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en)
{
- struct device *dev = &hdev->pdev->dev;
int ret;
- ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
en);
- if (ret) {
- dev_err(dev,
- "failed(%d) to enable/disable PPP error intr 0,1\n",
- ret);
+ if (ret)
return ret;
- }
- ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
en);
- if (ret)
- dev_err(dev,
- "failed(%d) to enable/disable PPP error intr 2,3\n",
- ret);
return ret;
}
-int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
int ret;
- /* enable TM SCH hw errors */
+ /* configure TM SCH hw errors */
hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
- else
- desc.data[0] = 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret);
+ dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret);
return ret;
}
- /* enable TM QCN hw errors */
+ /* configure TM QCN hw errors */
ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
0, 0, 0);
if (ret) {
- dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret);
+ dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
return ret;
}
hclge_cmd_reuse_desc(&desc, false);
if (en)
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
- else
- desc.data[1] = 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(dev,
- "failed(%d) to configure TM QCN mem errors\n", ret);
+ "fail(%d) to configure TM QCN mem errors\n", ret);
return ret;
}
-static void hclge_process_common_error(struct hclge_dev *hdev,
- enum hclge_err_int_type type)
+static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
- struct hclge_desc desc[2];
- u32 err_sts;
+ struct hclge_desc desc;
int ret;
- /* read err sts */
- ret = hclge_cmd_query_error(hdev, &desc[0],
- HCLGE_COMMON_ECC_INT_CFG,
- HCLGE_CMD_FLAG_NEXT, 0, 0);
- if (ret) {
+ /* configure MAC common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN);
+
+ desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
dev_err(dev,
- "failed(=%d) to query COMMON error interrupt status\n",
- ret);
- return;
- }
+ "fail(%d) to configure MAC COMMON error intr\n", ret);
- /* log err */
- err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts);
+ return ret;
+}
- err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts);
+static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int num = 1;
+ int ret;
- err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT)
- & HCLGE_CMDQ_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts);
+ /* configure PPU error interrupts */
+ if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ desc[0].flag |= HCLGE_CMD_FLAG_NEXT;
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+ if (en) {
+ desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN;
+ desc[0].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN;
+ desc[1].data[3] = HCLGE_PPU_MPF_ABNORMAL_INT3_EN;
+ desc[1].data[4] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN;
+ }
- if ((le32_to_cpu(desc[0].data[3])) & BIT(0))
- dev_warn(dev, "imp_rd_data_poison_err found\n");
+ desc[1].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK;
+ desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
+ desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
+ desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
+ num = 2;
+ } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (en)
+ desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2;
- err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) &
- HCLGE_TQP_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts);
+ desc[0].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK;
+ } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (en)
+ desc[0].data[0] = HCLGE_PPU_PF_ABNORMAL_INT_EN;
- err_sts = (le32_to_cpu(desc[0].data[5])) &
- HCLGE_IMP_ITCM4_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts);
+ desc[0].data[2] = HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK;
+ } else {
+ dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
+ return -EINVAL;
+ }
- /* clear error interrupts */
- desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK);
- desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK |
- HCLGE_CMDQ_ROCEE_ECC_CLR_MASK);
- desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK);
- desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
- ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
- HCLGE_CMD_FLAG_NEXT);
- if (ret)
- dev_err(dev,
- "failed(%d) to clear COMMON error interrupt status\n",
- ret);
+ return ret;
}
-static void hclge_process_ncsi_error(struct hclge_dev *hdev,
- enum hclge_err_int_type type)
+static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
- struct hclge_desc desc_rd;
- struct hclge_desc desc_wr;
- u32 err_sts;
int ret;
- if (hdev->pdev->revision < 0x21)
- return;
-
- /* read NCSI error status */
- ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY,
- 0, 1, HCLGE_NCSI_ERR_INT_TYPE);
+ ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD,
+ en);
if (ret) {
- dev_err(dev,
- "failed(=%d) to query NCSI error interrupt status\n",
+ dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n",
ret);
- return;
+ return ret;
}
- /* log err */
- err_sts = le32_to_cpu(desc_rd.data[0]);
- hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts);
+ ret = hclge_config_ppu_error_interrupts(hdev,
+ HCLGE_PPU_MPF_OTHER_INT_CMD,
+ en);
+ if (ret) {
+ dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret);
+ return ret;
+ }
- /* clear err int */
- ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
- HCLGE_NCSI_INT_CLR, 0);
+ ret = hclge_config_ppu_error_interrupts(hdev,
+ HCLGE_PPU_PF_OTHER_INT_CMD, en);
if (ret)
- dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n",
+ dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n",
ret);
+ return ret;
}
-static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
- enum hclge_err_int_type int_type)
+static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
- struct hclge_desc desc_rd;
- struct hclge_desc desc_wr;
- u32 err_sts;
+ struct hclge_desc desc[2];
int ret;
- /* read IGU common err sts */
- ret = hclge_cmd_query_error(hdev, &desc_rd,
- HCLGE_IGU_COMMON_INT_QUERY,
- 0, 1, int_type);
- if (ret) {
- dev_err(dev, "failed(=%d) to query IGU common int status\n",
- ret);
- return;
+ /* configure SSU ecc error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
+ if (en) {
+ desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN);
}
- /* log err */
- err_sts = le32_to_cpu(desc_rd.data[0]) &
- HCLGE_IGU_COM_INT_MASK;
- hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts);
+ desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
+ desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK);
- /* clear err int */
- ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
- HCLGE_IGU_COMMON_INT_CLR, 0);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
if (ret) {
- dev_err(dev, "failed(=%d) to clear IGU common int status\n",
- ret);
- return;
+ dev_err(dev,
+ "fail(%d) to configure SSU ECC error interrupt\n", ret);
+ return ret;
}
- /* read IGU-EGU TNL err sts */
- ret = hclge_cmd_query_error(hdev, &desc_rd,
- HCLGE_IGU_EGU_TNL_INT_QUERY,
- 0, 1, int_type);
- if (ret) {
- dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n",
- ret);
- return;
+ /* configure SSU common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
+
+ if (en) {
+ if (hdev->pdev->revision >= 0x21)
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
+ else
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5));
+ desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN);
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN);
}
- /* log err */
- err_sts = le32_to_cpu(desc_rd.data[0]) &
- HCLGE_IGU_EGU_TNL_INT_MASK;
- hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts);
+ desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK |
+ HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);
- /* clear err int */
- ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
- HCLGE_IGU_EGU_TNL_INT_CLR, 0);
- if (ret) {
- dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n",
- ret);
- return;
- }
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure SSU COMMON error intr\n", ret);
- hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE);
+ return ret;
}
-static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
- enum hclge_err_int_type int_type)
+#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
+ do { \
+ if (ae_dev->ops->set_default_reset_request) \
+ ae_dev->ops->set_default_reset_request(ae_dev, \
+ reset_type); \
+ } while (0)
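
The HCLGE_SET_DEFAULT_RESET_REQUEST() macro added above is wrapped in do { ... } while (0) so that its multi-statement body behaves as a single statement after expansion, for example inside an unbraced if. A generic example of why the wrapper matters; the names here are illustrative only, not taken from the driver:

#include <stdio.h>

static int err_cnt;

/* Without the do/while (0) wrapper this macro would expand to two
 * statements, and the else below would fail to compile.
 */
#define REPORT_AND_COUNT(msg)	\
	do {			\
		puts(msg);	\
		err_cnt++;	\
	} while (0)

static void check(int bad)
{
	if (bad)
		REPORT_AND_COUNT("error seen");  /* expands as one statement */
	else
		err_cnt = 0;
}

int main(void)
{
	check(1);
	printf("err_cnt=%d\n", err_cnt);
	return 0;
}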
+
+/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the main PF RAS errors in the
+ * hw register/s using command.
+ */
+static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int num)
{
- enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
- const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
- struct hclge_desc desc[2];
- u32 err_sts;
+ __le32 *desc_data;
+ u32 status;
int ret;
- /* read PPP INT sts */
- ret = hclge_cmd_query_error(hdev, &desc[0], cmd,
- HCLGE_CMD_FLAG_NEXT, 5, int_type);
+ /* query all main PF RAS errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret) {
- dev_err(dev, "failed(=%d) to query PPP interrupt status\n",
- ret);
- return -EIO;
+ dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
+ return ret;
}
- /* log error */
- if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
- hw_err_lst1 = &hclge_ppp_mpf_int0[0];
- hw_err_lst2 = &hclge_ppp_mpf_int1[0];
- hw_err_lst3 = &hclge_ppp_pf_int[0];
- } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
- hw_err_lst1 = &hclge_ppp_mpf_int2[0];
- hw_err_lst2 = &hclge_ppp_mpf_int3[0];
- } else {
- dev_err(dev, "invalid command(=%d)\n", cmd);
- return -EINVAL;
+ /* log HNS common errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status) {
+ hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+ &hclge_imp_tcm_ecc_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
- err_sts = le32_to_cpu(desc[0].data[2]);
- if (err_sts) {
- hclge_log_error(dev, hw_err_lst1, err_sts);
- reset_level = HNAE3_FUNC_RESET;
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status) {
+ hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+ &hclge_cmdq_nic_mem_ecc_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
- err_sts = le32_to_cpu(desc[0].data[3]);
- if (err_sts) {
- hclge_log_error(dev, hw_err_lst2, err_sts);
- reset_level = HNAE3_FUNC_RESET;
+ if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
+ dev_warn(dev, "imp_rd_data_poison_err found\n");
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
- if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
- err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
- if (err_sts) {
- hclge_log_error(dev, hw_err_lst3, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
+ status = le32_to_cpu(desc[0].data[3]);
+ if (status) {
+ hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+ &hclge_tqp_int_ecc_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
}
- /* clear PPP INT */
- ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
- HCLGE_CMD_FLAG_NEXT);
- if (ret) {
- dev_err(dev, "failed(=%d) to clear PPP interrupt status\n",
- ret);
- return -EIO;
+ status = le32_to_cpu(desc[0].data[4]);
+ if (status) {
+ hclge_log_error(dev, "MSIX_ECC_INT_STS",
+ &hclge_msix_sram_ecc_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
}
- return 0;
+ /* log SSU(Storage Switch Unit) errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status) {
+ dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n");
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
+ if (status) {
+ dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n");
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
+ if (status) {
+ hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+ &hclge_ssu_com_err_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ /* log IGU(Ingress Unit) errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "IGU_INT_STS",
+ &hclge_igu_int[0], status);
+
+ /* log PPP(Programmable Packet Process) errors */
+ desc_data = (__le32 *)&desc[4];
+ status = le32_to_cpu(*(desc_data + 1));
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+ &hclge_ppp_mpf_abnormal_int_st1[0], status);
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppp_mpf_abnormal_int_st3[0], status);
+
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[5];
+ status = le32_to_cpu(*(desc_data + 1));
+ if (status) {
+ dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
+ "rpu_rx_pkt_ecc_mbit_err");
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status) {
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
+ if (status) {
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppu_mpf_abnormal_int_st3[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ /* log TM(Traffic Manager) errors */
+ desc_data = (__le32 *)&desc[6];
+ status = le32_to_cpu(*desc_data);
+ if (status) {
+ hclge_log_error(dev, "TM_SCH_RINT",
+ &hclge_tm_sch_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ /* log QCN(Quantized Congestion Control) errors */
+ desc_data = (__le32 *)&desc[7];
+ status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
+ if (status) {
+ hclge_log_error(dev, "QCN_FIFO_RINT",
+ &hclge_qcn_fifo_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
+ if (status) {
+ hclge_log_error(dev, "QCN_ECC_RINT",
+ &hclge_qcn_ecc_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ /* log NCSI errors */
+ desc_data = (__le32 *)&desc[9];
+ status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
+ if (status) {
+ hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+ &hclge_ncsi_err_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ /* clear all main PF RAS errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
+
+ return ret;
}
-static void hclge_process_ppp_error(struct hclge_dev *hdev,
- enum hclge_err_int_type int_type)
+/* hclge_handle_pf_ras_error: handle all PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the PF RAS errors in the
+ * hw register/s using command.
+ */
+static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int num)
{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
int ret;
- /* read PPP INT0,1 sts */
- ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD,
- int_type);
- if (ret < 0) {
- dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n",
- ret);
- return;
+ /* query all PF RAS errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret) {
+ dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
+ return ret;
}
- /* read err PPP INT2,3 sts */
- ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD,
- int_type);
- if (ret < 0)
- dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n",
- ret);
+ /* log SSU(Storage Switch Unit) errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status) {
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_err_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status) {
+ hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+ &hclge_ssu_fifo_overflow_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ status = le32_to_cpu(desc[0].data[2]);
+ if (status) {
+ hclge_log_error(dev, "SSU_ETS_TCG_INT",
+ &hclge_ssu_ets_tcg_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+ &hclge_igu_egu_tnl_int[0], status);
+
+ /* clear all PF RAS errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
+
+ return ret;
}
-static void hclge_process_tm_sch_error(struct hclge_dev *hdev)
+static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
- const struct hclge_tm_sch_ecc_info *tm_sch_ecc_info;
- struct hclge_desc desc;
- u32 ecc_info;
- u8 module_no;
- u8 ram_no;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc desc_bd;
+ struct hclge_desc *desc;
int ret;
- /* read TM scheduler errors */
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0);
+ /* query the number of registers in the RAS int status */
+ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
if (ret) {
- dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n", ret);
- return;
+ dev_err(dev, "fail(%d) to query ras int status bd num\n", ret);
+ return ret;
}
- ecc_info = le32_to_cpu(desc.data[0]);
+ mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
+ pf_bd_num = le32_to_cpu(desc_bd.data[1]);
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ /* handle all main PF RAS errors */
+ ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num);
if (ret) {
- dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret);
- return;
+ kfree(desc);
+ return ret;
}
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+
+ /* handle all PF RAS errors */
+ ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num);
+ kfree(desc);
- /* log TM scheduler errors */
- if (le32_to_cpu(desc.data[0])) {
- hclge_log_error(dev, &hclge_tm_sch_err_int[0],
- le32_to_cpu(desc.data[0]));
- if (le32_to_cpu(desc.data[0]) & 0x2) {
- module_no = (ecc_info >> 20) & 0xF;
- ram_no = (ecc_info >> 16) & 0xF;
- tm_sch_ecc_info =
- &hclge_tm_sch_ecc_err[module_no][ram_no];
- dev_warn(dev, "ecc err module:ram=%s\n",
- tm_sch_ecc_info->name);
- dev_warn(dev, "ecc memory address = 0x%x\n",
- ecc_info & 0xFFFF);
+ return ret;
+}
+
+static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* read overflow error status */
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_ROCEE_PF_RAS_INT_CMD,
+ 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
+ return ret;
+ }
+
+ /* log overflow error */
+ if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ const struct hclge_hw_error *err;
+ u32 err_sts;
+
+ err = &hclge_rocee_qmm_ovf_err_int[0];
+ err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK &
+ le32_to_cpu(desc[0].data[0]);
+ while (err->msg) {
+ if (err->int_msk == err_sts) {
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg,
+ le32_to_cpu(desc[0].data[0]));
+ break;
+ }
+ err++;
}
}
- /* clear TM scheduler errors */
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret);
- return;
+ if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[1]));
}
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to read SCH CE status\n", ret);
- return;
+ if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[2]));
}
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ return 0;
+}
+
+static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
+{
+ enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ unsigned int status;
+ int ret;
+
+ /* read RAS error interrupt status */
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
+ 0, 0, 0);
if (ret) {
- dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret);
- return;
+ dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
+ /* reset everything for now */
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ return ret;
}
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to read SCH NFE status\n", ret);
- return;
+ status = le32_to_cpu(desc[0].data[0]);
+
+ if (status & HCLGE_ROCEE_RERR_INT_MASK)
+ dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+
+ if (status & HCLGE_ROCEE_BERR_INT_MASK)
+ dev_warn(dev, "ROCEE RAS AXI bresp error\n");
+
+ if (status & HCLGE_ROCEE_ECC_INT_MASK) {
+ dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
+ reset_type = HNAE3_GLOBAL_RESET;
}
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to clear TM SCH NFE status\n", ret);
- return;
+ if (status & HCLGE_ROCEE_OVF_INT_MASK) {
+ ret = hclge_log_rocee_ovf_error(hdev);
+ if (ret) {
+ dev_err(dev, "failed(%d) to process ovf error\n", ret);
+ /* reset everything for now */
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ return ret;
+ }
}
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0);
+ /* clear error status */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
if (ret) {
- dev_err(dev, "failed(%d) to read SCH FE status\n", ret);
- return;
+ dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
+ /* reset everything for now */
+ reset_type = HNAE3_GLOBAL_RESET;
}
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
- if (ret)
- dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
+
+ return ret;
}
-static void hclge_process_tm_qcn_error(struct hclge_dev *hdev)
+static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
int ret;
- /* read QCN errors */
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret);
- return;
- }
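+	/* only configure ROCEE RAS interrupts on rev 0x21+ devices that
+	 * support RoCE
+	 */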
+ if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
+ if (en) {
+ /* enable ROCEE hw error interrupts */
+ desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN);
+ desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN);
- /* log QCN errors */
- if (le32_to_cpu(desc.data[0]))
- hclge_log_error(dev, &hclge_qcn_ecc_err_int[0],
- le32_to_cpu(desc.data[0]));
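+		/* log and clear any ROCEE RAS errors already pending before
+		 * the enable command is sent
+		 */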
+ hclge_log_and_clear_rocee_ras_error(hdev);
+ }
+ desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK);
+ desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK);
- /* clear QCN errors */
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(dev, "failed(%d) to clear QCN error status\n", ret);
+ dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret);
+
+ return ret;
}
-static void hclge_process_tm_error(struct hclge_dev *hdev,
- enum hclge_err_int_type type)
+static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
- hclge_process_tm_sch_error(hdev);
- hclge_process_tm_qcn_error(hdev);
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ hdev->pdev->revision < 0x21)
+ return HNAE3_NONE_RESET;
+
+ return hclge_log_and_clear_rocee_ras_error(hdev);
}
static const struct hclge_hw_blk hw_blk[] = {
- { .msk = BIT(0), .name = "IGU_EGU",
- .enable_error = hclge_enable_igu_egu_error,
- .process_error = hclge_process_igu_egu_error, },
- { .msk = BIT(5), .name = "COMMON",
- .enable_error = hclge_enable_common_error,
- .process_error = hclge_process_common_error, },
- { .msk = BIT(4), .name = "TM",
- .enable_error = hclge_enable_tm_hw_error,
- .process_error = hclge_process_tm_error, },
- { .msk = BIT(1), .name = "PPP",
- .enable_error = hclge_enable_ppp_error,
- .process_error = hclge_process_ppp_error, },
+ {
+ .msk = BIT(0), .name = "IGU_EGU",
+ .config_err_int = hclge_config_igu_egu_hw_err_int,
+ },
+ {
+ .msk = BIT(1), .name = "PPP",
+ .config_err_int = hclge_config_ppp_hw_err_int,
+ },
+ {
+ .msk = BIT(2), .name = "SSU",
+ .config_err_int = hclge_config_ssu_hw_err_int,
+ },
+ {
+ .msk = BIT(3), .name = "PPU",
+ .config_err_int = hclge_config_ppu_hw_err_int,
+ },
+ {
+ .msk = BIT(4), .name = "TM",
+ .config_err_int = hclge_config_tm_hw_err_int,
+ },
+ {
+ .msk = BIT(5), .name = "COMMON",
+ .config_err_int = hclge_config_common_hw_err_int,
+ },
+ {
+ .msk = BIT(8), .name = "MAC",
+ .config_err_int = hclge_config_mac_err_int,
+ },
{ /* sentinel */ }
};
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
{
+ const struct hclge_hw_blk *module = hw_blk;
struct device *dev = &hdev->pdev->dev;
int ret = 0;
- int i = 0;
- while (hw_blk[i].name) {
- if (!hw_blk[i].enable_error) {
- i++;
- continue;
+ while (module->name) {
+ if (module->config_err_int) {
+ ret = module->config_err_int(hdev, state);
+ if (ret)
+ return ret;
}
- ret = hw_blk[i].enable_error(hdev, state);
- if (ret) {
- dev_err(dev, "fail(%d) to en/disable err int\n", ret);
- return ret;
- }
- i++;
+ module++;
}
+ ret = hclge_config_rocee_ras_interrupt(hdev, state);
+ if (ret)
+ dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);
+
return ret;
}
-pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev)
+pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
struct device *dev = &hdev->pdev->dev;
- u32 sts, val;
- int i = 0;
-
- sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
-
- /* Processing Non-fatal errors */
- if (sts & HCLGE_RAS_REG_NFE_MASK) {
- val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF;
- i = 0;
- while (hw_blk[i].name) {
- if (!(hw_blk[i].msk & val)) {
- i++;
- continue;
- }
- dev_warn(dev, "%s ras non-fatal error identified\n",
- hw_blk[i].name);
- if (hw_blk[i].process_error)
- hw_blk[i].process_error(hdev,
- HCLGE_ERR_INT_RAS_NFE);
- i++;
- }
+ u32 status;
+
+ status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+ /* Handling Non-fatal HNS RAS errors */
+ if (status & HCLGE_RAS_REG_NFE_MASK) {
+ dev_warn(dev,
+ "HNS Non-Fatal RAS error(status=0x%x) identified\n",
+ status);
+ hclge_handle_all_ras_errors(hdev);
+ } else {
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ hdev->pdev->revision < 0x21)
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
+ dev_warn(dev, "ROCEE uncorrected RAS error identified\n");
+ hclge_handle_rocee_ras_error(ae_dev);
+ }
+
+ if (status & HCLGE_RAS_REG_NFE_MASK ||
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc desc_bd;
+ struct hclge_desc *desc;
+ __le32 *desc_data;
+ int ret = 0;
+ u32 status;
+
+ /* set default handling */
+ set_bit(HNAE3_FUNC_RESET, reset_requests);
+
+ /* query the number of bds for the MSIx int status */
+ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "fail(%d) to query msix int status bd num\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ return ret;
+ }
+
+ mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
+ pf_bd_num = le32_to_cpu(desc_bd.data[1]);
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+
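+	/* the buffer is reused for both the MPF and PF queries below */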
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+ /* query all main PF MSIx errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ goto msi_error;
+ }
+
+ /* log MAC errors */
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data);
+ if (status) {
+ hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+ &hclge_mac_afifo_tnl_int[0], status);
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
}
- return PCI_ERS_RESULT_NEED_RESET;
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[5];
+ status = le32_to_cpu(*(desc_data + 2)) &
+ HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
+ if (status) {
+ dev_warn(dev,
+ "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n",
+ status);
+ set_bit(HNAE3_CORE_RESET, reset_requests);
+ }
+
+ /* clear all main PF MSIx errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ goto msi_error;
+ }
+
+ /* query all PF MSIx errors */
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
+ if (ret) {
+ dev_err(dev, "query all pf msix int cmd failed (%d)\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ goto msi_error;
+ }
+
+ /* log SSU PF errors */
+ status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
+ if (status) {
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_pf_int[0], status);
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ }
+
+ /* read and log PPP PF errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*desc_data);
+ if (status)
+ hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+ &hclge_ppp_pf_abnormal_int[0], status);
+
+ /* PPU(RCB) PF errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
+ if (status)
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+ &hclge_ppu_pf_abnormal_int[0], status);
+
+ /* clear all PF MSIx errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
+ if (ret) {
+ dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ }
+
+msi_error:
+ kfree(desc);
+out:
+ return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index e0e3b5861495..51a7d4eb066a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -7,9 +7,11 @@
#include "hclge_main.h"
#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
-#define HCLGE_RAS_REG_FE_MASK 0xFF
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
-#define HCLGE_RAS_REG_NFE_SHIFT 8
+#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
+
+#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800
+#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000
@@ -23,6 +25,8 @@
#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100
#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF
#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
+#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000
+#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
#define HCLGE_IGU_ERR_INT_EN 0x0000066F
#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
@@ -41,21 +45,55 @@
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
#define HCLGE_NCSI_ERR_INT_EN 0x3
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
+#define HCLGE_MAC_COMMON_ERR_INT_EN GENMASK(7, 0)
+#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK GENMASK(7, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2 0xB
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB
+#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16)
+#define HCLGE_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0)
+#define HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0)
+#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0)
+#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0)
+#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0)
+#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0)
+#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN 0x0101
+#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101
+#define HCLGE_SSU_COMMON_INT_EN GENMASK(9, 0)
+#define HCLGE_SSU_COMMON_INT_EN_MASK GENMASK(9, 0)
+#define HCLGE_SSU_PORT_BASED_ERR_INT_EN 0x0BFF
+#define HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000
+#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0)
+#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0)
+
+#define HCLGE_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0)
+#define HCLGE_SSU_PORT_INT_MSIX_MASK 0x7BFF
+#define HCLGE_IGU_INT_MASK GENMASK(3, 0)
+#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0)
+#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
+#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
+#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
+#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27
+#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
+#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
+#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0)
-#define HCLGE_IMP_TCM_ECC_INT_MASK 0xFFFF
-#define HCLGE_IMP_ITCM4_ECC_INT_MASK 0x3
-#define HCLGE_CMDQ_ECC_INT_MASK 0xFFFF
-#define HCLGE_CMDQ_ROC_ECC_INT_SHIFT 16
-#define HCLGE_TQP_ECC_INT_MASK 0xFFF
-#define HCLGE_TQP_ECC_INT_SHIFT 16
-#define HCLGE_IMP_TCM_ECC_CLR_MASK 0xFFFF
-#define HCLGE_IMP_ITCM4_ECC_CLR_MASK 0x3
-#define HCLGE_CMDQ_NIC_ECC_CLR_MASK 0xFFFF
-#define HCLGE_CMDQ_ROCEE_ECC_CLR_MASK 0xFFFF0000
-#define HCLGE_TQP_IMP_ERR_CLR_MASK 0x0FFF0001
-#define HCLGE_IGU_COM_INT_MASK 0xF
-#define HCLGE_IGU_EGU_TNL_INT_MASK 0x3F
-#define HCLGE_PPP_PF_INT_MASK 0x100
+#define HCLGE_ROCEE_RAS_NFE_INT_EN 0xF
+#define HCLGE_ROCEE_RAS_CE_INT_EN 0x1
+#define HCLGE_ROCEE_RAS_NFE_INT_EN_MASK 0xF
+#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1
+#define HCLGE_ROCEE_RERR_INT_MASK BIT(0)
+#define HCLGE_ROCEE_BERR_INT_MASK BIT(1)
+#define HCLGE_ROCEE_ECC_INT_MASK BIT(2)
+#define HCLGE_ROCEE_OVF_INT_MASK BIT(3)
+#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
+#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F
enum hclge_err_int_type {
HCLGE_ERR_INT_MSIX = 0,
@@ -67,9 +105,7 @@ enum hclge_err_int_type {
struct hclge_hw_blk {
u32 msk;
const char *name;
- int (*enable_error)(struct hclge_dev *hdev, bool en);
- void (*process_error)(struct hclge_dev *hdev,
- enum hclge_err_int_type type);
+ int (*config_err_int)(struct hclge_dev *hdev, bool en);
};
struct hclge_hw_error {
@@ -78,6 +114,7 @@ struct hclge_hw_error {
};
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
-int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en);
-pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev);
+pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ffdd96020860..f7637c08bb3a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -26,7 +26,9 @@
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
+#define HCLGE_BUF_SIZE_UNIT 256
+
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
@@ -48,6 +50,62 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
+static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
+ HCLGE_CMDQ_TX_ADDR_H_REG,
+ HCLGE_CMDQ_TX_DEPTH_REG,
+ HCLGE_CMDQ_TX_TAIL_REG,
+ HCLGE_CMDQ_TX_HEAD_REG,
+ HCLGE_CMDQ_RX_ADDR_L_REG,
+ HCLGE_CMDQ_RX_ADDR_H_REG,
+ HCLGE_CMDQ_RX_DEPTH_REG,
+ HCLGE_CMDQ_RX_TAIL_REG,
+ HCLGE_CMDQ_RX_HEAD_REG,
+ HCLGE_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_CMDQ_INTR_STS_REG,
+ HCLGE_CMDQ_INTR_EN_REG,
+ HCLGE_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
+ HCLGE_VECTOR0_OTER_EN_REG,
+ HCLGE_MISC_RESET_STS_REG,
+ HCLGE_MISC_VECTOR_INT_STS,
+ HCLGE_GLOBAL_RESET_REG,
+ HCLGE_FUN_RST_ING,
+ HCLGE_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
+ HCLGE_RING_RX_ADDR_H_REG,
+ HCLGE_RING_RX_BD_NUM_REG,
+ HCLGE_RING_RX_BD_LENGTH_REG,
+ HCLGE_RING_RX_MERGE_EN_REG,
+ HCLGE_RING_RX_TAIL_REG,
+ HCLGE_RING_RX_HEAD_REG,
+ HCLGE_RING_RX_FBD_NUM_REG,
+ HCLGE_RING_RX_OFFSET_REG,
+ HCLGE_RING_RX_FBD_OFFSET_REG,
+ HCLGE_RING_RX_STASH_REG,
+ HCLGE_RING_RX_BD_ERR_REG,
+ HCLGE_RING_TX_ADDR_L_REG,
+ HCLGE_RING_TX_ADDR_H_REG,
+ HCLGE_RING_TX_BD_NUM_REG,
+ HCLGE_RING_TX_PRIORITY_REG,
+ HCLGE_RING_TX_TC_REG,
+ HCLGE_RING_TX_MERGE_EN_REG,
+ HCLGE_RING_TX_TAIL_REG,
+ HCLGE_RING_TX_HEAD_REG,
+ HCLGE_RING_TX_FBD_NUM_REG,
+ HCLGE_RING_TX_OFFSET_REG,
+ HCLGE_RING_TX_EBD_NUM_REG,
+ HCLGE_RING_TX_EBD_OFFSET_REG,
+ HCLGE_RING_TX_BD_ERR_REG,
+ HCLGE_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+ HCLGE_TQP_INTR_GL0_REG,
+ HCLGE_TQP_INTR_GL1_REG,
+ HCLGE_TQP_INTR_GL2_REG,
+ HCLGE_TQP_INTR_RL_REG};
+
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
"App Loopback test",
"Serdes serial Loopback test",
@@ -631,6 +689,22 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->num_tqps = __le16_to_cpu(req->tqp_num);
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
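+	/* use the firmware-reported buffer sizes when present, otherwise fall
+	 * back to the driver defaults, and align both to HCLGE_BUF_SIZE_UNIT
+	 */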
+ if (req->tx_buf_size)
+ hdev->tx_buf_size =
+ __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+ else
+ hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+
+ hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
+
+ if (req->dv_buf_size)
+ hdev->dv_buf_size =
+ __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+ else
+ hdev->dv_buf_size = HCLGE_DEFAULT_DV;
+
+ hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
+
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
@@ -886,7 +960,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->pfc_max = hdev->tc_max;
}
- hdev->tm_info.num_tc = hdev->tc_max;
+ hdev->tm_info.num_tc = 1;
 	/* Currently non-contiguous tc is not supported */
for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -921,6 +995,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_cfg_gro_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "GRO hardware config cmd failed, ret = %d\n", ret);
+
+ return ret;
+}
+
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
struct hclge_tqp *tqp;
@@ -1144,6 +1240,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1289,40 +1386,51 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
{
u32 shared_buf_min, shared_buf_tc, shared_std;
int tc_num, pfc_enable_num;
- u32 shared_buf;
+ u32 shared_buf, aligned_mps;
u32 rx_priv;
int i;
tc_num = hclge_get_tc_num(hdev);
pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
+ aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
if (hnae3_dev_dcb_supported(hdev))
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+ shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
else
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
+ shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ + hdev->dv_buf_size;
- shared_buf_tc = pfc_enable_num * hdev->mps +
- (tc_num - pfc_enable_num) * hdev->mps / 2 +
- hdev->mps;
- shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
+ shared_buf_tc = pfc_enable_num * aligned_mps +
+ (tc_num - pfc_enable_num) * aligned_mps / 2 +
+ aligned_mps;
+ shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
+ HCLGE_BUF_SIZE_UNIT);
rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
- if (rx_all <= rx_priv + shared_std)
+ if (rx_all < rx_priv + shared_std)
return false;
- shared_buf = rx_all - rx_priv;
+ shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
buf_alloc->s_buf.buf_size = shared_buf;
- buf_alloc->s_buf.self.high = shared_buf;
- buf_alloc->s_buf.self.low = 2 * hdev->mps;
+ if (hnae3_dev_dcb_supported(hdev)) {
+ buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
+ buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
+ - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+ } else {
+ buf_alloc->s_buf.self.high = aligned_mps +
+ HCLGE_NON_DCB_ADDITIONAL_BUF;
+ buf_alloc->s_buf.self.low =
+ roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+ }
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
if ((hdev->hw_tc_map & BIT(i)) &&
(hdev->tm_info.hw_pfc_map & BIT(i))) {
- buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
- buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
+ buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
+ buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
} else {
buf_alloc->s_buf.tc_thrd[i].low = 0;
- buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
+ buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
}
}
@@ -1340,11 +1448,11 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
- if (total_size < HCLGE_DEFAULT_TX_BUF)
+ if (total_size < hdev->tx_buf_size)
return -ENOMEM;
if (hdev->hw_tc_map & BIT(i))
- priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+ priv->tx_buf_size = hdev->tx_buf_size;
else
priv->tx_buf_size = 0;
@@ -1362,7 +1470,6 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
-#define HCLGE_BUF_SIZE_UNIT 128
u32 rx_all = hdev->pkt_buf_size, aligned_mps;
int no_pfc_priv_num, pfc_priv_num;
struct hclge_priv_buf *priv;
@@ -1388,13 +1495,16 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
priv->wl.low = aligned_mps;
- priv->wl.high = priv->wl.low + aligned_mps;
+ priv->wl.high =
+ roundup(priv->wl.low + aligned_mps,
+ HCLGE_BUF_SIZE_UNIT);
priv->buf_size = priv->wl.high +
- HCLGE_DEFAULT_DV;
+ hdev->dv_buf_size;
} else {
priv->wl.low = 0;
priv->wl.high = 2 * aligned_mps;
- priv->buf_size = priv->wl.high;
+ priv->buf_size = priv->wl.high +
+ hdev->dv_buf_size;
}
} else {
priv->enable = 0;
@@ -1424,13 +1534,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
- priv->wl.low = 128;
+ priv->wl.low = 256;
priv->wl.high = priv->wl.low + aligned_mps;
- priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
+ priv->buf_size = priv->wl.high + hdev->dv_buf_size;
} else {
priv->wl.low = 0;
priv->wl.high = aligned_mps;
- priv->buf_size = priv->wl.high;
+ priv->buf_size = priv->wl.high + hdev->dv_buf_size;
}
}
@@ -1873,37 +1983,6 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}
-static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
- u8 *duplex)
-{
- struct hclge_query_an_speed_dup_cmd *req;
- struct hclge_desc desc;
- int speed_tmp;
- int ret;
-
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac speed/autoneg/duplex query cmd failed %d\n",
- ret);
- return ret;
- }
-
- *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
- speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
- HCLGE_QUERY_SPEED_S);
-
- ret = hclge_parse_speed(speed_tmp, speed);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "could not parse speed(=%d), %d\n", speed_tmp, ret);
-
- return ret;
-}
-
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
struct hclge_config_auto_neg_cmd *req;
@@ -1947,12 +2026,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
static int hclge_mac_init(struct hclge_dev *hdev)
{
- struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
- int mtu;
int ret;
+ hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex);
@@ -1964,15 +2041,16 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- if (netdev)
- mtu = netdev->mtu;
- else
- mtu = ETH_DATA_LEN;
+ ret = hclge_set_mac_mtu(hdev, hdev->mps);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
- ret = hclge_set_mtu(handle, mtu);
+ ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
- "set mtu failed ret=%d\n", ret);
+ "allocate buffer fail, ret=%d\n", ret);
return ret;
}
@@ -2061,34 +2139,58 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
}
+static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
+{
+ struct hclge_sfp_speed_cmd *resp = NULL;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
+ resp = (struct hclge_sfp_speed_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+			 "IMP does not support getting SFP speed, ret = %d\n", ret);
+ return ret;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
+ return ret;
+ }
+
+ *speed = resp->sfp_speed;
+
+ return 0;
+}
+
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
struct hclge_mac mac = hdev->hw.mac;
- u8 duplex;
int speed;
int ret;
- /* get the speed and duplex as autoneg'result from mac cmd when phy
+ /* get the speed from SFP cmd when phy
 	 * doesn't exist.
*/
- if (mac.phydev || !mac.autoneg)
+ if (mac.phydev)
return 0;
- ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac autoneg/speed/duplex query failed %d\n", ret);
- return ret;
- }
+	/* if IMP does not support getting SFP/qSFP speed, return directly */
+ if (!hdev->support_sfp_query)
+ return 0;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac speed/duplex config failed %d\n", ret);
+ ret = hclge_get_sfp_speed(hdev, &speed);
+ if (ret == -EOPNOTSUPP) {
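+		/* firmware lacks this command, so skip the query on later
+		 * service task runs
+		 */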
+ hdev->support_sfp_query = false;
+ return ret;
+ } else if (ret) {
return ret;
}
- return 0;
+ if (speed == HCLGE_MAC_SPEED_UNKNOWN)
+ return 0; /* do nothing if no SFP */
+
+ /* must config full duplex for SFP */
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
}
static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
@@ -2129,12 +2231,13 @@ static void hclge_service_complete(struct hclge_dev *hdev)
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
- u32 rst_src_reg;
- u32 cmdq_src_reg;
+ u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
/* fetch the events from their corresponding regs */
rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+ msix_src_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
/* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event in this go and will
@@ -2144,7 +2247,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
/* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
@@ -2152,17 +2264,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
}
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "core reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
- return HCLGE_VECTOR0_EVENT_RST;
- }
+ /* check for vector0 msix event source */
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
+ return HCLGE_VECTOR0_EVENT_ERR;
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
@@ -2214,6 +2325,19 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
/* vector 0 interrupt is shared with reset and mailbox source events.*/
switch (event_cause) {
+ case HCLGE_VECTOR0_EVENT_ERR:
+		/* we do not know what type of reset is required yet. This can
+		 * only be decided after we fetch the types of errors which
+		 * caused this event. Therefore we do the following for now:
+		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
+		 *    type of reset to be used is deferred.
+		 * 2. Schedule the reset service task.
+		 * 3. When the service task receives HNAE3_UNKNOWN_RESET it
+		 *    will fetch the correct type of reset by first decoding
+		 *    the types of errors.
+ */
+ set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
+ /* fall through */
case HCLGE_VECTOR0_EVENT_RST:
hclge_reset_task_schedule(hdev);
break;
@@ -2308,21 +2432,56 @@ static int hclge_notify_client(struct hclge_dev *hdev,
int ret;
ret = client->ops->reset_notify(handle, type);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify nic client failed %d(%d)\n", type, ret);
return ret;
+ }
}
return 0;
}
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ int ret = 0;
+ u16 i;
+
+ if (!client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ struct hnae3_handle *handle = &hdev->vport[i].roce;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify roce client failed %d(%d)",
+ type, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 5
+#define HCLGE_RESET_WAIT_CNT 200
u32 val, reg, reg_bit;
u32 cnt = 0;
switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_IMP_RESET_BIT;
+ break;
case HNAE3_GLOBAL_RESET:
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
@@ -2335,6 +2494,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
+ case HNAE3_FLR_RESET:
+ break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -2342,6 +2503,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
+ if (hdev->reset_type == HNAE3_FLR_RESET) {
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < HCLGE_RESET_WAIT_CNT)
+ msleep(HCLGE_RESET_WATI_MS);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout: %d\n", cnt);
+ return -EBUSY;
+ }
+
+ return 0;
+ }
+
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -2358,6 +2533,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return 0;
}
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+ struct hclge_vf_rst_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_vf_rst_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+ req->dest_vfid = func_id;
+
+ if (reset)
+ req->vf_rst = 0x1;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+ int i;
+
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to set/clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set vf(%d) rst failed %d!\n",
+ vport->vport_id, ret);
+ return ret;
+ }
+
+ if (!reset)
+ continue;
+
+ /* Inform VF to process the reset.
+ * hclge_inform_reset_assert_to_vf may fail if VF
+ * driver is not loaded.
+ */
+ ret = hclge_inform_reset_assert_to_vf(vport);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "inform reset to vf(%d) failed %d!\n",
+ vport->vport_id, ret);
+ }
+
+ return 0;
+}
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -2396,11 +2620,16 @@ static void hclge_do_reset(struct hclge_dev *hdev)
break;
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
- hclge_func_reset_cmd(hdev, 0);
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
+ case HNAE3_FLR_RESET:
+ dev_info(&pdev->dev, "FLR requested\n");
+ /* schedule again to check later */
+ set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -2413,21 +2642,46 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+ /* first, resolve any unknown reset type to the known type(s) */
+ if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
+ /* we will intentionally ignore any errors from this function
+ * as we will end up in *some* reset request in any case
+ */
+ hclge_handle_hw_msix_error(hdev, addr);
+ clear_bit(HNAE3_UNKNOWN_RESET, addr);
+		/* We deferred clearing the error event which caused the
+		 * interrupt, since it was not possible to do that in
+		 * interrupt context (this is why the new UNKNOWN reset type
+		 * was introduced). Now that the errors have been handled and
+		 * cleared in hardware, we can safely enable the interrupt
+		 * again. This is an exception to the norm.
+ */
+ hclge_enable_vector(&hdev->misc_vector, true);
+ }
+
/* return the highest priority reset level amongst all */
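+	/* a higher level reset also clears the lower level requests it covers */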
- if (test_bit(HNAE3_GLOBAL_RESET, addr))
+ if (test_bit(HNAE3_IMP_RESET, addr)) {
+ rst_level = HNAE3_IMP_RESET;
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
rst_level = HNAE3_GLOBAL_RESET;
- else if (test_bit(HNAE3_CORE_RESET, addr))
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_CORE_RESET, addr)) {
rst_level = HNAE3_CORE_RESET;
- else if (test_bit(HNAE3_IMP_RESET, addr))
- rst_level = HNAE3_IMP_RESET;
- else if (test_bit(HNAE3_FUNC_RESET, addr))
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
-
- /* now, clear all other resets */
- clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
- clear_bit(HNAE3_IMP_RESET, addr);
- clear_bit(HNAE3_FUNC_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
return rst_level;
}
@@ -2457,39 +2711,209 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
+static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, true);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+		/* There is no mechanism for the PF to know whether the VF has
+		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
+ */
+ msleep(100);
+ ret = hclge_func_reset_cmd(hdev, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "asserting function reset fail %d!\n", ret);
+ return ret;
+ }
+
+		/* After performing PF reset, it is not necessary to do the
+ * mailbox handling or send any command to firmware, because
+ * any mailbox handling or command to firmware is only valid
+ * after hclge_cmd_init is called.
+ */
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ break;
+ case HNAE3_FLR_RESET:
+		/* There is no mechanism for the PF to know whether the VF has
+		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
+ */
+ msleep(100);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ case HNAE3_IMP_RESET:
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+ BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+ break;
+ default:
+ break;
+ }
+
+ dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+ return ret;
+}
+
+static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+{
+#define MAX_RESET_FAIL_CNT 5
+#define RESET_UPGRADE_DELAY_SEC 10
+
+ if (hdev->reset_pending) {
+ dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+ hdev->reset_pending);
+ return true;
+ } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
+ (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
+ BIT(HCLGE_IMP_RESET_BIT))) {
+ dev_info(&hdev->pdev->dev,
+ "reset failed because IMP Reset is pending\n");
+ hclge_clear_reset_cause(hdev);
+ return false;
+ } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+ hdev->reset_fail_cnt++;
+ if (is_timeout) {
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule to wait for hw reset done\n");
+ return true;
+ }
+
+ dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
+ hclge_clear_reset_cause(hdev);
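+		/* hclge_reset_timer will request a global reset once the
+		 * upgrade delay expires
+		 */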
+ mod_timer(&hdev->reset_timer,
+ jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
+
+ return false;
+ }
+
+ hclge_clear_reset_cause(hdev);
+ dev_err(&hdev->pdev->dev, "Reset fail!\n");
+ return false;
+}
+
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hnae3_handle *handle;
+ bool is_timeout = false;
+ int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
/* perform reset of the stack & ae device for a client */
- handle = &hdev->vport[0].nic;
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_reset_prepare_down(hdev);
+ if (ret)
+ goto err_reset;
+
rtnl_lock();
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
rtnl_unlock();
- if (!hclge_reset_wait(hdev)) {
- rtnl_lock();
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclge_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
- hclge_clear_reset_cause(hdev);
- } else {
- rtnl_lock();
- /* schedule again to check pending resets later */
- set_bit(hdev->reset_type, &hdev->reset_pending);
- hclge_reset_task_schedule(hdev);
+ if (hclge_reset_wait(hdev)) {
+ is_timeout = true;
+ goto err_reset;
}
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- handle->last_reset_time = jiffies;
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ rtnl_lock();
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ hclge_clear_reset_cause(hdev);
+
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ hdev->last_reset_time = jiffies;
+ hdev->reset_fail_cnt = 0;
ae_dev->reset_type = HNAE3_NONE_RESET;
+
+ return;
+
+err_reset_lock:
+ rtnl_unlock();
+err_reset:
+ if (hclge_reset_err_handle(hdev, is_timeout))
+ hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
@@ -2515,20 +2939,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
if (!handle)
handle = &hdev->vport[0].nic;
- if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+ if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
return;
- else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
- handle->reset_level = HNAE3_FUNC_RESET;
+ else if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclge_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+ hdev->reset_level = HNAE3_FUNC_RESET;
dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
- handle->reset_level);
+ hdev->reset_level);
/* request reset & schedule reset task */
- set_bit(handle->reset_level, &hdev->reset_request);
+ set_bit(hdev->reset_level, &hdev->reset_request);
hclge_reset_task_schedule(hdev);
- if (handle->reset_level < HNAE3_GLOBAL_RESET)
- handle->reset_level++;
+ if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+ hdev->reset_level++;
+}
+
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+ struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+ dev_info(&hdev->pdev->dev,
+ "triggering global reset in reset timer\n");
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -2542,6 +2988,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
* b. else, we can come back later to check this status so re-sched
* now.
*/
+ hdev->last_reset_time = jiffies;
hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_reset(hdev);
@@ -2584,6 +3031,23 @@ static void hclge_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+ int i;
+
+	/* start from vport 1, since the PF is always alive */
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+		/* If the VF is not alive, restore its mps to the default value */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ }
+}
+
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
@@ -2596,6 +3060,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
+ hclge_update_vport_alive(hdev);
hclge_service_complete(hdev);
}
@@ -4212,6 +4677,13 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
u16 tqps;
+ if (vf > hdev->num_req_vfs) {
+ dev_err(&hdev->pdev->dev,
+ "Error: vf id (%d) > max vf num (%d)\n",
+ vf, hdev->num_req_vfs);
+ return -EINVAL;
+ }
+
dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
@@ -4222,13 +4694,6 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
return -EINVAL;
}
- if (vf > hdev->num_req_vfs) {
- dev_err(&hdev->pdev->dev,
- "Error: vf id (%d) > max vf num (%d)\n",
- vf, hdev->num_req_vfs);
- return -EINVAL;
- }
-
action = HCLGE_FD_ACTION_ACCEPT_PACKET;
q_index = ring;
}
@@ -4336,8 +4801,16 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
struct hlist_node *node;
int ret;
+ /* Return ok here, because reset error handling will check this
+	 * return value. If an error is returned here, the reset process will
+ * fail.
+ */
if (!hnae3_dev_fd_supported(hdev))
- return -EOPNOTSUPP;
+ return 0;
+
+	/* if fd is disabled, the rules should not be restored during reset */
+ if (!hdev->fd_cfg.fd_en)
+ return 0;
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
@@ -4592,6 +5065,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
return 0;
}
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->reset_count;
+}
+
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4801,19 +5299,28 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
}
}
-static int hclge_ae_start(struct hnae3_handle *handle)
+static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, true);
+ if (enable) {
+ mod_timer(&hdev->service_timer, jiffies + HZ);
+ } else {
+ del_timer_sync(&hdev->service_timer);
+ cancel_work_sync(&hdev->service_task);
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ }
+}
+
+static int hclge_ae_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
/* mac enable */
hclge_cfg_mac_mode(hdev, true);
clear_bit(HCLGE_STATE_DOWN, &hdev->state);
- mod_timer(&hdev->service_timer, jiffies + HZ);
hdev->hw.mac.link = 0;
/* reset tqp stats */
@@ -4832,17 +5339,17 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+	/* If it is not a PF reset, the firmware will disable the MAC,
+	 * so we only need to stop the PHY here.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+ hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
return;
}
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, false);
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+ hclge_reset_tqp(handle, i);
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -4851,11 +5358,35 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
hclge_update_link_status(hdev);
}
+int hclge_vport_start(struct hclge_vport *vport)
+{
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->last_active_jiffies = jiffies;
+ return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ hclge_vport_stop(vport);
+}
+
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
u16 cmdq_resp, u8 resp_code,
enum hclge_mac_vlan_tbl_opcode op)
@@ -6003,54 +6534,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclge_set_vlan_rx_offload_cfg(vport);
}
-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
- int max_frm_size;
- int ret;
-
- max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
- if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
- return -EINVAL;
-
- max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(max_frm_size);
+ req->max_frm_size = cpu_to_le16(new_mps);
req->min_frm_size = HCLGE_MAC_MIN_FRAME;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
- else
- hdev->mps = max_frm_size;
-
- return ret;
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
struct hclge_dev *hdev = vport->back;
- int ret;
+ int i, max_frm_size, ret = 0;
+
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
+ return -EINVAL;
+
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+ mutex_lock(&hdev->vport_lock);
+ /* VF's mps must fit within hdev->mps */
+ if (vport->vport_id && max_frm_size > hdev->mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ } else if (vport->vport_id) {
+ vport->mps = max_frm_size;
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+	/* PF's mps must be greater than or equal to each VF's mps */
+ for (i = 1; i < hdev->num_alloc_vport; i++)
+ if (max_frm_size < hdev->vport[i].mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ }
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- ret = hclge_set_mac_mtu(hdev, new_mtu);
+ ret = hclge_set_mac_mtu(hdev, max_frm_size);
if (ret) {
dev_err(&hdev->pdev->dev,
"Change mtu fail, ret =%d\n", ret);
- return ret;
+ goto out;
}
+ hdev->mps = max_frm_size;
+ vport->mps = max_frm_size;
+
ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
"Allocate buffer fail, ret =%d\n", ret);
+out:
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
@@ -6098,8 +6651,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
-static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
- u16 queue_id)
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
struct hnae3_queue *queue;
struct hclge_tqp *tqp;
@@ -6250,7 +6802,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
if (!phydev->link || !phydev->autoneg)
return 0;
- local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
if (phydev->pause)
remote_advertising = LPA_PAUSE_CAP;
@@ -6612,6 +7164,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
+ if (hdev->reset_timer.function)
+ del_timer_sync(&hdev->reset_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
if (hdev->rst_service_task.func)
@@ -6620,6 +7174,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
cancel_work_sync(&hdev->mbx_service_task);
}
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_WAIT_MS 100
+#define HCLGE_FLR_WAIT_CNT 50
+ struct hclge_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
+
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGE_FLR_WAIT_CNT)
+ msleep(HCLGE_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
+}
+
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -6635,7 +7217,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev;
+ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+ mutex_init(&hdev->vport_lock);
ret = hclge_pci_init(hdev);
if (ret) {
@@ -6727,6 +7313,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ goto err_mdiobus_unreg;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6762,13 +7352,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_hw_error_set_state(hdev, true);
if (ret) {
dev_err(&pdev->dev,
- "hw error interrupts enable failed, ret =%d\n", ret);
+ "fail(%d) to enable hw error interrupts\n", ret);
goto err_mdiobus_unreg;
}
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
+ timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
@@ -6779,6 +7370,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, true);
hclge_state_init(hdev);
+ hdev->last_reset_time = jiffies;
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -6806,6 +7398,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ hclge_vport_start(vport);
+ vport++;
+ }
+}
+
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
@@ -6823,19 +7426,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- ret = hclge_get_cap(hdev);
- if (ret) {
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
- ret);
- return ret;
- }
-
- ret = hclge_configure(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
- return ret;
- }
-
ret = hclge_map_tqp(hdev);
if (ret) {
dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
@@ -6856,6 +7446,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6881,11 +7475,17 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- /* Re-enable the TM hw error interrupts because
- * they get disabled on core/global reset.
+ /* Re-enable the hw error interrupts because
+ * the interrupts get disabled on core/global reset.
*/
- if (hclge_enable_tm_hw_error(hdev, true))
- dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
+ ret = hclge_hw_error_set_state(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail(%d) to re-enable HNS hw error interrupts\n", ret);
+ return ret;
+ }
+
+ hclge_reset_vport_state(hdev);
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -6913,6 +7513,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
+ mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
@@ -7166,8 +7767,15 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
return 0;
}
+#define MAX_SEPARATE_NUM 4
+#define SEPARATOR_VALUE 0xFFFFFFFF
+#define REG_NUM_PER_LINE 4
+#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u32 regs_num_32_bit, regs_num_64_bit;
@@ -7180,15 +7788,25 @@ static int hclge_get_regs_len(struct hnae3_handle *handle)
return -EOPNOTSUPP;
}
- return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+
+ return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
+ regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
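
The length returned above pads every register table to whole 16-byte lines and reserves one extra separator line per block, with the ring and TQP-interrupt blocks repeated per queue and per used interrupt vector. A small sketch of that arithmetic, using made-up table sizes and counts rather than the driver's real ones:

#include <stdio.h>
#include <stdint.h>

#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(uint32_t))

/* Lines used by one register block: the payload rounded down to whole
 * 16-byte lines, plus one extra line holding the remaining registers
 * and/or 0xFFFFFFFF separators (the driver's "+ 1").
 */
static size_t block_lines(size_t nregs)
{
	return nregs * sizeof(uint32_t) / REG_LEN_PER_LINE + 1;
}

int main(void)
{
	/* example table sizes and queue/vector counts, not the real ones */
	size_t cmdq_regs = 14, common_regs = 3, ring_regs = 26, intr_regs = 5;
	size_t num_tqps = 16, num_msi_used = 17;

	size_t bytes = (block_lines(cmdq_regs) + block_lines(common_regs) +
			block_lines(ring_regs) * num_tqps +
			block_lines(intr_regs) * (num_msi_used - 1)) *
		       REG_LEN_PER_LINE;

	printf("PCIe register part of the dump: %zu bytes\n", bytes);
	return 0;
}

The 32-bit and 64-bit firmware register areas are then appended verbatim, which is why their sizes are still added at the end of the return statement.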
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u32 regs_num_32_bit, regs_num_64_bit;
+ int i, j, reg_um, separator_num;
+ u32 *reg = data;
int ret;
*version = hdev->fw_version;
@@ -7200,16 +7818,53 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
return;
}
- ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
+	/* fetching per-PF register values from the PF PCIe register space */
+ reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < kinfo->num_tqps; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ 0x200 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+
+ reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ 4 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+
+	/* fetching PF common register values from the firmware */
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get 32 bit register failed, ret = %d.\n", ret);
return;
}
- data = (u32 *)data + regs_num_32_bit;
- ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
- data);
+ reg += regs_num_32_bit;
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
if (ret)
dev_err(&hdev->pdev->dev,
"Get 64 bit register failed, ret = %d.\n", ret);
@@ -7272,9 +7927,19 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
+static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_config_gro(hdev, enable);
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
+ .flr_prepare = hclge_flr_prepare,
+ .flr_done = hclge_flr_done,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -7285,6 +7950,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
+ .client_start = hclge_client_start,
+ .client_stop = hclge_client_stop,
.get_status = hclge_get_status,
.get_ksettings_an_result = hclge_get_ksettings_an_result,
.update_speed_duplex_h = hclge_update_speed_duplex_h,
@@ -7321,6 +7988,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .set_default_reset_request = hclge_set_def_reset_request,
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,
.get_channels = hclge_get_channels,
@@ -7336,7 +8004,14 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
- .process_hw_error = hclge_process_ras_hw_error,
+ .dbg_run_cmd = hclge_dbg_run_cmd,
+ .handle_hw_ras_error = hclge_handle_hw_ras_error,
+ .get_hw_reset_stat = hclge_get_hw_reset_stat,
+ .ae_dev_resetting = hclge_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+ .set_gro_en = hclge_gro_en,
+ .get_global_queue_id = hclge_covert_handle_qid_global,
+ .set_timer_task = hclge_set_timer_task,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0d9215404269..6615b85a1c52 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -28,6 +28,62 @@
#define HCLGE_VECTOR_REG_OFFSET 0x4
#define HCLGE_VECTOR_VF_OFFSET 0x100000
+#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
+#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
+#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
+#define HCLGE_CMDQ_TX_TAIL_REG 0x27010
+#define HCLGE_CMDQ_TX_HEAD_REG 0x27014
+#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
+#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
+#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
+#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
+#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGE_CMDQ_INTR_SRC_REG 0x27100
+#define HCLGE_CMDQ_INTR_STS_REG 0x27104
+#define HCLGE_CMDQ_INTR_EN_REG 0x27108
+#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
+
+/* bar registers for common func */
+#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
+#define HCLGE_RAS_OTHER_STS_REG 0x20B00
+#define HCLGE_FUNC_RESET_STS_REG 0x20C00
+#define HCLGE_GRO_EN_REG 0x28000
+
+/* bar registers for rcb */
+#define HCLGE_RING_RX_ADDR_L_REG 0x80000
+#define HCLGE_RING_RX_ADDR_H_REG 0x80004
+#define HCLGE_RING_RX_BD_NUM_REG 0x80008
+#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C
+#define HCLGE_RING_RX_MERGE_EN_REG 0x80014
+#define HCLGE_RING_RX_TAIL_REG 0x80018
+#define HCLGE_RING_RX_HEAD_REG 0x8001C
+#define HCLGE_RING_RX_FBD_NUM_REG 0x80020
+#define HCLGE_RING_RX_OFFSET_REG 0x80024
+#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028
+#define HCLGE_RING_RX_STASH_REG 0x80030
+#define HCLGE_RING_RX_BD_ERR_REG 0x80034
+#define HCLGE_RING_TX_ADDR_L_REG 0x80040
+#define HCLGE_RING_TX_ADDR_H_REG 0x80044
+#define HCLGE_RING_TX_BD_NUM_REG 0x80048
+#define HCLGE_RING_TX_PRIORITY_REG 0x8004C
+#define HCLGE_RING_TX_TC_REG 0x80050
+#define HCLGE_RING_TX_MERGE_EN_REG 0x80054
+#define HCLGE_RING_TX_TAIL_REG 0x80058
+#define HCLGE_RING_TX_HEAD_REG 0x8005C
+#define HCLGE_RING_TX_FBD_NUM_REG 0x80060
+#define HCLGE_RING_TX_OFFSET_REG 0x80064
+#define HCLGE_RING_TX_EBD_NUM_REG 0x80068
+#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070
+#define HCLGE_RING_TX_BD_ERR_REG 0x80074
+#define HCLGE_RING_EN_REG 0x80090
+
+/* bar registers for tqp interrupt */
+#define HCLGE_TQP_INTR_CTRL_REG 0x20000
+#define HCLGE_TQP_INTR_GL0_REG 0x20100
+#define HCLGE_TQP_INTR_GL1_REG 0x20200
+#define HCLGE_TQP_INTR_GL2_REG 0x20300
+#define HCLGE_TQP_INTR_RL_REG 0x20900
+
#define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
@@ -97,11 +153,13 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
/* Reset related Registers */
+#define HCLGE_PF_OTHER_INT_REG 0x20600
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
+#define HCLGE_IMP_RESET_BIT 2
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -115,8 +173,10 @@ enum HLCGE_PORT_TYPE {
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+
#define HCLGE_MAC_DEFAULT_FRAME \
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728
@@ -145,12 +205,14 @@ enum HCLGE_DEV_STATE {
enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_RST,
HCLGE_VECTOR0_EVENT_MBX,
+ HCLGE_VECTOR0_EVENT_ERR,
HCLGE_VECTOR0_EVENT_OTHER,
};
#define HCLGE_MPF_ENBALE 1
enum HCLGE_MAC_SPEED {
+ HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */
HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */
@@ -593,10 +655,16 @@ struct hclge_dev {
struct hclge_misc_vector misc_vector;
struct hclge_hw_stats hw_stats;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long last_reset_time;
enum hnae3_reset_type reset_type;
+ enum hnae3_reset_type reset_level;
+ unsigned long default_reset_request;
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
+ unsigned long reset_count; /* the number of reset has been done */
+ u32 reset_fail_cnt;
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -614,6 +682,7 @@ struct hclge_dev {
u8 hw_tc_map;
u8 tc_num_last_time;
enum hclge_fc_mode fc_mode_last_time;
+ u8 support_sfp_query;
#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
@@ -644,6 +713,7 @@ struct hclge_dev {
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list service_timer;
+ struct timer_list reset_timer;
struct work_struct service_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -666,7 +736,12 @@ struct hclge_dev {
u32 flag;
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
+ u32 tx_buf_size; /* Tx buffer size for each TC */
+ u32 dv_buf_size; /* Dv buffer size for each TC */
+
u32 mps; /* Max packet size */
+ /* vport_lock protect resource shared by vports */
+ struct mutex vport_lock;
struct hclge_vlan_type_cfg vlan_type_cfg;
@@ -717,6 +792,11 @@ struct hclge_rss_tuple_cfg {
u8 ipv6_fragment_en;
};
+enum HCLGE_VPORT_STATE {
+ HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAX
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -742,6 +822,10 @@ struct hclge_vport {
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
+
+ unsigned long state;
+ unsigned long last_active_jiffies;
+ u32 mps; /* Max packet size */
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -768,6 +852,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
+static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
+{
+ return !!hdev->reset_pending;
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
@@ -777,9 +867,15 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
+int hclge_vport_start(struct hclge_vport *vport);
+void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f890022938d9..a1de451a85df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
-static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+ enum hnae3_reset_type reset_type;
u8 msg_data[2];
u8 dest_vfid;
dest_vfid = (u8)vport->vport_id;
+ if (hdev->reset_type == HNAE3_FUNC_RESET)
+ reset_type = HNAE3_VF_PF_FUNC_RESET;
+ else if (hdev->reset_type == HNAE3_FLR_RESET)
+ reset_type = HNAE3_VF_FULL_RESET;
+ else
+ return -EINVAL;
+
+ memcpy(&msg_data[0], &reset_type, sizeof(u16));
+
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
@@ -290,6 +301,21 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
return status;
}
+static int hclge_set_vf_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ bool alive = !!mbx_req->msg[2];
+ int ret = 0;
+
+ if (alive)
+ ret = hclge_vport_start(vport);
+ else
+ hclge_vport_stop(vport);
+
+ return ret;
+}
+
static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -363,24 +389,41 @@ static void hclge_reset_vf(struct hclge_vport *vport,
int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
- mbx_req->mbx_src_vfid);
-
- /* Acknowledge VF that PF is now about to assert the reset for the VF.
- * On receiving this message VF will get into pending state and will
- * start polling for the hardware reset completion status.
- */
- ret = hclge_inform_reset_assert_to_vf(vport);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to inform VF(%d)of reset, reset failed!\n",
- ret, vport->vport_id);
- return;
- }
+ vport->vport_id);
- dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
- mbx_req->mbx_src_vfid);
- /* reset this virtual function */
- hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
+ ret = hclge_func_reset_cmd(hdev, vport->vport_id);
+ hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
+static void hclge_vf_keep_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ vport->last_active_jiffies = jiffies;
+}
+
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ int ret;
+ u32 mtu;
+
+ memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
+ ret = hclge_set_vport_mtu(vport, mtu);
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ u16 queue_id, qid_in_pf;
+ u8 resp_data[2];
+
+ memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+ qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+ memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
@@ -460,6 +503,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF failed(%d) to config VF's VLAN\n",
ret);
break;
+ case HCLGE_MBX_SET_ALIVE:
+ ret = hclge_set_vf_alive(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ break;
case HCLGE_MBX_GET_QINFO:
ret = hclge_get_vf_queue_info(vport, req, true);
if (ret)
@@ -487,6 +537,22 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_RESET:
hclge_reset_vf(vport, req);
break;
+ case HCLGE_MBX_KEEP_ALIVE:
+ hclge_vf_keep_alive(vport, req);
+ break;
+ case HCLGE_MBX_SET_MTU:
+ ret = hclge_set_vf_mtu(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ break;
+ case HCLGE_MBX_GET_QID_IN_PF:
+ ret = hclge_get_queue_id_in_pf(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get qid for VF\n",
+ ret);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03018638f701..dabb8437f8dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -12,7 +12,7 @@
SUPPORTED_TP | \
PHY_10BT_FEATURES | \
PHY_100BT_FEATURES | \
- PHY_1000BT_FEATURES)
+ SUPPORTED_1000baseT_Full)
enum hclge_mdio_c22_op_seq {
HCLGE_MDIO_C22_WRITE = 1,
@@ -179,6 +179,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
int duplex, speed;
int ret;
+	/* When the phy link is down, do nothing */
+ if (netdev->phydev->link == 0)
+ return;
+
speed = netdev->phydev->speed;
duplex = netdev->phydev->duplex;
@@ -195,12 +199,13 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
int ret;
if (!phydev)
return 0;
- phydev->supported &= ~SUPPORTED_FIBRE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
@@ -210,7 +215,15 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
return ret;
}
- phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ mask);
+ linkmode_and(phydev->supported, phydev->supported, mask);
phy_support_asym_pause(phydev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 494e562fe8c7..00458da67503 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1259,15 +1259,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
return 0;
}
-int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
struct hclge_vport *vport = hdev->vport;
struct hnae3_knic_private_info *kinfo;
u32 i, k;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- if (prio_tc[i] >= hdev->tm_info.num_tc)
- return -EINVAL;
hdev->tm_info.prio_tc[i] = prio_tc[i];
for (k = 0; k < hdev->num_alloc_vport; k++) {
@@ -1275,18 +1273,12 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
kinfo->prio_tc[i] = prio_tc[i];
}
}
- return 0;
}
-int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
u8 i, bit_map = 0;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- if (num_tc > hdev->vport[i].alloc_tqps)
- return -EINVAL;
- }
-
hdev->tm_info.num_tc = num_tc;
for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -1300,8 +1292,6 @@ int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hdev->hw_tc_map = bit_map;
hclge_tm_schd_info_init(hdev);
-
- return 0;
}
int hclge_tm_init_hw(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 25eef13a3e14..b6496a439304 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -40,6 +40,13 @@ struct hclge_nq_to_qs_link_cmd {
__le16 qset_id;
};
+struct hclge_tqp_tx_queue_tc_cmd {
+ __le16 queue_id;
+ __le16 rsvd;
+ u8 tc_id;
+ u8 rev[3];
+};
+
struct hclge_pg_weight_cmd {
u8 pg_id;
u8 dwrr;
@@ -55,6 +62,12 @@ struct hclge_qs_weight_cmd {
u8 dwrr;
};
+struct hclge_ets_tc_weight_cmd {
+ u8 tc_weight[HNAE3_MAX_TC];
+ u8 weight_offset;
+ u8 rsvd[15];
+};
+
#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0)
#define HCLGE_TM_SHAP_IR_B_LSH 0
#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8)
@@ -131,8 +144,8 @@ struct hclge_port_shapping_cmd {
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_pause_setup_hw(struct hclge_dev *hdev);
int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
-int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
-int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 0d3b445f6799..d5765c8cf3a3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode)
return false;
}
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_dev *hdev = ring->dev;
+ struct hclgevf_hw *hw = &hdev->hw;
+ u32 reg_val;
+
+ if (ring->flag == HCLGEVF_TYPE_CSQ) {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+ hclgevf_cmd_config_regs(&hw->cmq.csq);
+ hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
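
The (addr >> 31) >> 1 split used when programming the base-address registers is the usual kernel idiom (compare upper_32_bits()) for taking the high 32 bits of a value whose type may itself be only 32 bits wide, where a single shift by 32 would be undefined. A tiny user-space illustration with an arbitrary 64-bit address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t desc_dma_addr = 0x0000001234567000ULL;

	/* low word goes to BASEADDR_L, high word to BASEADDR_H; two smaller
	 * shifts stay well defined even for a 32-bit address type
	 */
	uint32_t lo = (uint32_t)desc_dma_addr;
	uint32_t hi = (uint32_t)((desc_dma_addr >> 31) >> 1);

	printf("BASEADDR_L = 0x%08x, BASEADDR_H = 0x%08x\n", lo, hi);
	return 0;
}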
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
@@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
}
}
-static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
- struct hclgevf_cmq_ring *ring)
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
struct hclgevf_hw *hw = &hdev->hw;
- int ring_type = ring->flag;
- u32 reg_val;
+ struct hclgevf_cmq_ring *ring =
+ (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
int ret;
- ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- spin_lock_init(&ring->lock);
- ring->next_to_clean = 0;
- ring->next_to_use = 0;
ring->dev = hdev;
+ ring->flag = ring_type;
/* allocate CSQ/CRQ descriptor */
ret = hclgevf_alloc_cmd_desc(ring);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
- return ret;
- }
- /* initialize the hardware registers with csq/crq dma-address,
- * descriptor number, head & tail pointers
- */
- switch (ring_type) {
- case HCLGEVF_TYPE_CSQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- return 0;
- case HCLGEVF_TYPE_CRQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
- return 0;
- default:
- return -EINVAL;
- }
+ return ret;
}
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -188,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ if (num > hclgevf_ring_space(&hw->cmq.csq) ||
+ test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -282,55 +284,83 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
return status;
}
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
- u32 version;
int ret;
- /* setup Tx write back timeout */
+ /* Setup the lock for command queue */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+ hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- /* setup queue CSQ/CRQ rings */
- hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CSQ ring\n", ret);
+ "CSQ ring setup error %d\n", ret);
return ret;
}
- hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CRQ ring\n", ret);
+ "CRQ ring setup error %d\n", ret);
goto err_csq;
}
+ return 0;
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ return ret;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
/* initialize the pointers of async rx queue of mailbox */
hdev->arq.hdev = hdev;
hdev->arq.head = 0;
hdev->arq.tail = 0;
hdev->arq.count = 0;
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+ hdev->hw.cmq.crq.next_to_use = 0;
+
+ hclgevf_cmd_init_regs(&hdev->hw);
+
+ spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+	/* Check if there is a new reset pending, because a higher level
+	 * reset may happen while a lower level reset is being processed.
+	 */
+ if (hclgevf_is_reset_pending(hdev)) {
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
/* get firmware version */
ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to query firmware version\n", ret);
- goto err_crq;
+ return ret;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
-err_crq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-err_csq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
-
- return ret;
}
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index bc294b0c8b62..47030b42341f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -87,6 +87,8 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* GRO command */
+ HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
@@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
+#define HCLGEVF_GRO_EN_B 0
+struct hclgevf_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
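
As a quick sanity check on the new GRO command layout, the 2-byte gro_en field plus 22 reserved bytes should exactly fill a command descriptor's data area, assuming the descriptor payload is six 32-bit words as elsewhere in this driver family. A user-space sketch of that size check, with plain uint16_t standing in for __le16:

#include <stdint.h>

/* user-space mirror of hclgevf_cfg_gro_status_cmd; uint16_t has the
 * same size and alignment as __le16 here
 */
struct cfg_gro_status_cmd {
	uint16_t gro_en;
	uint8_t rsv[22];
};

/* assumption: the descriptor payload is 24 bytes (6 x 32-bit words) */
_Static_assert(sizeof(struct cfg_gro_status_cmd) == 24,
	       "GRO config command must fill the descriptor data area");

int main(void)
{
	return 0;
}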
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
@@ -256,6 +264,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
int hclgevf_cmd_init(struct hclgevf_dev *hdev);
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 085edb945389..82103d5fa815 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
@@ -10,8 +11,7 @@
#define HCLGEVF_NAME "hclgevf"
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
-static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
@@ -23,6 +23,58 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
+static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
+ HCLGEVF_CMDQ_TX_ADDR_H_REG,
+ HCLGEVF_CMDQ_TX_DEPTH_REG,
+ HCLGEVF_CMDQ_TX_TAIL_REG,
+ HCLGEVF_CMDQ_TX_HEAD_REG,
+ HCLGEVF_CMDQ_RX_ADDR_L_REG,
+ HCLGEVF_CMDQ_RX_ADDR_H_REG,
+ HCLGEVF_CMDQ_RX_DEPTH_REG,
+ HCLGEVF_CMDQ_RX_TAIL_REG,
+ HCLGEVF_CMDQ_RX_HEAD_REG,
+ HCLGEVF_VECTOR0_CMDQ_SRC_REG,
+ HCLGEVF_CMDQ_INTR_STS_REG,
+ HCLGEVF_CMDQ_INTR_EN_REG,
+ HCLGEVF_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
+ HCLGEVF_RST_ING,
+ HCLGEVF_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
+ HCLGEVF_RING_RX_ADDR_H_REG,
+ HCLGEVF_RING_RX_BD_NUM_REG,
+ HCLGEVF_RING_RX_BD_LENGTH_REG,
+ HCLGEVF_RING_RX_MERGE_EN_REG,
+ HCLGEVF_RING_RX_TAIL_REG,
+ HCLGEVF_RING_RX_HEAD_REG,
+ HCLGEVF_RING_RX_FBD_NUM_REG,
+ HCLGEVF_RING_RX_OFFSET_REG,
+ HCLGEVF_RING_RX_FBD_OFFSET_REG,
+ HCLGEVF_RING_RX_STASH_REG,
+ HCLGEVF_RING_RX_BD_ERR_REG,
+ HCLGEVF_RING_TX_ADDR_L_REG,
+ HCLGEVF_RING_TX_ADDR_H_REG,
+ HCLGEVF_RING_TX_BD_NUM_REG,
+ HCLGEVF_RING_TX_PRIORITY_REG,
+ HCLGEVF_RING_TX_TC_REG,
+ HCLGEVF_RING_TX_MERGE_EN_REG,
+ HCLGEVF_RING_TX_TAIL_REG,
+ HCLGEVF_RING_TX_HEAD_REG,
+ HCLGEVF_RING_TX_FBD_NUM_REG,
+ HCLGEVF_RING_TX_OFFSET_REG,
+ HCLGEVF_RING_TX_EBD_NUM_REG,
+ HCLGEVF_RING_TX_EBD_OFFSET_REG,
+ HCLGEVF_RING_TX_BD_ERR_REG,
+ HCLGEVF_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
+ HCLGEVF_TQP_INTR_GL0_REG,
+ HCLGEVF_TQP_INTR_GL1_REG,
+ HCLGEVF_TQP_INTR_GL2_REG,
+ HCLGEVF_TQP_INTR_RL_REG};
+
static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
struct hnae3_handle *handle)
{
@@ -204,17 +256,28 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
return 0;
}
+static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[2], resp_data[2];
+ u16 qid_in_pf = 0;
+ int ret;
+
+ memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
+ 2, true, resp_data, 2);
+ if (!ret)
+ qid_in_pf = *(u16 *)resp_data;
+
+ return qid_in_pf;
+}
+
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
struct hclgevf_tqp *tqp;
int i;
- /* if this is on going reset then we need to re-allocate the TPQs
- * since we cannot assume we would get same number of TPQs back from PF
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, hdev->htqp);
-
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclgevf_tqp), GFP_KERNEL);
if (!hdev->htqp)
@@ -258,12 +321,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
new_tqps = kinfo->rss_size * kinfo->num_tc;
kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
- /* if this is on going reset then we need to re-allocate the hnae queues
- * as well since number of TPQs from PF might have changed.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, kinfo->tqp);
-
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
@@ -868,6 +925,9 @@ static int hclgevf_unmap_ring_from_vector(
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret, vector_id;
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
vector_id = hclgevf_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&handle->pdev->dev,
@@ -956,13 +1016,6 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
return status;
}
-static int hclgevf_get_queue_id(struct hnae3_queue *queue)
-{
- struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
-
- return tqp->index;
-}
-
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -1097,38 +1150,87 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
2, true, NULL, 0);
}
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
+ sizeof(new_mtu), true, NULL, 0);
+}
+
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->nic_client;
struct hnae3_handle *handle = &hdev->nic;
+ int ret;
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
- return client->ops->reset_notify(handle, type);
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
+
+ return ret;
+}
+
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
+static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
+ unsigned long delay_us,
+ unsigned long wait_cnt)
+{
+ unsigned long cnt = 0;
+
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < wait_cnt)
+ usleep_range(delay_us, delay_us * 2);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_RESET_WAIT_MS 500
-#define HCLGEVF_RESET_WAIT_CNT 20
- u32 val, cnt = 0;
+#define HCLGEVF_RESET_WAIT_US 20000
+#define HCLGEVF_RESET_WAIT_CNT 2000
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US \
+ (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
+
+ u32 val;
+ int ret;
/* wait to check the hardware reset completion status */
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
- msleep(HCLGEVF_RESET_WAIT_MS);
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- cnt++;
- }
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ return hclgevf_flr_poll_timeout(hdev,
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_CNT);
+
+ ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
- if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
- dev_warn(&hdev->pdev->dev,
- "could'nt get reset done status from h/w, timeout!\n");
- return -EBUSY;
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+			"couldn't get reset done status from h/w, timeout!\n");
+ return ret;
}
	/* we will wait a bit more to let the reset of the stack complete. This
@@ -1145,10 +1247,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
int ret;
/* uninitialize the nic client */
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
/* re-initialize the hclge device */
- ret = hclgevf_init_hdev(hdev);
+ ret = hclgevf_reset_hdev(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"hclge device re-init failed, VF is disabled!\n");
@@ -1156,22 +1260,60 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
}
/* bring up the nic client again */
- hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
return 0;
}
+static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_VF_FUNC_RESET:
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
+ 0, true, NULL, sizeof(u8));
+ break;
+ case HNAE3_FLR_RESET:
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ default:
+ break;
+ }
+
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
+ hdev->reset_type, ret);
+
+ return ret;
+}
+
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
+ /* Initialize ae_dev reset status as well, in case enet layer wants to
+ * know if device is undergoing reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
+ ret = hclgevf_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
+
/* check if VF could successfully fetch the hardware reset completion
* status from the hardware
*/
@@ -1181,58 +1323,121 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
dev_err(&hdev->pdev->dev,
"VF failed(=%d) to fetch H/W reset completion status\n",
ret);
-
- dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
- rtnl_lock();
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-
- rtnl_unlock();
- return ret;
+ goto err_reset;
}
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
ret = hclgevf_reset_stack(hdev);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
+ goto err_reset_lock;
+ }
/* bring up the nic to enable TX/RX again */
- hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
+ hdev->last_reset_time = jiffies;
+ ae_dev->reset_type = HNAE3_NONE_RESET;
+
return ret;
-}
+err_reset_lock:
+ rtnl_unlock();
+err_reset:
+	/* When a VF reset fails, only a higher level reset asserted by the
+	 * PF can recover it, so re-initialize the command queue so that the
+	 * higher level reset event can still be received.
+	 */
+ hclgevf_cmd_init(hdev);
+ dev_err(&hdev->pdev->dev, "failed to reset VF\n");
-static int hclgevf_do_reset(struct hclgevf_dev *hdev)
-{
- int status;
- u8 respmsg;
+ return ret;
+}
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
- 0, false, &respmsg, sizeof(u8));
- if (status)
- dev_err(&hdev->pdev->dev,
- "VF reset request to PF failed(=%d)\n", status);
+static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
+ unsigned long *addr)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_VF_RESET, addr)) {
+ rst_level = HNAE3_VF_RESET;
+ clear_bit(HNAE3_VF_RESET, addr);
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
+ rst_level = HNAE3_VF_FULL_RESET;
+ clear_bit(HNAE3_VF_FULL_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_PF_FUNC_RESET;
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_FUNC_RESET;
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
- return status;
+ return rst_level;
}
static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_handle *handle)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclgevf_dev *hdev = ae_dev->priv;
dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
- handle->reset_level = HNAE3_VF_RESET;
+ if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclgevf_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
- handle->last_reset_time = jiffies;
+ hdev->last_reset_time = jiffies;
+}
+
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGEVF_FLR_WAIT_MS 100
+#define HCLGEVF_FLR_WAIT_CNT 50
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclgevf_reset_event(hdev->pdev, NULL);
+
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGEVF_FLR_WAIT_CNT)
+ msleep(HCLGEVF_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1321,9 +1526,15 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
hdev->reset_attempts = 0;
- ret = hclgevf_reset(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
+ hdev->last_reset_time = jiffies;
+ while ((hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending))
+ != HNAE3_NONE_RESET) {
+ ret = hclgevf_reset(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF stack reset failed %d.\n", ret);
+ }
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
@@ -1352,19 +1563,17 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
if (hdev->reset_attempts > 3) {
/* prepare for full reset of stack + pcie interface */
- hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
+ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- /* request PF for resetting this VF via mailbox */
- ret = hclgevf_do_reset(hdev);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "VF rst fail, stack will call\n");
+ set_bit(hdev->reset_level, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
+ hclgevf_reset_task_schedule(hdev);
}
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
@@ -1386,6 +1595,28 @@ static void hclgevf_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclgevf_keep_alive_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
+
+ schedule_work(&hdev->keep_alive_task);
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+}
+
+static void hclgevf_keep_alive_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+ u8 respmsg;
+ int ret;
+
+ hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
+ 0, false, &respmsg, sizeof(u8));
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+			"VF failed(%d) to send keep alive cmd\n", ret);
+}
+
static void hclgevf_service_task(struct work_struct *work)
{
struct hclgevf_dev *hdev;
@@ -1407,24 +1638,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}
-static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ u32 *clearval)
{
- u32 cmdq_src_reg;
+ u32 cmdq_src_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+ rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev,
+ "receive reset interrupt 0x%x!\n", rst_ing_reg);
+ set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGEVF_VECTOR0_EVENT_RST;
+ }
+
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
*clearval = cmdq_src_reg;
- return true;
+ return HCLGEVF_VECTOR0_EVENT_MBX;
}
dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
- return false;
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
}
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
@@ -1434,19 +1678,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
+ enum hclgevf_evt_cause event_cause;
struct hclgevf_dev *hdev = data;
u32 clearval;
hclgevf_enable_vector(&hdev->misc_vector, false);
- if (!hclgevf_check_event_cause(hdev, &clearval))
- goto skip_sched;
-
- hclgevf_mbx_handler(hdev);
+ event_cause = hclgevf_check_evt_cause(hdev, &clearval);
- hclgevf_clear_event_cause(hdev, clearval);
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+ hclgevf_reset_task_schedule(hdev);
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+ break;
+ default:
+ break;
+ }
-skip_sched:
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
+ hclgevf_clear_event_cause(hdev, clearval);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+ }
return IRQ_HANDLED;
}
@@ -1468,7 +1721,7 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
- struct hclgevf_dev *hdev = ae_dev->priv;
+ struct hclgevf_dev *hdev;
hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
if (!hdev)
@@ -1504,6 +1757,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
return 0;
}
+static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+{
+ struct hclgevf_cfg_gro_status_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ false);
+ req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF GRO hardware config cmd failed, ret = %d.\n", ret);
+
+ return ret;
+}
+
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
@@ -1564,23 +1840,22 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
false);
}
-static int hclgevf_ae_start(struct hnae3_handle *handle)
+static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* ring enable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, true);
+ if (enable) {
+ mod_timer(&hdev->service_timer, jiffies + HZ);
+ } else {
+ del_timer_sync(&hdev->service_timer);
+ cancel_work_sync(&hdev->service_task);
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
@@ -1588,45 +1863,59 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
hclgevf_request_link_info(hdev);
clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- mod_timer(&hdev->service_timer, jiffies + HZ);
return 0;
}
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
+ int i;
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* Ring disable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, false);
- }
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+ hclgevf_reset_tqp(handle, i);
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
hclgevf_update_link_status(hdev, 0);
}
-static void hclgevf_state_init(struct hclgevf_dev *hdev)
+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data;
+
+ msg_data = alive ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
+ 0, &msg_data, 1, false, NULL, 0);
+}
+
+static int hclgevf_client_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+ return hclgevf_set_alive(handle, true);
+}
+
+static void hclgevf_client_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_set_alive(handle, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "%s failed %d\n", __func__, ret);
+ del_timer_sync(&hdev->keep_alive_timer);
+ cancel_work_sync(&hdev->keep_alive_task);
+}
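Note: the keep_alive_timer/keep_alive_task pair wired up later in hclgevf_init_ae_dev() is not shown in these hunks; a minimal sketch of how such a pair typically cooperates follows. The HCLGE_MBX_KEEP_ALIVE opcode and the 2 * HZ rearm period are assumptions, not taken from this patch.

/* Sketch only: the timer re-arms itself and defers the mailbox send to
 * process context, since hclgevf_send_mbx_msg() may sleep.
 */
static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); /* assumed period */
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, keep_alive_task);
	u8 respmsg;
	int ret;

	/* HCLGE_MBX_KEEP_ALIVE is assumed here; the PF side would use it to
	 * notice a stalled VF.
	 */
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 0,
				   false, &respmsg, sizeof(respmsg));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF keep alive cmd failed, ret = %d\n", ret);
}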
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
/* setup tasks for the MBX */
INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
@@ -1668,10 +1957,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
int vectors;
int i;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
vectors = pci_alloc_irq_vectors(pdev,
hdev->roce_base_msix_offset + 1,
@@ -1710,6 +1995,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(int), GFP_KERNEL);
if (!hdev->vector_irq) {
+ devm_kfree(&pdev->dev, hdev->vector_status);
pci_free_irq_vectors(pdev);
return -ENOMEM;
}
@@ -1721,6 +2007,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ devm_kfree(&pdev->dev, hdev->vector_status);
+ devm_kfree(&pdev->dev, hdev->vector_irq);
pci_free_irq_vectors(pdev);
}
@@ -1728,10 +2016,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
int ret = 0;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
hclgevf_get_misc_vector(hdev);
ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
@@ -1861,14 +2145,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
struct hclgevf_hw *hw;
int ret;
- /* check if we need to skip initialization of pci. This will happen if
- * device is undergoing VF reset. Otherwise, we would need to
- * re-initialize pci interface again i.e. when device is not going
- * through *any* reset or actually undergoing full reset.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
@@ -1957,23 +2233,98 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
return 0;
}
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = 0;
+
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+ test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ pci_set_master(pdev);
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed(%d) to init MSI/MSI-X\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ hclgevf_uninit_msi(hdev);
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ return ret;
+ }
+
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ return ret;
+}
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int ret;
- /* check if device is on-going full reset(i.e. pcie as well) */
- if (hclgevf_dev_ongoing_full_reset(hdev)) {
- dev_warn(&pdev->dev, "device is going full reset\n");
- hclgevf_uninit_hdev(hdev);
+ ret = hclgevf_pci_reset(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci reset failed %d\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cmd failed %d\n", ret);
+ return ret;
}
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ return ret;
+ }
+
+ dev_info(&hdev->pdev->dev, "Reset done\n");
+
+ return 0;
+}
+
+static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
ret = hclgevf_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI initialization failed\n");
return ret;
}
+ ret = hclgevf_cmd_queue_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+ goto err_cmd_queue_init;
+ }
+
ret = hclgevf_cmd_init(hdev);
if (ret)
goto err_cmd_init;
@@ -1983,16 +2334,17 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"Query vf status error, ret = %d.\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
hclgevf_state_init(hdev);
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
@@ -2001,6 +2353,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_misc_irq_init;
}
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+
ret = hclgevf_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
@@ -2019,6 +2373,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ goto err_config;
+
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2034,6 +2392,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ hdev->last_reset_time = jiffies;
pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
return 0;
@@ -2043,25 +2402,31 @@ err_config:
err_misc_irq_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
-err_query_vf:
- hclgevf_cmd_uninit(hdev);
err_cmd_init:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_queue_init:
hclgevf_pci_uninit(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
}
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
hclgevf_state_uninit(hdev);
- hclgevf_misc_irq_uninit(hdev);
- hclgevf_cmd_uninit(hdev);
- hclgevf_uninit_msi(hdev);
+
+ if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ }
+
hclgevf_pci_uninit(hdev);
+ hclgevf_cmd_uninit(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
int ret;
ret = hclgevf_alloc_hdev(ae_dev);
@@ -2071,10 +2436,16 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclgevf_init_hdev(ae_dev->priv);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "hclge device initialization failed\n");
+ return ret;
+ }
- return ret;
+ hdev = ae_dev->priv;
+ timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
+ INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
+
+ return 0;
}
static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -2151,6 +2522,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
+static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_config_gro(hdev, enable);
+}
+
static void hclgevf_get_media_type(struct hnae3_handle *handle,
u8 *media_type)
{
@@ -2159,13 +2537,104 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle,
*media_type = hdev->hw.mac.media_type;
}
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+}
+
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->reset_count;
+}
+
+#define MAX_SEPARATE_NUM 4
+#define SEPARATOR_VALUE 0xFFFFFFFF
+#define REG_NUM_PER_LINE 4
+#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+
+static int hclgevf_get_regs_len(struct hnae3_handle *handle)
+{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+
+ return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
+}
+
+static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, j, reg_um, separator_num;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+
+ /* fetch per-VF register values from the VF PCIe register space */

+ reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < hdev->num_tqps; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ 0x200 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+
+ reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ 4 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+}
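Note: hclgevf_get_regs_len() and hclgevf_get_regs() agree on the buffer size because each register group is padded with SEPARATOR_VALUE words out to whole 4-word lines plus one extra separator line. A standalone check of that sizing math, using a hypothetical 10-entry address list (the real lists' sizes are not visible in these hunks):

#include <stdint.h>
#include <stdio.h>

#define REG_NUM_PER_LINE  4
#define REG_LEN_PER_LINE  (REG_NUM_PER_LINE * sizeof(uint32_t))
#define MAX_SEPARATE_NUM  4

int main(void)
{
	size_t reg_cnt = 10;                                /* illustrative */
	size_t len = (reg_cnt * sizeof(uint32_t) / REG_LEN_PER_LINE + 1) *
		     REG_LEN_PER_LINE;                      /* get_regs_len() view */
	size_t sep = MAX_SEPARATE_NUM - reg_cnt % REG_NUM_PER_LINE;
	size_t emitted = (reg_cnt + sep) * sizeof(uint32_t); /* get_regs() view */

	printf("len=%zu emitted=%zu\n", len, emitted);      /* both print 48 */
	return 0;
}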
+
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .flr_prepare = hclgevf_flr_prepare,
+ .flr_done = hclgevf_flr_done,
.init_client_instance = hclgevf_init_client_instance,
.uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
.stop = hclgevf_ae_stop,
+ .client_start = hclgevf_client_start,
+ .client_stop = hclgevf_client_stop,
.map_ring_to_vector = hclgevf_map_ring_to_vector,
.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
.get_vector = hclgevf_get_vector,
@@ -2193,11 +2662,21 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_vlan_filter = hclgevf_set_vlan_filter,
.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
+ .set_default_reset_request = hclgevf_set_def_reset_request,
.get_channels = hclgevf_get_channels,
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
+ .get_regs_len = hclgevf_get_regs_len,
+ .get_regs = hclgevf_get_regs,
.get_status = hclgevf_get_status,
.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
.get_media_type = hclgevf_get_media_type,
+ .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
+ .ae_dev_resetting = hclgevf_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+ .set_gro_en = hclgevf_gro_en,
+ .set_mtu = hclgevf_set_mtu,
+ .get_global_queue_id = hclgevf_get_qid_global,
+ .set_timer_task = hclgevf_set_timer_task,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index aed241e8ffab..787bc06944e5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -27,15 +27,77 @@
#define HCLGEVF_VECTOR_REG_OFFSET 0x4
#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
+/* bar registers for cmdq */
+#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000
+#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004
+#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008
+#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010
+#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014
+#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018
+#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C
+#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020
+#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024
+#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGEVF_CMDQ_INTR_SRC_REG 0x27100
+#define HCLGEVF_CMDQ_INTR_STS_REG 0x27104
+#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
+#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
+
+/* bar registers for common func */
+#define HCLGEVF_GRO_EN_REG 0x28000
+
+/* bar registers for rcb */
+#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000
+#define HCLGEVF_RING_RX_ADDR_H_REG 0x80004
+#define HCLGEVF_RING_RX_BD_NUM_REG 0x80008
+#define HCLGEVF_RING_RX_BD_LENGTH_REG 0x8000C
+#define HCLGEVF_RING_RX_MERGE_EN_REG 0x80014
+#define HCLGEVF_RING_RX_TAIL_REG 0x80018
+#define HCLGEVF_RING_RX_HEAD_REG 0x8001C
+#define HCLGEVF_RING_RX_FBD_NUM_REG 0x80020
+#define HCLGEVF_RING_RX_OFFSET_REG 0x80024
+#define HCLGEVF_RING_RX_FBD_OFFSET_REG 0x80028
+#define HCLGEVF_RING_RX_STASH_REG 0x80030
+#define HCLGEVF_RING_RX_BD_ERR_REG 0x80034
+#define HCLGEVF_RING_TX_ADDR_L_REG 0x80040
+#define HCLGEVF_RING_TX_ADDR_H_REG 0x80044
+#define HCLGEVF_RING_TX_BD_NUM_REG 0x80048
+#define HCLGEVF_RING_TX_PRIORITY_REG 0x8004C
+#define HCLGEVF_RING_TX_TC_REG 0x80050
+#define HCLGEVF_RING_TX_MERGE_EN_REG 0x80054
+#define HCLGEVF_RING_TX_TAIL_REG 0x80058
+#define HCLGEVF_RING_TX_HEAD_REG 0x8005C
+#define HCLGEVF_RING_TX_FBD_NUM_REG 0x80060
+#define HCLGEVF_RING_TX_OFFSET_REG 0x80064
+#define HCLGEVF_RING_TX_EBD_NUM_REG 0x80068
+#define HCLGEVF_RING_TX_EBD_OFFSET_REG 0x80070
+#define HCLGEVF_RING_TX_BD_ERR_REG 0x80074
+#define HCLGEVF_RING_EN_REG 0x80090
+
+/* bar registers for tqp interrupt */
+#define HCLGEVF_TQP_INTR_CTRL_REG 0x20000
+#define HCLGEVF_TQP_INTR_GL0_REG 0x20100
+#define HCLGEVF_TQP_INTR_GL1_REG 0x20200
+#define HCLGEVF_TQP_INTR_GL2_REG 0x20300
+#define HCLGEVF_TQP_INTR_RL_REG 0x20900
+
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B 2
#define HCLGEVF_TQP_RESET_TRY_TIMES 10
/* Reset related Registers */
-#define HCLGEVF_FUN_RST_ING 0x20C00
-#define HCLGEVF_FUN_RST_ING_B 0
+#define HCLGEVF_RST_ING 0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+ (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+ HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
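Note: HCLGEVF_RST_ING_BITS itself is not referenced in the hunks shown; presumably the reset-wait path polls HCLGEVF_RST_ING and masks with it to decide whether any function/global/core/IMP reset is still asserted, along these lines (a sketch, not the driver's actual wait loop):

static inline bool hclgevf_any_reset_asserted(struct hclgevf_dev *hdev)
{
	/* assumed usage: any of the four *_RST_ING bits set means a reset
	 * is still in progress on the hardware
	 */
	return !!(hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING) &
		  HCLGEVF_RST_ING_BITS);
}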
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
@@ -54,17 +116,25 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+enum hclgevf_evt_cause {
+ HCLGEVF_VECTOR0_EVENT_RST,
+ HCLGEVF_VECTOR0_EVENT_MBX,
+ HCLGEVF_VECTOR0_EVENT_OTHER,
+};
+
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
+ HCLGEVF_STATE_IRQ_INITED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
+ HCLGEVF_STATE_CMD_DISABLE,
};
#define HCLGEVF_MPF_ENBALE 1
@@ -145,10 +215,17 @@ struct hclgevf_dev {
struct hclgevf_misc_vector misc_vector;
struct hclgevf_rss_cfg rss_cfg;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long default_reset_request;
+ unsigned long last_reset_time;
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
unsigned long reset_state; /* requested, pending */
+ unsigned long reset_count; /* number of resets that have been done */
u32 reset_attempts;
u32 fw_version;
@@ -178,7 +255,9 @@ struct hclgevf_dev {
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
struct timer_list service_timer;
+ struct timer_list keep_alive_timer;
struct work_struct service_task;
+ struct work_struct keep_alive_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -192,18 +271,9 @@ struct hclgevf_dev {
u32 flag;
};
-static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
-{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_RESET));
-}
-
-static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
+ return !!hdev->reset_pending;
}
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index e9d5a4f96304..84653f58b2d1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -26,7 +26,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
u8 *resp_data, u16 resp_len)
{
#define HCLGEVF_MAX_TRY_TIMES 500
-#define HCLGEVF_SLEEP_USCOEND 1000
+#define HCLGEVF_SLEEP_USECOND 1000
struct hclgevf_mbx_resp_status *mbx_resp;
u16 r_code0, r_code1;
int i = 0;
@@ -40,7 +40,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
- udelay(HCLGEVF_SLEEP_USCOEND);
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ return -EIO;
+
+ usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
i++;
}
@@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq = &hdev->hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev, "vf crq need init\n");
+ return;
+ }
+
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
@@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
+ enum hnae3_reset_type reset_type;
u16 link_status;
u16 *msg_q;
u8 duplex;
@@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev,
+ "vf crq need init in async\n");
+ return;
+ }
+
msg_q = hdev->arq.msg_q[hdev->arq.head];
switch (msg_q[0]) {
@@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
- hdev->nic.reset_level = HNAE3_VF_RESET;
+ reset_type = le16_to_cpu(msg_q[1]);
+ set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 097b5502603f..d1a7d2522d82 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -50,6 +50,8 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_GET_LINK_STATE = 24,
+ HINIC_PORT_CMD_SET_RX_CSUM = 26,
+
HINIC_PORT_CMD_SET_PORT_STATE = 41,
HINIC_PORT_CMD_FWCTXT_INIT = 69,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index f92f1bf3901a..1dfa7eb05c10 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -74,12 +74,6 @@
((void *)((cmdq_pages)->shadow_page_vaddr) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
-#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
- (wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \
- & ((wq)->num_q_pages - 1))
-
#define WQ_PAGE_ADDR(wq, idx) \
((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
@@ -93,6 +87,17 @@
(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
/ (wq)->max_wqe_size)
+static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
+{
+ return (((idx) & ((wq)->num_wqebbs_per_page - 1))
+ << (wq)->wqebb_size_shift);
+}
+
+static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
+{
+ return (((idx) >> ((wq)->wqebbs_per_page_shift))
+ & ((wq)->num_q_pages - 1));
+}
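Note: the macro-to-inline conversion also switches the page-offset/page-number math from multiply/divide to shifts; that is only equivalent when wqebb_size and num_wqebbs_per_page are powers of two, which is exactly what the new is_power_of_2() checks below enforce. A standalone check of the equivalence (the sizes are illustrative, not taken from the driver):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t wqebb_size = 64;                  /* must be a power of two */
	uint16_t wqebbs_per_page = 128;            /* must be a power of two */
	unsigned int size_shift = __builtin_ctz(wqebb_size);      /* ilog2() */
	unsigned int page_shift = __builtin_ctz(wqebbs_per_page); /* ilog2() */

	for (uint32_t idx = 0; idx < 4096; idx++) {
		/* WQE_PAGE_OFF: multiply by wqebb_size vs. left shift */
		assert(((idx & (wqebbs_per_page - 1)) << size_shift) ==
		       (idx & (wqebbs_per_page - 1)) * wqebb_size);
		/* WQE_PAGE_NUM: divide by wqebbs_per_page vs. right shift
		 * (the final mask with num_q_pages - 1 is unchanged)
		 */
		assert((idx >> page_shift) == idx / wqebbs_per_page);
	}
	return 0;
}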
/**
* queue_alloc_page - allocate page for Queue
* @hwif: HW interface for allocating DMA
@@ -513,10 +518,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
int err;
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
@@ -530,9 +536,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
return -EINVAL;
}
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
@@ -550,7 +558,8 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
wq->q_depth = q_depth;
wq->max_wqe_size = max_wqe_size;
wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
+ wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
+ wq->wqebb_size_shift = wqebb_size_shift;
wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
@@ -604,11 +613,13 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
u16 q_depth, u16 max_wqe_size)
{
struct pci_dev *pdev = hwif->pdev;
+ u16 num_wqebbs_per_page_shift;
u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
int i, j, err = -ENOMEM;
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
@@ -622,9 +633,11 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
return -EINVAL;
}
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
@@ -636,6 +649,7 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
return err;
}
+ num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
for (i = 0; i < cmdq_blocks; i++) {
wq[i].hwif = hwif;
@@ -647,7 +661,8 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
wq[i].q_depth = q_depth;
wq[i].max_wqe_size = max_wqe_size;
wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
+ wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
+ wq[i].wqebb_size_shift = wqebb_size_shift;
wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
@@ -741,7 +756,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
atomic_add(num_wqebbs, &wq->delta);
@@ -795,7 +810,8 @@ void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
**/
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
atomic_add(num_wqebbs, &wq->cons_idx);
@@ -813,7 +829,8 @@ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
u16 *cons_idx)
{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
u16 curr_cons_idx, end_cons_idx;
int curr_pg, end_pg;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 9b66545ba563..0a936cd6709b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -39,7 +39,8 @@ struct hinic_wq {
u16 q_depth;
u16 max_wqe_size;
u16 num_wqebbs_per_page;
-
+ u16 wqebbs_per_page_shift;
+ u16 wqebb_size_shift;
/* The addresses are 64 bit in the HW */
u64 block_paddr;
void **shadow_block_vaddr;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index 9754d6ed5f4a..138941527872 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -170,6 +170,10 @@
#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0
+
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU
+
#define HINIC_RQ_CQE_STATUS_GET(val, member) \
(((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
HINIC_RQ_CQE_STATUS_##member##_MASK)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index fdf2bdb6b0d0..6d48dc62a44b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -600,9 +600,6 @@ static int add_mac_addr(struct net_device *netdev, const u8 *addr)
u16 vid = 0;
int err;
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
-
netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
@@ -726,6 +723,7 @@ static void set_rx_mode(struct work_struct *work)
{
struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
+ struct netdev_hw_addr *ha;
netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
@@ -733,6 +731,9 @@ static void set_rx_mode(struct work_struct *work)
__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
+
+ netdev_for_each_mc_addr(ha, nic_dev->netdev)
+ add_mac_addr(nic_dev->netdev, ha->addr);
}
static void hinic_set_rx_mode(struct net_device *netdev)
@@ -806,7 +807,8 @@ static const struct net_device_ops hinic_netdev_ops = {
static void netdev_features_init(struct net_device *netdev)
{
netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
netdev->vlan_features = netdev->hw_features;
@@ -869,12 +871,16 @@ static int set_features(struct hinic_dev *nic_dev,
netdev_features_t features, bool force_change)
{
netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
+ u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
int err = 0;
if (changed & NETIF_F_TSO)
err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
+ if (changed & NETIF_F_RXCSUM)
+ err = hinic_set_rx_csum_offload(nic_dev, csum_en);
+
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 7575a7d3bd9f..122c93597268 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -409,3 +409,33 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
return 0;
}
+
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
+{
+ struct hinic_checksum_offload rx_csum_cfg = {0};
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u16 out_size;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+ rx_csum_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rx_csum_cfg.rx_csum_offload = en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
+ &rx_csum_cfg, sizeof(rx_csum_cfg),
+ &rx_csum_cfg, &out_size);
+ if (err || !out_size || rx_csum_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rx csum offload, ret = %d\n",
+ rx_csum_cfg.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index f6e3220fe28f..02d896eed455 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -183,6 +183,15 @@ struct hinic_tso_config {
u8 resv2[3];
};
+struct hinic_checksum_offload {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_csum_offload;
+};
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -213,4 +222,5 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 4c0f7eda1166..0098b206e7e9 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -43,6 +43,7 @@
#define RX_IRQ_NO_LLI_TIMER 0
#define RX_IRQ_NO_CREDIT 0
#define RX_IRQ_NO_RESEND_TIMER 0
+#define HINIC_RX_BUFFER_WRITE 16
/**
* hinic_rxq_clean_stats - Clean the statistics of specific queue
@@ -89,6 +90,28 @@ static void rxq_stats_init(struct hinic_rxq *rxq)
hinic_rxq_clean_stats(rxq);
}
+static void rx_csum(struct hinic_rxq *rxq, u16 cons_idx,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_rq_cqe *cqe;
+ struct hinic_rq *rq;
+ u32 csum_err;
+ u32 status;
+
+ rq = rxq->rq;
+ cqe = rq->cqe[cons_idx];
+ status = be32_to_cpu(cqe->status);
+ csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (!csum_err)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+}
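Note: rx_csum() relies on the HINIC_RQ_CQE_STATUS_GET() accessor and the CSUM_ERR shift/mask added to hinic_hw_wqe.h above, so the call is a plain shift-and-mask on the CQE status word. A minimal illustration (the status value below is made up):

#include <stdint.h>
#include <stdio.h>

#define HINIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT  0
#define HINIC_RQ_CQE_STATUS_CSUM_ERR_MASK   0xFFFFU

#define HINIC_RQ_CQE_STATUS_GET(val, member) \
	(((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
	 HINIC_RQ_CQE_STATUS_##member##_MASK)

int main(void)
{
	uint32_t status = 0x00010000;	/* made-up CQE status word */
	uint32_t csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

	/* csum_err == 0 here, so the skb would get CHECKSUM_UNNECESSARY */
	printf("csum_err = %u\n", csum_err);
	return 0;
}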
/**
* rx_alloc_skb - allocate skb and map it to dma address
* @rxq: rx queue
@@ -209,7 +232,6 @@ skb_out:
hinic_rq_update(rxq->rq, prod_idx);
}
- tasklet_schedule(&rxq->rx_task);
return i;
}
@@ -237,17 +259,6 @@ static void free_all_rx_skbs(struct hinic_rxq *rxq)
}
/**
- * rx_alloc_task - tasklet for queue allocation
- * @data: rx queue
- **/
-static void rx_alloc_task(unsigned long data)
-{
- struct hinic_rxq *rxq = (struct hinic_rxq *)data;
-
- (void)rx_alloc_pkts(rxq);
-}
-
-/**
* rx_recv_jumbo_pkt - Rx handler for jumbo pkt
* @rxq: rx queue
* @head_skb: the first skb in the list
@@ -311,6 +322,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
u64 pkt_len = 0, rx_bytes = 0;
struct hinic_rq_wqe *rq_wqe;
+ unsigned int free_wqebbs;
int num_wqes, pkts = 0;
struct hinic_sge sge;
struct sk_buff *skb;
@@ -328,6 +340,8 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
+ rx_csum(rxq, ci, skb);
+
prefetch(skb->data);
pkt_len = sge.len;
@@ -352,8 +366,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
rx_bytes += pkt_len;
}
- if (pkts)
- tasklet_schedule(&rxq->rx_task); /* rx_alloc_pkts */
+ free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
+ if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
+ rx_alloc_pkts(rxq);
u64_stats_update_begin(&rxq->rxq_stats.syncp);
rxq->rxq_stats.pkts += pkts;
@@ -470,8 +485,6 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id);
- tasklet_init(&rxq->rx_task, rx_alloc_task, (unsigned long)rxq);
-
pkts = rx_alloc_pkts(rxq);
if (!pkts) {
err = -ENOMEM;
@@ -488,7 +501,6 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
err_req_rx_irq:
err_rx_pkts:
- tasklet_kill(&rxq->rx_task);
free_all_rx_skbs(rxq);
devm_kfree(&netdev->dev, rxq->irq_name);
return err;
@@ -504,7 +516,6 @@ void hinic_clean_rxq(struct hinic_rxq *rxq)
rx_free_irq(rxq);
- tasklet_kill(&rxq->rx_task);
free_all_rx_skbs(rxq);
devm_kfree(&netdev->dev, rxq->irq_name);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 27c9af4b1c12..f8ed3fa6c8ee 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -23,6 +23,10 @@
#include "hinic_hw_qp.h"
+#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
+#define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7)
+#define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8)
+
struct hinic_rxq_stats {
u64 pkts;
u64 bytes;
@@ -38,8 +42,6 @@ struct hinic_rxq {
char *irq_name;
- struct tasklet_struct rx_task;
-
struct napi_struct napi;
};
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 760b2ad8e295..209255495bc9 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2455,7 +2455,8 @@ static void emac_adjust_link(struct net_device *ndev)
dev->phy.duplex = phy->duplex;
dev->phy.pause = phy->pause;
dev->phy.asym_pause = phy->asym_pause;
- dev->phy.advertising = phy->advertising;
+ ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
+ phy->advertising);
}
static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
@@ -2490,7 +2491,8 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
phy_dev->autoneg = phy->autoneg;
phy_dev->speed = phy->speed;
phy_dev->duplex = phy->duplex;
- phy_dev->advertising = phy->advertising;
+ ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
+ phy->advertising);
return phy_start_aneg(phy_dev);
}
@@ -2624,7 +2626,8 @@ static int emac_dt_phy_connect(struct emac_instance *dev,
dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
dev->phy.def->name = dev->phy_dev->drv->name;
dev->phy.def->ops = &emac_dt_mdio_phy_ops;
- dev->phy.features = dev->phy_dev->supported;
+ ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
+ dev->phy_dev->supported);
dev->phy.address = dev->phy_dev->mdio.addr;
dev->phy.mode = dev->phy_dev->interface;
return 0;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 67cc6d9c8fd7..5ecbb1adcf3b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -773,11 +773,8 @@ static void release_napi(struct ibmvnic_adapter *adapter)
return;
for (i = 0; i < adapter->num_active_rx_napi; i++) {
- if (&adapter->napi[i]) {
- netdev_dbg(adapter->netdev,
- "Releasing napi[%d]\n", i);
- netif_napi_del(&adapter->napi[i]);
- }
+ netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
+ netif_napi_del(&adapter->napi[i]);
}
kfree(adapter->napi);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 59e1bc0f609e..31fb76ee9d82 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -33,7 +33,7 @@ config E100
to identify the adapter.
More specific information on configuring the driver is in
- <file:Documentation/networking/e100.rst>.
+ <file:Documentation/networking/device_drivers/intel/e100.rst>.
To compile this driver as a module, choose M here. The module
will be called e100.
@@ -49,7 +49,7 @@ config E1000
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.rst>.
+ <file:Documentation/networking/device_drivers/intel/e1000.rst>.
To compile this driver as a module, choose M here. The module
will be called e1000.
@@ -69,7 +69,7 @@ config E1000E
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/e1000e.rst>.
+ <file:Documentation/networking/device_drivers/intel/e1000e.rst>.
To compile this driver as a module, choose M here. The module
will be called e1000e.
@@ -97,7 +97,7 @@ config IGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/igb.rst>.
+ <file:Documentation/networking/device_drivers/intel/igb.rst>.
To compile this driver as a module, choose M here. The module
will be called igb.
@@ -133,7 +133,7 @@ config IGBVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/igbvf.rst>.
+ <file:Documentation/networking/device_drivers/intel/igbvf.rst>.
To compile this driver as a module, choose M here. The module
will be called igbvf.
@@ -150,7 +150,7 @@ config IXGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgb.rst>.
+ <file:Documentation/networking/device_drivers/intel/ixgb.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgb.
@@ -159,6 +159,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
select MDIO
+ select MDIO_DEVICE
imply PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
@@ -168,7 +169,7 @@ config IXGBE
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbe.rst>.
+ <file:Documentation/networking/device_drivers/intel/ixgbe.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgbe.
@@ -220,7 +221,7 @@ config IXGBEVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbevf.rst>.
+ <file:Documentation/networking/device_drivers/intel/ixgbevf.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgbevf. MSI-X interrupt support is required
@@ -247,7 +248,7 @@ config I40E
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/i40e.rst>.
+ <file:Documentation/networking/device_drivers/intel/i40e.rst>.
To compile this driver as a module, choose M here. The module
will be called i40e.
@@ -282,7 +283,7 @@ config I40EVF
This driver was formerly named i40evf.
More specific information on configuring the driver is in
- <file:Documentation/networking/iavf.rst>.
+ <file:Documentation/networking/device_drivers/intel/iavf.rst>.
To compile this driver as a module, choose M here. The module
will be called iavf. MSI-X interrupt support is required
@@ -300,7 +301,7 @@ config ICE
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ice.rst>.
+ <file:Documentation/networking/device_drivers/intel/ice.rst>.
To compile this driver as a module, choose M here. The module
will be called ice.
@@ -318,7 +319,7 @@ config FM10K
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/fm10k.rst>.
+ <file:Documentation/networking/device_drivers/intel/fm10k.rst>.
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 7c4b55482f72..0fd268070fb4 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1345,8 +1345,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
fw = e100_request_firmware(nic);
/* If it's NULL, then no ucode is required */
- if (!fw || IS_ERR(fw))
- return PTR_ERR(fw);
+ if (IS_ERR_OR_NULL(fw))
+ return PTR_ERR_OR_ZERO(fw);
if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
netif_err(nic, probe, nic->netdev,
@@ -2225,11 +2225,13 @@ static int e100_poll(struct napi_struct *napi, int budget)
e100_rx_clean(nic, &work_done, budget);
e100_tx_clean(nic);
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ /* If budget fully consumed, continue polling */
+ if (work_done == budget)
+ return budget;
+
+ /* only re-enable interrupt if stack agrees polling is really done */
+ if (likely(napi_complete_done(napi, work_done)))
e100_enable_irq(nic);
- }
return work_done;
}
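Note: the same NAPI completion pattern is applied to e100, e1000, e1000e and fm10k in this series: return the full budget while work remains so the core keeps polling, and only re-enable device interrupts when napi_complete_done() confirms polling is really finished (it returns false while busy-polling keeps the NAPI instance scheduled). A generic sketch of the pattern, with hypothetical helper names:

/* Sketch only: my_adapter, my_clean_rx() and my_enable_irq() are
 * placeholders, not functions from any of the drivers patched above.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *adap = container_of(napi, struct my_adapter, napi);
	int work_done = my_clean_rx(adap, budget);

	/* budget exhausted: stay in polling mode, do not complete NAPI */
	if (work_done == budget)
		return budget;

	/* only re-enable interrupts if the stack agrees polling is done */
	if (likely(napi_complete_done(napi, work_done)))
		my_enable_irq(adap);

	return work_done;
}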
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 43b6d3cec3b3..8fe9af0e2ab7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3803,14 +3803,15 @@ static int e1000_clean(struct napi_struct *napi, int budget)
adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
- if (!tx_clean_complete)
- work_done = budget;
+ if (!tx_clean_complete || work_done == budget)
+ return budget;
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
if (likely(adapter->itr_setting & 3))
e1000_set_itr(adapter);
- napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c760dc72c520..be13227f1697 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -505,6 +505,9 @@ extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
void e1000e_ptp_remove(struct e1000_adapter *adapter);
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+ struct ptp_system_timestamp *sts);
+
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
return hw->phy.ops.reset(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 16a73bd9f4cb..308c006cb41d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2651,9 +2651,9 @@ err:
/**
* e1000e_poll - NAPI Rx polling callback
* @napi: struct associated with this polling callback
- * @weight: number of packets driver is allowed to process this poll
+ * @budget: number of packets driver is allowed to process this poll
**/
-static int e1000e_poll(struct napi_struct *napi, int weight)
+static int e1000e_poll(struct napi_struct *napi, int budget)
{
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
napi);
@@ -2667,16 +2667,17 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
- adapter->clean_rx(adapter->rx_ring, &work_done, weight);
+ adapter->clean_rx(adapter->rx_ring, &work_done, budget);
- if (!tx_cleaned)
- work_done = weight;
+ if (!tx_cleaned || work_done == budget)
+ return budget;
- /* If weight not fully consumed, exit the polling mode */
- if (work_done < weight) {
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
if (adapter->itr_setting & 3)
e1000_set_itr(adapter);
- napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries)
ew32(IMS, adapter->rx_ring->ims_val);
@@ -4319,13 +4320,16 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
/**
* e1000e_sanitize_systim - sanitize raw cycle counter reads
* @hw: pointer to the HW structure
- * @systim: time value read, sanitized and returned
+ * @systim: PHC time value read, sanitized and returned
+ * @sts: structure to hold system time before and after reading SYSTIML,
+ * may be NULL
*
* Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
* check to see that the time is incrementing at a reasonable
* rate and is a multiple of incvalue.
**/
-static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
+static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim,
+ struct ptp_system_timestamp *sts)
{
u64 time_delta, rem, temp;
u64 systim_next;
@@ -4335,7 +4339,9 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
/* latch SYSTIMH on read of SYSTIML */
+ ptp_read_system_prets(sts);
systim_next = (u64)er32(SYSTIML);
+ ptp_read_system_postts(sts);
systim_next |= (u64)er32(SYSTIMH) << 32;
time_delta = systim_next - systim;
@@ -4353,15 +4359,16 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
}
/**
- * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
- * @cc: cyclecounter structure
+ * e1000e_read_systim - read SYSTIM register
+ * @adapter: board private structure
+ * @sts: structure which will contain system time before and after reading
+ * SYSTIML, may be NULL
**/
-static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+ struct ptp_system_timestamp *sts)
{
- struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
- cc);
struct e1000_hw *hw = &adapter->hw;
- u32 systimel, systimeh;
+ u32 systimel, systimel_2, systimeh;
u64 systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
@@ -4369,11 +4376,15 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
* will experience a huge non linear increment in the systime value
* to fix that we test for overflow and if true, we re-read systime.
*/
+ ptp_read_system_prets(sts);
systimel = er32(SYSTIML);
+ ptp_read_system_postts(sts);
systimeh = er32(SYSTIMH);
/* Is systimel so large that overflow is possible? */
if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
- u32 systimel_2 = er32(SYSTIML);
+ ptp_read_system_prets(sts);
+ systimel_2 = er32(SYSTIML);
+ ptp_read_system_postts(sts);
if (systimel > systimel_2) {
/* There was an overflow, read again SYSTIMH, and use
* systimel_2
@@ -4386,12 +4397,24 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
systim |= (u64)systimeh << 32;
if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
- systim = e1000e_sanitize_systim(hw, systim);
+ systim = e1000e_sanitize_systim(hw, systim, sts);
return systim;
}
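Note: because SYSTIMH latching on the SYSTIML read is unreliable here (per the comment above), the driver reads SYSTIML, then SYSTIMH, and if the first SYSTIML value was close to wrapping it reads SYSTIML again: a second value smaller than the first means SYSTIML wrapped in between, so SYSTIMH is re-read and paired with the post-wrap SYSTIML. With made-up values: first SYSTIML 0xFFFFFFF0, SYSTIMH 0x00000010, second SYSTIML 0x00000004 (smaller than the first, so a wrap happened); SYSTIMH re-reads as 0x00000011 and the combined result is 0x0000001100000004, rather than a systim value that jumps by roughly 2^32.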
/**
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
+ cc);
+
+ return e1000e_read_systim(adapter, NULL);
+}
+
+/**
* e1000_sw_init - Initialize general software structures (struct e1000_adapter)
* @adapter: board private structure to initialize
*
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 37c76945ad9b..1a4c65d9feb4 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -161,22 +161,30 @@ static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp,
#endif/*CONFIG_E1000E_HWTS*/
/**
- * e1000e_phc_gettime - Reads the current time from the hardware clock
+ * e1000e_phc_gettimex - Reads the current time from the hardware clock and
+ * system clock
* @ptp: ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec structure to hold the current PHC time
+ * @sts: structure to hold the current system time
*
* Read the timecounter and return the correct value in ns after converting
* it into a struct timespec.
**/
-static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int e1000e_phc_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- u64 ns;
+ u64 cycles, ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
- ns = timecounter_read(&adapter->tc);
+
+ /* NOTE: Non-monotonic SYSTIM readings may be returned */
+ cycles = e1000e_read_systim(adapter, sts);
+ ns = timecounter_cyc2time(&adapter->tc, cycles);
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
*ts = ns_to_timespec64(ns);
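Note: gettimex64 differs from the older gettime64 callback in that the PTP core passes a struct ptp_system_timestamp; the driver brackets the actual device register read with ptp_read_system_prets()/ptp_read_system_postts() (as e1000e_read_systim() now does), so user space gets a system-time window that tightly bounds the PHC sample. A simplified sketch of the calling convention, where read_device_clock() is a placeholder for the driver's hardware access:

/* Sketch only: read_device_clock() is hypothetical; the prets/postts
 * helpers stamp system time immediately before and after the device read
 * so PHC time can be correlated with system time.
 */
static int my_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			   struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* system time just before HW read */
	ns = read_device_clock(ptp);	/* placeholder device access */
	ptp_read_system_postts(sts);	/* system time just after HW read */

	*ts = ns_to_timespec64(ns);
	return 0;
}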
@@ -232,9 +240,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec64 ts;
+ u64 ns;
- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&adapter->tc);
+ ts = ns_to_timespec64(ns);
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
@@ -251,7 +262,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
- .gettime64 = e1000e_phc_gettime,
+ .gettimex64 = e1000e_phc_gettimex,
.settime64 = e1000e_phc_settime,
.enable = e1000e_phc_enable,
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5b2a50e5798f..6fd15a734324 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1465,11 +1465,11 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
-
- /* re-enable the q_vector */
- fm10k_qv_enable(q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ fm10k_qv_enable(q_vector);
return min(work_done, budget - 1);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 876cac317e79..8de9085bba9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -122,6 +122,7 @@ enum i40e_state_t {
__I40E_MDD_EVENT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
+ __I40E_TIMEOUT_RECOVERY_PENDING,
__I40E_MISC_IRQ_REQUESTED,
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
@@ -146,6 +147,7 @@ enum i40e_state_t {
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
__I40E_CLIENT_RESET,
+ __I40E_VIRTCHNL_OP_PENDING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
@@ -494,7 +496,6 @@ struct i40e_pf {
#define I40E_HW_STOP_FW_LLDP BIT(16)
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
-#define I40E_HW_STOPPABLE_FW_LLDP BIT(19)
u32 flags;
#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 501ee718177f..7ab61f6ebb5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -588,6 +588,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ }
+ if (hw->mac.type == I40E_MAC_X722 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
}
/* Newer versions of firmware require lock when reading the NVM */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 80e3eec6134e..11506102471c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,7 +11,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0006
#define I40E_FW_API_VERSION_MINOR_X710 0x0007
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
@@ -20,6 +20,8 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
struct i40e_aq_desc {
__le16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 85f75b5978fc..97a9b1fb4763 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3723,6 +3723,9 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
i40e_status status;
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9f8464f80783..a6bc7847346b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -906,6 +906,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ks->base.speed = SPEED_100;
break;
default:
+ ks->base.speed = SPEED_UNKNOWN;
break;
}
ks->base.duplex = DUPLEX_FULL;
@@ -1335,6 +1336,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
i40e_status status;
u8 aq_failures;
int err = 0;
+ u32 is_an;
/* Changing the port's flow control is not supported if this isn't the
* port's controlling PF
@@ -1347,15 +1349,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
if (vsi != pf->vsi[pf->lan_vsi])
return -EOPNOTSUPP;
- if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE)) {
+ is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
+ if (pause->autoneg != is_an) {
netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}
/* If we have link and don't have autoneg */
- if (!test_bit(__I40E_DOWN, pf->state) &&
- !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
+ if (!test_bit(__I40E_DOWN, pf->state) && !is_an) {
/* Send message that it might not necessarily work*/
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
@@ -1406,7 +1407,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
err = -EAGAIN;
}
- if (!test_bit(__I40E_DOWN, pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state) && is_an) {
/* Give it a little more time to try to come back */
msleep(75);
if (!test_bit(__I40E_DOWN, pf->state))
@@ -2377,7 +2378,8 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
/* only magic packet is supported */
- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+ if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
+ | (wol->wolopts != WAKE_FILTER))
return -EOPNOTSUPP;
/* is this a new value? */
@@ -4659,14 +4661,15 @@ flags_complete:
return -EOPNOTSUPP;
/* If the driver detected FW LLDP was disabled on init, this flag could
- * be set, however we do not support _changing_ the flag if NPAR is
- * enabled or FW API version < 1.7. There are situations where older
- * FW versions/NPAR enabled PFs could disable LLDP, however we _must_
- * not allow the user to enable/disable LLDP with this flag on
- * unsupported FW versions.
+ * be set, however we do not support _changing_ the flag:
+ * - on XL710 if NPAR is enabled or FW API version < 1.7
+ * - on X722 with FW API version < 1.6
+ * There are situations where older FW versions/NPAR enabled PFs could
+ * disable LLDP, however we _must_ not allow the user to enable/disable
+ * LLDP with this flag on unsupported FW versions.
*/
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) {
+ if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
dev_warn(&pf->pdev->dev,
"Device does not support changing FW LLDP\n");
return -EOPNOTSUPP;
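On the set_wol change above: ethtool wake options arrive as a bitmask, so an alternative way to express "only these wake sources are supported" is to mask off the accepted bits and reject anything left over. A minimal sketch of that style, assuming for illustration that WAKE_MAGIC and WAKE_FILTER are the only options this port honours:

	/* reject any wake option outside the supported set */
	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_FILTER))
		return -EOPNOTSUPP;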
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0e5dc74b4ef2..4d40878e395a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,8 +26,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_MINOR 7
+#define DRV_VERSION_BUILD 6
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -338,6 +338,10 @@ static void i40e_tx_timeout(struct net_device *netdev)
(pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
return; /* don't do any new action before the next timeout */
+ /* don't kick off another recovery if one is already pending */
+ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
+ return;
+
if (tx_ring) {
head = i40e_get_head(tx_ring);
/* Read interrupt register */
@@ -1493,8 +1497,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
bool found = false;
int bkt;
- WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
- "Missing mac_filter_hash_lock\n");
+ lockdep_assert_held(&vsi->mac_filter_hash_lock);
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (ether_addr_equal(macaddr, f->macaddr)) {
__i40e_del_filter(vsi, f);
@@ -9632,6 +9635,7 @@ end_core_reset:
clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}
/**
@@ -11332,16 +11336,15 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
- /* Stopping the FW LLDP engine is only supported on the
- * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP
- * engine is not supported if NPAR is functioning on this
- * part
+ /* Stopping FW LLDP engine is supported on XL710 and X722
+ * starting from FW versions determined in i40e_init_adminq.
+ * Stopping the FW LLDP engine is not supported on XL710
+ * if NPAR is functioning, so unset this hw flag in this case.
*/
if (pf->hw.mac.type == I40E_MAC_XL710 &&
- !pf->hw.func_caps.npar_enable &&
- (pf->hw.aq.api_maj_ver > 1 ||
- (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
- pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
+ pf->hw.func_caps.npar_enable &&
+ (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
@@ -11682,6 +11685,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* @dev: the netdev being configured
* @nlh: RTNL message
* @flags: bridge flags
+ * @extack: netlink extended ack
*
* Inserts a new hardware bridge if not already created and
* enables the bridging mode requested (VEB or VEPA). If the
@@ -11694,7 +11698,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
**/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -12334,6 +12339,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
+ /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
+ netdev->neigh_priv_len = sizeof(u32) * 4;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
/* Setup netdev TC information */
@@ -14302,23 +14310,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (hw->bus.speed) {
case i40e_bus_speed_8000:
- strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_5000:
- strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_2500:
- strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
default:
break;
}
switch (hw->bus.width) {
case i40e_bus_width_pcie_x8:
- strncpy(width, "8", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "8", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x4:
- strncpy(width, "4", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "4", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x2:
- strncpy(width, "2", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "2", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x1:
- strncpy(width, "1", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "1", PCI_WIDTH_SIZE); break;
default:
break;
}
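Two small hardening changes in i40e_main.c above are worth noting: strncpy() is replaced with strlcpy(), which always NUL-terminates the destination, and the open-coded WARN on spin_is_locked() is replaced with lockdep_assert_held(), which documents the locking contract and is checked only when lockdep is enabled (spin_is_locked() is not a reliable "do I hold it?" test). A minimal sketch of the lockdep pattern, with hypothetical names rather than the driver's:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>
	#include <linux/hashtable.h>

	/* hypothetical helper: the caller must already hold @lock */
	static void example_del_entry(spinlock_t *lock, struct hlist_node *node)
	{
		lockdep_assert_held(lock);	/* no-op unless CONFIG_LOCKDEP is on */
		hash_del(node);
	}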
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 1199f0502d6d..5fb4353c742b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -28,19 +28,23 @@
* i40e_ptp_read - Read the PHC time from the device
* @pf: Board private structure
* @ts: timespec structure to hold the current time value
+ * @sts: structure to hold the system time before and after reading the PHC
*
* This function reads the PRTTSYN_TIME registers and stores them in a
* timespec. However, since the registers are 64 bits of nanoseconds, we must
* convert the result to a timespec before we can return.
**/
-static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts)
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct i40e_hw *hw = &pf->hw;
u32 hi, lo;
u64 ns;
/* The timer latches on the lowest register read. */
+ ptp_read_system_prets(sts);
lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+ ptp_read_system_postts(sts);
hi = rd32(hw, I40E_PRTTSYN_TIME_H);
ns = (((u64)hi) << 32) | lo;
@@ -146,7 +150,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
mutex_lock(&pf->tmreg_lock);
- i40e_ptp_read(pf, &now);
+ i40e_ptp_read(pf, &now, NULL);
timespec64_add_ns(&now, delta);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
@@ -156,19 +160,21 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
/**
- * i40e_ptp_gettime - Get the time of the PHC
+ * i40e_ptp_gettimex - Get the time of the PHC
* @ptp: The PTP clock structure
* @ts: timespec structure to hold the current time value
+ * @sts: structure to hold the system time before and after reading the PHC
*
* Read the device clock and return the correct value on ns, after converting it
* into a timespec struct.
**/
-static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
mutex_lock(&pf->tmreg_lock);
- i40e_ptp_read(pf, ts);
+ i40e_ptp_read(pf, ts, sts);
mutex_unlock(&pf->tmreg_lock);
return 0;
@@ -694,7 +700,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strncpy(pf->ptp_caps.name, i40e_driver_name,
+ strlcpy(pf->ptp_caps.name, i40e_driver_name,
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
@@ -702,7 +708,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
pf->ptp_caps.pps = 0;
pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
- pf->ptp_caps.gettime64 = i40e_ptp_gettime;
+ pf->ptp_caps.gettimex64 = i40e_ptp_gettimex;
pf->ptp_caps.settime64 = i40e_ptp_settime;
pf->ptp_caps.enable = i40e_ptp_feature_enable;
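The PTP conversion above moves the clock from .gettime64 to the .gettimex64 callback so the core can see how long the PHC read took: ptp_read_system_prets()/ptp_read_system_postts() snapshot the system clock immediately around the latching register read, and both safely ignore a NULL sts as in the adjtime path. A minimal sketch of the same bracketing for a generic driver, where read_phc_ns() is a hypothetical stand-in for the device register access:

	#include <linux/ptp_clock_kernel.h>

	/* hypothetical device access; a real driver would latch and read the
	 * PHC registers here
	 */
	static u64 read_phc_ns(struct ptp_clock_info *ptp) { return 0; }

	static int example_gettimex64(struct ptp_clock_info *ptp,
				      struct timespec64 *ts,
				      struct ptp_system_timestamp *sts)
	{
		u64 ns;

		ptp_read_system_prets(sts);	/* system time just before the HW read */
		ns = read_phc_ns(ptp);
		ptp_read_system_postts(sts);	/* system time just after the HW read */

		*ts = ns_to_timespec64(ns);
		return 0;
	}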
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d0a95424ce58..a7e14e98889f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2648,10 +2648,11 @@ tx_only:
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete_done(napi, work_done);
-
- i40e_update_enable_itr(vsi, q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ i40e_update_enable_itr(vsi, q_vector);
return min(work_done, budget - 1);
}
@@ -3454,6 +3455,8 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
@@ -3507,6 +3510,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
u16 i = xdp_ring->next_to_use;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
+ void *data = xdpf->data;
u32 size = xdpf->len;
dma_addr_t dma;
@@ -3514,8 +3518,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
xdp_ring->tx_stats.tx_busy++;
return I40E_XDP_CONSUMED;
}
-
- dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
if (dma_mapping_error(xdp_ring->dev, dma))
return I40E_XDP_CONSUMED;
@@ -3633,8 +3636,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
- skb_tx_timestamp(skb);
-
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
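The poll routine above (and the matching iavf change further down) now only re-arms the queue interrupt when napi_complete_done() returns true; it returns false when the NAPI instance stays scheduled because the stack is busy-polling it, in which case re-enabling the interrupt would race with the in-progress poll. A minimal sketch of the tail of a poll routine using this pattern, where example_reenable_irq() is a hypothetical stand-in for the driver's ITR/interrupt re-enable helper:

	#include <linux/netdevice.h>

	/* hypothetical stand-in for the driver's interrupt re-enable helper */
	static void example_reenable_irq(struct napi_struct *napi) { }

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		/* ... clean Tx and Rx rings here, accumulating work_done ... */

		if (work_done >= budget)
			return budget;	/* still busy, stay in polling mode */

		/* re-enable the interrupt only if NAPI really completed */
		if (likely(napi_complete_done(napi, work_done)))
			example_reenable_irq(napi);

		return min(work_done, budget - 1);
	}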
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 7df969c59855..2781ab91ca82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -615,6 +615,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
u64 flags;
/* Used in set switch config AQ command */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index ac5698ed0b11..2ac23ebfbf31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1112,7 +1112,8 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
return I40E_ERR_PARAM;
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+ (allmulti || alluni)) {
dev_err(&pf->pdev->dev,
"Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id);
@@ -1675,13 +1676,20 @@ err_out:
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
- return i40e_pci_sriov_enable(pdev, num_vfs);
+ ret = i40e_pci_sriov_enable(pdev, num_vfs);
+ goto sriov_configure_out;
}
if (!pci_vfs_assigned(pf->pdev)) {
@@ -1690,9 +1698,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto sriov_configure_out;
}
- return 0;
+sriov_configure_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
}
/***********************virtual channel routines******************/
@@ -3893,6 +3904,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
@@ -3941,6 +3957,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -3992,6 +4009,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4107,6 +4129,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
ret = 0;
error_pvid:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4128,6 +4151,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4154,6 +4182,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
vf->tx_rate = max_tx_rate;
error:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4174,6 +4203,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4209,6 +4243,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ret = 0;
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4230,6 +4265,11 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
int abs_vf_id;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4273,6 +4313,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4294,6 +4335,11 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4327,6 +4373,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
ret = -EIO;
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4345,15 +4392,22 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
vf = &pf->vf[vf_id];
@@ -4376,5 +4430,6 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
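Every VF-facing ndo callback above now claims a single "virtchnl operation pending" state bit before touching the VF and releases it on every exit path; because test_and_set_bit() is atomic, two concurrent configuration requests cannot both proceed. A minimal sketch of the pattern with generic placeholder names (the bit number and the work in the middle are illustrative only):

	#include <linux/bitops.h>
	#include <linux/errno.h>

	#define EXAMPLE_OP_PENDING_BIT	0	/* placeholder state bit */

	static int example_configure(unsigned long *state)
	{
		int ret = 0;

		/* atomically claim the bit; back off if someone else holds it */
		if (test_and_set_bit(EXAMPLE_OP_PENDING_BIT, state))
			return -EAGAIN;

		/* ... validate the request and apply the configuration,
		 * setting ret on failure ...
		 */

		clear_bit(EXAMPLE_OP_PENDING_BIT, state);	/* release on every path */
		return ret;
	}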
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index bf67d62e2b5f..f9621026beef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -13,9 +13,9 @@
#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
-#define I40E_VLAN_PRIORITY_SHIFT 12
+#define I40E_VLAN_PRIORITY_SHIFT 13
#define I40E_VLAN_MASK 0xFFF
-#define I40E_PRIORITY_MASK 0x7000
+#define I40E_PRIORITY_MASK 0xE000
/* Various queue ctrls */
enum i40e_queue_ctrl {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index fb9bfad96daf..9b4d7cec2e18 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1761,10 +1761,11 @@ tx_only:
if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete_done(napi, work_done);
-
- iavf_update_enable_itr(vsi, q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ iavf_update_enable_itr(vsi, q_vector);
return min(work_done, budget - 1);
}
@@ -2343,6 +2344,8 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
@@ -2461,8 +2464,6 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
if (tso < 0)
goto out_drop;
- skb_tx_timestamp(skb);
-
/* always enable CRC insertion offload */
td_cmd |= IAVF_TX_DESC_CMD_ICRC;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b8548370f1c7..a385575600f6 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -52,7 +52,6 @@ extern const char ice_drv_ver[];
#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
-#define ICE_MAX_VSI_ALLOC 130
#define ICE_MAX_TXQS 2048
#define ICE_MAX_RXQS 2048
#define ICE_VSI_MAP_CONTIG 0
@@ -97,14 +96,14 @@ extern const char ice_drv_ver[];
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
-/* Macros for each tx/rx ring in a VSI */
+/* Macros for each Tx/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
-/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
@@ -113,7 +112,9 @@ extern const char ice_drv_ver[];
struct ice_tc_info {
u16 qoffset;
- u16 qcount;
+ u16 qcount_tx;
+ u16 qcount_rx;
+ u8 netdev_tc;
};
struct ice_tc_cfg {
@@ -149,10 +150,10 @@ enum ice_state {
__ICE_RESET_FAILED, /* set by reset/rebuild */
/* When checking for the PF to be in a nominal operating state, the
* bits that are grouped at the beginning of the list need to be
- * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
- * be checked. If you need to add a bit into consideration for nominal
+ * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+ * be checked. If you need to add a bit into consideration for nominal
* operating state, it must be added before
- * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
+ * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
* without appropriate consideration.
*/
__ICE_STATE_NOMINAL_CHECK_BITS,
@@ -182,8 +183,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
- struct ice_ring **rx_rings; /* rx ring array */
- struct ice_ring **tx_rings; /* tx ring array */
+ struct ice_ring **rx_rings; /* Rx ring array */
+ struct ice_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -200,8 +201,8 @@ struct ice_vsi {
int sw_base_vector; /* Irq base for OS reserved vectors */
int hw_base_vector; /* HW (absolute) index of a vector */
enum ice_vsi_type type;
- u16 vsi_num; /* HW (absolute) index of this VSI */
- u16 idx; /* software index in pf->vsi[] */
+ u16 vsi_num; /* HW (absolute) index of this VSI */
+ u16 idx; /* software index in pf->vsi[] */
/* Interrupt thresholds */
u16 work_lmt;
@@ -254,8 +255,8 @@ struct ice_q_vector {
struct ice_ring_container tx;
struct irq_affinity_notify affinity_notify;
u16 v_idx; /* index in the vsi->q_vector array. */
- u8 num_ring_tx; /* total number of tx rings in vector */
- u8 num_ring_rx; /* total number of rx rings in vector */
+ u8 num_ring_tx; /* total number of Tx rings in vector */
+ u8 num_ring_rx; /* total number of Rx rings in vector */
char name[ICE_INT_NAME_STR_LEN];
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
@@ -307,10 +308,10 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u16 num_lan_tx; /* num lan tx queues setup */
- u16 num_lan_rx; /* num lan rx queues setup */
- u16 q_left_tx; /* remaining num tx queues left unclaimed */
- u16 q_left_rx; /* remaining num rx queues left unclaimed */
+ u16 num_lan_tx; /* num lan Tx queues setup */
+ u16 num_lan_rx; /* num lan Rx queues setup */
+ u16 q_left_tx; /* remaining num Tx queues left unclaimed */
+ u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
u16 num_alloc_vsi;
u16 corer_count; /* Core reset count */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6653555f55dd..fcdcd80b18e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -5,7 +5,7 @@
#define _ICE_ADMINQ_CMD_H_
/* This header file defines the Admin Queue commands, error codes and
- * descriptor format. It is shared between Firmware and Software.
+ * descriptor format. It is shared between Firmware and Software.
*/
#define ICE_MAX_VSI 768
@@ -87,6 +87,7 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
+#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
#define ICE_AQC_CAPS_SRIOV 0x0012
#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
@@ -462,7 +463,7 @@ struct ice_aqc_sw_rules {
};
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
- * This structures describes the lookup rules and associated actions. "index"
+ * This structure describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
* used to identify the rule for Update/Get/Remove commands.
*/
@@ -1065,10 +1066,10 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LAST_CMD BIT(0)
#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */
#define ICE_AQC_NVM_PRESERVATION_S 1
-#define ICE_AQC_NVM_PRESERVATION_M (3 << CSR_AQ_NVM_PRESERVATION_S)
-#define ICE_AQC_NVM_NO_PRESERVATION (0 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
-#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
__le16 module_typeid;
__le16 length;
@@ -1110,7 +1111,7 @@ struct ice_aqc_get_set_rss_keys {
};
/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
-struct ice_aqc_get_set_rss_lut {
+struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
@@ -1314,10 +1315,10 @@ struct ice_aqc_get_clear_fw_log {
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ). The firmware writes back onto the command descriptor and returns
- * the result of the command. Asynchronous events that are not an immediate
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
* result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format. Descriptors are in little-endian notation with
+ * the same descriptor format. Descriptors are in little-endian notation with
* 32-bit words.
*/
struct ice_aq_desc {
@@ -1379,10 +1380,10 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
- ICE_AQ_RC_OK = 0, /* success */
+ ICE_AQ_RC_OK = 0, /* Success */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
- ICE_AQ_RC_EEXIST = 13, /* object already exists */
+ ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
};
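The NVM cmd_flags fix above corrects the prefix in the shift name: the old CSR_AQ_ spelling does not match the ICE_AQC_NVM_PRESERVATION_S define two lines up, so the mask and PRESERVE_SELECTED values could not have been used as written. With the shift of 1, the two-bit preservation field occupies bits 2:1 of cmd_flags; a small self-contained check of the resulting values:

	#include <assert.h>

	#define BIT(n)				(1U << (n))
	#define ICE_AQC_NVM_PRESERVATION_S	1
	#define ICE_AQC_NVM_PRESERVATION_M	(3 << ICE_AQC_NVM_PRESERVATION_S)
	#define ICE_AQC_NVM_NO_PRESERVATION	(0 << ICE_AQC_NVM_PRESERVATION_S)
	#define ICE_AQC_NVM_PRESERVE_ALL	BIT(1)
	#define ICE_AQC_NVM_PRESERVE_SELECTED	(3 << ICE_AQC_NVM_PRESERVATION_S)

	int main(void)
	{
		assert(ICE_AQC_NVM_PRESERVATION_M == 0x6);
		assert(ICE_AQC_NVM_NO_PRESERVATION == 0x0);
		assert(ICE_AQC_NVM_PRESERVE_ALL == 0x2);
		assert(ICE_AQC_NVM_PRESERVE_SELECTED == 0x6);
		return 0;
	}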
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 554fd707a6d6..4c1d35da940d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -405,9 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- ice_init_def_sw_recp(hw);
-
- return 0;
+ return ice_init_def_sw_recp(hw);
}
/**
@@ -715,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
hw->evb_veb = true;
- /* Query the allocated resources for tx scheduler */
+ /* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw);
if (status) {
ice_debug(hw, ICE_DBG_SCHED,
@@ -958,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
*
* Copies rxq context from dense structure to hw register space
*/
@@ -1014,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* ice_write_rxq_ctx
* @hw: pointer to the hardware structure
* @rlan_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
* it to hw register space
@@ -1387,6 +1385,27 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
+ * ice_get_guar_num_vsi - determine number of guar VSI for a PF
+ * @hw: pointer to the hw structure
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of VSI per PF.
+ */
+static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+{
+ u8 funcs;
+
+#define ICE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ ICE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return ICE_MAX_VSI / funcs;
+}
+
+/**
* ice_parse_caps - parse function/device capabilities
* @hw: pointer to the hw struct
* @buf: pointer to a buffer containing function/device capability records
@@ -1428,6 +1447,12 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Valid Functions = %d\n",
+ caps->valid_functions);
+ break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
@@ -1457,10 +1482,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
- func_p->guaranteed_num_vsi = number;
+ func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
- func_p->guaranteed_num_vsi);
+ number);
}
break;
case ICE_AQC_CAPS_RSS:
@@ -1688,8 +1713,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
*/
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
{
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
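ice_get_guar_num_vsi() above splits the device's VSI pool evenly across the physical functions reported in the new ICE_AQC_CAPS_VALID_FUNCTIONS capability, instead of taking the per-function count straight from the capability record. A small self-contained version of the arithmetic (ICE_MAX_VSI is 768 per the Admin Queue header earlier in this patch; popcount8() mirrors the kernel's hweight8()):

	#include <assert.h>
	#include <stdint.h>

	#define ICE_MAX_VSI		768
	#define VALID_FUNCS_MASK	0xFF

	/* popcount of the low 8 bits, mirroring hweight8() in the kernel */
	static unsigned int popcount8(uint8_t v)
	{
		return (unsigned int)__builtin_popcount(v);
	}

	static uint32_t guar_num_vsi(uint8_t valid_functions)
	{
		unsigned int funcs = popcount8(valid_functions & VALID_FUNCS_MASK);

		return funcs ? ICE_MAX_VSI / funcs : 0;
	}

	int main(void)
	{
		assert(guar_num_vsi(0x0F) == 192);	/* four PFs -> 768 / 4 */
		assert(guar_num_vsi(0x01) == 768);	/* single PF gets everything */
		assert(guar_num_vsi(0x00) == 0);	/* no valid functions reported */
		return 0;
	}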
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 84c967294eaf..2bf5e11f559a 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -3,6 +3,26 @@
#include "ice_common.h"
+#define ICE_CQ_INIT_REGS(qinfo, prefix) \
+do { \
+ (qinfo)->sq.head = prefix##_ATQH; \
+ (qinfo)->sq.tail = prefix##_ATQT; \
+ (qinfo)->sq.len = prefix##_ATQLEN; \
+ (qinfo)->sq.bah = prefix##_ATQBAH; \
+ (qinfo)->sq.bal = prefix##_ATQBAL; \
+ (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
+ (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
+ (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
+ (qinfo)->rq.head = prefix##_ARQH; \
+ (qinfo)->rq.tail = prefix##_ARQT; \
+ (qinfo)->rq.len = prefix##_ARQLEN; \
+ (qinfo)->rq.bah = prefix##_ARQBAH; \
+ (qinfo)->rq.bal = prefix##_ARQBAL; \
+ (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
+ (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
+ (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
+} while (0)
+
/**
* ice_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -13,23 +33,7 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->adminq;
- cq->sq.head = PF_FW_ATQH;
- cq->sq.tail = PF_FW_ATQT;
- cq->sq.len = PF_FW_ATQLEN;
- cq->sq.bah = PF_FW_ATQBAH;
- cq->sq.bal = PF_FW_ATQBAL;
- cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
- cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
- cq->sq.head_mask = PF_FW_ATQH_ATQH_M;
-
- cq->rq.head = PF_FW_ARQH;
- cq->rq.tail = PF_FW_ARQT;
- cq->rq.len = PF_FW_ARQLEN;
- cq->rq.bah = PF_FW_ARQBAH;
- cq->rq.bal = PF_FW_ARQBAL;
- cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
- cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
- cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
+ ICE_CQ_INIT_REGS(cq, PF_FW);
}
/**
@@ -42,24 +46,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->mailboxq;
- /* set head and tail registers in our local struct */
- cq->sq.head = PF_MBX_ATQH;
- cq->sq.tail = PF_MBX_ATQT;
- cq->sq.len = PF_MBX_ATQLEN;
- cq->sq.bah = PF_MBX_ATQBAH;
- cq->sq.bal = PF_MBX_ATQBAL;
- cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
- cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
- cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
-
- cq->rq.head = PF_MBX_ARQH;
- cq->rq.tail = PF_MBX_ARQT;
- cq->rq.len = PF_MBX_ARQLEN;
- cq->rq.bah = PF_MBX_ARQBAH;
- cq->rq.bal = PF_MBX_ARQBAL;
- cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
- cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
- cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+ ICE_CQ_INIT_REGS(cq, PF_MBX);
}
/**
@@ -131,37 +118,20 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
- * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
+ * ice_free_cq_ring - Free control queue ring
* @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
+ * @ring: pointer to the specific control queue ring
*
- * This assumes the posted send buffers have already been cleaned
+ * This assumes the posted buffers have already been cleaned
* and de-allocated
*/
-static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
- dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
- cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
- cq->sq.desc_buf.va = NULL;
- cq->sq.desc_buf.pa = 0;
- cq->sq.desc_buf.size = 0;
-}
-
-/**
- * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- *
- * This assumes the posted receive buffers have already been cleaned
- * and de-allocated
- */
-static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
- dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
- cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
- cq->rq.desc_buf.va = NULL;
- cq->rq.desc_buf.pa = 0;
- cq->rq.desc_buf.size = 0;
+ dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
+ ring->desc_buf.va, ring->desc_buf.pa);
+ ring->desc_buf.va = NULL;
+ ring->desc_buf.pa = 0;
+ ring->desc_buf.size = 0;
}
/**
@@ -280,54 +250,23 @@ unwind_alloc_sq_bufs:
return ICE_ERR_NO_MEMORY;
}
-/**
- * ice_free_rq_bufs - Free ARQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
- int i;
-
- /* free descriptors */
- for (i = 0; i < cq->num_rq_entries; i++) {
- dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
- cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
- cq->rq.r.rq_bi[i].va = NULL;
- cq->rq.r.rq_bi[i].pa = 0;
- cq->rq.r.rq_bi[i].size = 0;
- }
-
- /* free the dma header */
- devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
-}
-
-/**
- * ice_free_sq_bufs - Free ATQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static enum ice_status
+ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
- int i;
+ /* Clear Head and Tail */
+ wr32(hw, ring->head, 0);
+ wr32(hw, ring->tail, 0);
- /* only unmap if the address is non-NULL */
- for (i = 0; i < cq->num_sq_entries; i++)
- if (cq->sq.r.sq_bi[i].pa) {
- dmam_free_coherent(ice_hw_to_dev(hw),
- cq->sq.r.sq_bi[i].size,
- cq->sq.r.sq_bi[i].va,
- cq->sq.r.sq_bi[i].pa);
- cq->sq.r.sq_bi[i].va = NULL;
- cq->sq.r.sq_bi[i].pa = 0;
- cq->sq.r.sq_bi[i].size = 0;
- }
+ /* set starting point */
+ wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
+ wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
+ wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
- /* free the buffer info list */
- devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);
+ /* Check one register to verify that config was applied */
+ if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
+ return ICE_ERR_AQ_ERROR;
- /* free the dma header */
- devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+ return 0;
}
/**
@@ -340,23 +279,7 @@ static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- u32 reg = 0;
-
- /* Clear Head and Tail */
- wr32(hw, cq->sq.head, 0);
- wr32(hw, cq->sq.tail, 0);
-
- /* set starting point */
- wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
- wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
- wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));
-
- /* Check one register to verify that config was applied */
- reg = rd32(hw, cq->sq.bal);
- if (reg != lower_32_bits(cq->sq.desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
-
- return 0;
+ return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
/**
@@ -369,25 +292,15 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- u32 reg = 0;
-
- /* Clear Head and Tail */
- wr32(hw, cq->rq.head, 0);
- wr32(hw, cq->rq.tail, 0);
+ enum ice_status status;
- /* set starting point */
- wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
- wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
- wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));
+ status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
+ if (status)
+ return status;
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
- /* Check one register to verify that config was applied */
- reg = rd32(hw, cq->rq.bal);
- if (reg != lower_32_bits(cq->rq.desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
-
return 0;
}
@@ -444,7 +357,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
- ice_free_ctrlq_sq_ring(hw, cq);
+ ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
return ret_code;
@@ -503,12 +416,33 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
- ice_free_ctrlq_rq_ring(hw, cq);
+ ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) { \
+ dmam_free_coherent(ice_hw_to_dev(hw), \
+ (qi)->ring.r.ring##_bi[i].size,\
+ (qi)->ring.r.ring##_bi[i].va,\
+ (qi)->ring.r.ring##_bi[i].pa);\
+ (qi)->ring.r.ring##_bi[i].va = NULL; \
+ (qi)->ring.r.ring##_bi[i].pa = 0; \
+ (qi)->ring.r.ring##_bi[i].size = 0; \
+ } \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+ /* free dma head */ \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
+} while (0)
+
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@@ -538,8 +472,8 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers and the ring itself */
- ice_free_sq_bufs(hw, cq);
- ice_free_ctrlq_sq_ring(hw, cq);
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
+ ice_free_cq_ring(hw, &cq->sq);
shutdown_sq_out:
mutex_unlock(&cq->sq_lock);
@@ -606,8 +540,8 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.count = 0;
/* free ring buffers and the ring itself */
- ice_free_rq_bufs(hw, cq);
- ice_free_ctrlq_rq_ring(hw, cq);
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
+ ice_free_cq_ring(hw, &cq->rq);
shutdown_rq_out:
mutex_unlock(&cq->rq_lock);
@@ -657,7 +591,6 @@ init_ctrlq_free_rq:
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
- *
*/
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -841,7 +774,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
- * This is the main send command routine for the ATQ. It runs the q,
+ * This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
*/
enum ice_status
@@ -1035,7 +968,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
- * the contents through e. It can also return how many events are
+ * the contents through e. It can also return how many events are
* left to process through 'pending'.
*/
enum ice_status
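ICE_CQ_INIT_REGS() above collapses the duplicated AdminQ and mailbox register setup into one macro by token-pasting the register prefix: ICE_CQ_INIT_REGS(cq, PF_FW) expands to assignments from PF_FW_ATQH, PF_FW_ARQH and friends, while the mailbox variant picks up the PF_MBX_ names. A tiny standalone illustration of the ## technique, using made-up register values rather than the real ice defines:

	#include <assert.h>

	/* fake register addresses standing in for the PF_FW and PF_MBX ids */
	#define DEMO_FW_ATQH	0x100
	#define DEMO_FW_ARQH	0x200
	#define DEMO_MBX_ATQH	0x300
	#define DEMO_MBX_ARQH	0x400

	struct demo_cq {
		unsigned int sq_head;
		unsigned int rq_head;
	};

	/* same idea as ICE_CQ_INIT_REGS: paste the prefix onto each register name */
	#define DEMO_CQ_INIT_REGS(qinfo, prefix)	\
	do {						\
		(qinfo)->sq_head = prefix##_ATQH;	\
		(qinfo)->rq_head = prefix##_ARQH;	\
	} while (0)

	int main(void)
	{
		struct demo_cq fw = { 0 }, mbx = { 0 };

		DEMO_CQ_INIT_REGS(&fw, DEMO_FW);
		DEMO_CQ_INIT_REGS(&mbx, DEMO_MBX);

		assert(fw.sq_head == DEMO_FW_ATQH && fw.rq_head == DEMO_FW_ARQH);
		assert(mbx.sq_head == DEMO_MBX_ATQH && mbx.rq_head == DEMO_MBX_ARQH);
		return 0;
	}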
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 648acdb4c644..3b6e387f5440 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -62,7 +62,7 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
* The PF_STATs are appended to the netdev stats only when ethtool -S
* is queried on the base PF netdev.
*/
-static struct ice_stats ice_gstrings_pf_stats[] = {
+static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
@@ -104,7 +104,7 @@ static struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};
-static u32 ice_regs_dump_list[] = {
+static const u32 ice_regs_dump_list[] = {
PFGEN_STATE,
PRTGEN_STATUS,
QRX_CTRL(0),
@@ -260,10 +260,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* a private ethtool flag). This is due to the nature of the
* ethtool stats API.
*
- * User space programs such as ethtool must make 3 separate
+ * Userspace programs such as ethtool must make 3 separate
* ioctl requests, one for size, one for the strings, and
* finally one for the stats. Since these cross into
- * user space, changes to the number or size could result in
+ * userspace, changes to the number or size could result in
* undefined memory access or incorrect string<->value
* correlations for statistics.
*
@@ -1392,17 +1392,17 @@ static int ice_nway_reset(struct net_device *netdev)
{
/* restart autonegotiation */
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
enum ice_status status;
- bool link_up;
pi = vsi->port_info;
- hw_link_info = &pi->phy.link_info;
- link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+ /* If VSI state is up, then restart autoneg with link up */
+ if (!test_bit(__ICE_DOWN, vsi->back->state))
+ status = ice_aq_set_link_restart_an(pi, true, NULL);
+ else
+ status = ice_aq_set_link_restart_an(pi, false, NULL);
- status = ice_aq_set_link_restart_an(pi, link_up, NULL);
if (status) {
netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
status, pi->hw->adminq.sq_last_status);
@@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
/**
* ice_set_pauseparam - Set Flow Control parameter
* @netdev: network interface device structure
- * @pause: return tx/rx flow control status
+ * @pause: return Tx/Rx flow control status
*/
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
}
/**
- * ice_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
* @netdev: network interface device structure
*
* Returns the table size.
@@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
}
/**
- * ice_get_rxfh - get the rx flow hash indirection table
+ * ice_get_rxfh - get the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
@@ -1603,7 +1603,7 @@ out:
}
/**
- * ice_set_rxfh - set the rx flow hash indirection table
+ * ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 596b9fb1c510..5507928c8fbe 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -7,6 +7,9 @@
#define _ICE_HW_AUTOGEN_H_
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_HEAD_S 0
+#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 7d2a66739e3f..bb51dd7defb5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -6,11 +6,11 @@
union ice_32byte_rx_desc {
struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
+ __le64 rsvd1;
+ __le64 rsvd2;
} read;
struct {
struct {
@@ -105,11 +105,11 @@ enum ice_rx_ptype_payload_layer {
*/
union ice_32b_rx_flex_desc {
struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
} read;
struct {
/* Qword 0 */
@@ -256,6 +256,9 @@ enum ice_rx_flex_desc_status_error_0_bits {
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
+#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
+#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
/* RLAN Rx queue context data
*
@@ -274,18 +277,18 @@ struct ice_rlan_ctx {
u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
u16 hbuf; /* bigger than needed, see above for reason */
- u8 dtype;
- u8 dsize;
- u8 crcstrip;
- u8 l2tsel;
- u8 hsplit_0;
- u8 hsplit_1;
- u8 showiv;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
- u8 tphrdesc_ena;
- u8 tphwdesc_ena;
- u8 tphdata_ena;
- u8 tphhead_ena;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
};
@@ -413,35 +416,35 @@ enum ice_tx_ctx_desc_cmd_bits {
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
- u8 port_num;
+ u8 port_num;
u16 cgd_num; /* bigger than needed, see above for reason */
- u8 pf_num;
+ u8 pf_num;
u16 vmvf_num;
- u8 vmvf_type;
+ u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
- u8 tsyn_ena;
- u8 alt_vlan;
+ u8 tsyn_ena;
+ u8 alt_vlan;
u16 cpuid; /* bigger than needed, see above for reason */
- u8 wb_mode;
- u8 tphrd_desc;
- u8 tphrd;
- u8 tphwr_desc;
+ u8 wb_mode;
+ u8 tphrd_desc;
+ u8 tphrd;
+ u8 tphwr_desc;
u16 cmpq_id;
u16 qnum_in_func;
- u8 itr_notification_mode;
- u8 adjust_prof_id;
+ u8 itr_notification_mode;
+ u8 adjust_prof_id;
u32 qlen; /* bigger than needed, see above for reason */
- u8 quanta_prof_idx;
- u8 tso_ena;
+ u8 quanta_prof_idx;
+ u8 tso_ena;
u16 tso_qnum;
- u8 legacy_int;
- u8 drop_ena;
- u8 cache_prof_idx;
- u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal do not write */
+ u8 legacy_int;
+ u8 drop_ena;
+ u8 cache_prof_idx;
+ u8 pkt_shaper_prof_idx;
+ u8 int_q_state; /* width not needed - internal do not write */
};
/* macro to make the table lines short */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1041fa2a7767..29b1dcfd4331 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -20,7 +20,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
u16 pf_q;
int err;
- /* what is RX queue number in global space of 2K Rx queues */
+ /* what is Rx queue number in global space of 2K Rx queues */
pf_q = vsi->rxq_map[ring->q_index];
/* clear the context structure first */
@@ -174,15 +174,15 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
int i;
- for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+ for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
break;
- usleep_range(10, 20);
+ usleep_range(20, 40);
}
- if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+ if (i >= ICE_Q_WAIT_MAX_RETRY)
return -ETIMEDOUT;
return 0;
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
*/
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, numq_tc;
- u16 pow = 0, max_rss = 0, qcount;
+ u16 offset = 0, qmap = 0, tx_count = 0;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
+ u16 tx_numq_tc, rx_numq_tc;
+ u16 pow = 0, max_rss = 0;
bool ena_tc0 = false;
+ u8 netdev_tc = 0;
int i;
/* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.ena_tc |= 1;
}
- numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+ rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+ if (!rx_numq_tc)
+ rx_numq_tc = 1;
+ tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
+ if (!tx_numq_tc)
+ tx_numq_tc = 1;
/* TC mapping is a function of the number of Rx queues assigned to the
* VSI for each traffic class and the offset of these queues.
@@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* Setup number and offset of Rx queues for all TCs for the VSI
*/
- qcount = numq_tc;
+ qcount_rx = rx_numq_tc;
+
/* qcount will change if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
@@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_SMALL_RSS_QS;
- qcount = min_t(int, numq_tc, max_rss);
- qcount = min_t(int, qcount, vsi->rss_size);
+ qcount_rx = min_t(int, rx_numq_tc, max_rss);
+ qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
}
}
/* find the (rounded up) power-of-2 of qcount */
- pow = order_base_2(qcount);
+ pow = order_base_2(qcount_rx);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
- vsi->tc_cfg.tc_info[i].qcount = 1;
+ vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+ vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+ vsi->tc_cfg.tc_info[i].netdev_tc = 0;
ctxt->info.tc_mapping[i] = 0;
continue;
}
/* TC is enabled */
vsi->tc_cfg.tc_info[i].qoffset = offset;
- vsi->tc_cfg.tc_info[i].qcount = qcount;
+ vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+ vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+ vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
ICE_AQ_VSI_TC_Q_OFFSET_M) |
((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
ICE_AQ_VSI_TC_Q_NUM_M);
- offset += qcount;
+ offset += qcount_rx;
+ tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
-
- vsi->num_txq = qcount_tx;
vsi->num_rxq = offset;
+ vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -1000,7 +1012,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @vsi: the VSI being configured
* @v_idx: index of the vector in the VSI struct
*
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
@@ -1039,7 +1051,7 @@ out:
* ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
+ * We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
*/
static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
@@ -1188,7 +1200,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int i;
- /* Allocate tx_rings */
+ /* Allocate Tx rings */
for (i = 0; i < vsi->alloc_txq; i++) {
struct ice_ring *ring;
@@ -1207,7 +1219,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
vsi->tx_rings[i] = ring;
}
- /* Allocate rx_rings */
+ /* Allocate Rx rings */
for (i = 0; i < vsi->alloc_rxq; i++) {
struct ice_ring *ring;
@@ -1611,55 +1623,62 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
+ u8 num_q_grps, q_idx = 0;
enum ice_status status;
u16 buf_len, i, pf_q;
int err = 0, tc = 0;
- u8 num_q_grps;
buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
- if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
- err = -EINVAL;
- goto err_cfg_txqs;
- }
qg_buf->num_txqs = 1;
num_q_grps = 1;
- /* set up and configure the Tx queues */
- ice_for_each_txq(vsi, i) {
- struct ice_tlan_ctx tlan_ctx = { 0 };
+ /* set up and configure the Tx queues for each enabled TC */
+ for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+ break;
- pf_q = vsi->txq_map[i];
- ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+
+ pf_q = vsi->txq_map[q_idx];
+ ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
+ pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ /* init queue specific tail reg. It is referred to as
+ * transmit comm scheduler queue doorbell.
+ */
+ vsi->tx_rings[q_idx]->tail =
+ pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ num_q_grps, qg_buf, buf_len,
+ NULL);
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ err = -ENODEV;
+ goto err_cfg_txqs;
+ }
- /* init queue specific tail reg. It is referred as transmit
- * comm scheduler queue doorbell.
- */
- vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- num_q_grps, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- err = -ENODEV;
- goto err_cfg_txqs;
- }
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ vsi->tx_rings[q_idx]->txq_teid =
+ le32_to_cpu(txq->q_teid);
- /* Add Tx Queue TEID into the VSI Tx ring from the response
- * This will complete configuring and enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- vsi->tx_rings[i]->txq_teid =
- le32_to_cpu(txq->q_teid);
+ q_idx++;
+ }
}
err_cfg_txqs:
devm_kfree(&pf->pdev->dev, qg_buf);
@@ -1908,7 +1927,8 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
ice_for_each_txq(vsi, i) {
u16 v_idx;
- if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+ if (!vsi->tx_rings || !vsi->tx_rings[i] ||
+ !vsi->tx_rings[i]->q_vector) {
err = -EINVAL;
goto err_out;
}
@@ -2056,6 +2076,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
+ /* set tc configuration */
+ ice_vsi_set_tc_cfg(vsi);
+
/* create the VSI */
ret = ice_vsi_init(vsi);
if (ret)
@@ -2113,17 +2136,13 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
pf->q_left_rx -= vsi->alloc_rxq;
break;
default:
- /* if VSI type is not recognized, clean up the resources and
- * exit
- */
+ /* clean up the resources and exit */
goto unroll_vsi_init;
}
- ice_vsi_set_tc_cfg(vsi);
-
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
+ max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -2314,7 +2333,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
int start = res->search_hint;
int end = start;
- if ((start + needed) > res->num_entries)
+ if ((start + needed) > res->num_entries)
return -ENOMEM;
id |= ICE_RES_VALID_BIT;
@@ -2491,6 +2510,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
ice_vsi_clear_rings(vsi);
@@ -2518,11 +2538,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf;
int ret, i;
if (!vsi)
return -EINVAL;
+ pf = vsi->back;
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2532,6 +2555,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_free_arrays(vsi, false);
ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
+ ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
ret = ice_vsi_init(vsi);
@@ -2578,11 +2602,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
break;
}
- ice_vsi_set_tc_cfg(vsi);
-
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
+ max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
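Taken together, the ice_vsi_setup()/ice_vsi_rebuild() hunks above change two things: the TC configuration is derived before ice_vsi_init(), and the per-TC queue ceiling passed to the scheduler comes from pf->num_lan_tx instead of vsi->num_txq. A condensed sketch of the resulting order of operations (identifiers are taken from the patch; error handling and the unrelated rebuild steps are elided):

static void rebuild_sched_order_sketch(struct ice_vsi *vsi, struct ice_pf *pf)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	int i;

	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);	/* drop stale LAN nodes */
	ice_vsi_set_tc_cfg(vsi);			/* before ice_vsi_init() */
	/* ... ice_vsi_init(), ring and vector setup ... */

	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = pf->num_lan_tx;		/* per-TC ceiling */

	ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			max_txqs);
}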
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 333312a1d595..8725569d11f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf);
+ if (hw->port_info)
+ ice_sched_clear_port(hw->port_info);
+
ice_shutdown_all_ctrlq(hw);
set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@@ -405,7 +408,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
* OICR interrupt. The OICR handler (ice_misc_intr) determines what type
* of reset is pending and sets bits in pf->state indicating the reset
- * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
+ * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
* prepare for pending reset if not already (for PF software-initiated
* global resets the software should already be prepared for it as
* indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
@@ -1379,7 +1382,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
* @pf: board private structure
*
* This sets up the handler for MSIX 0, which is used to manage the
- * non-queue interrupts, e.g. AdminQ and errors. This is not used
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
* when in MSI or Legacy interrupt mode.
*/
static int ice_req_irq_msix_misc(struct ice_pf *pf)
@@ -1783,7 +1786,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
- /* only 1 rx queue unless RSS is enabled */
+ /* only 1 Rx queue unless RSS is enabled */
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
pf->num_lan_rx = 1;
else
@@ -2091,8 +2094,7 @@ static int ice_probe(struct pci_dev *pdev,
ice_determine_q_usage(pf);
- pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
- hw->func_caps.guaranteed_num_vsi);
+ pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
@@ -2544,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
-
err = ice_vsi_cfg_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -2563,8 +2564,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_enable(&q_vector->napi);
+ }
}
/**
@@ -2931,8 +2936,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_disable(&q_vector->napi);
+ }
}
/**
@@ -3138,8 +3147,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
*/
-static void ice_dis_vsi(struct ice_vsi *vsi)
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
@@ -3148,9 +3158,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
- rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- rtnl_unlock();
+ if (!locked) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ }
} else {
ice_vsi_close(vsi);
}
@@ -3189,7 +3203,7 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- ice_dis_vsi(pf->vsi[v]);
+ ice_dis_vsi(pf->vsi[v], false);
}
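The new locked argument lets a caller that already holds the rtnl lock reuse this path without deadlocking on rtnl_lock(). The shape of the pattern, reduced to a sketch (the wrapper name is illustrative only):

static void stop_netdev_sketch(struct net_device *netdev, bool rtnl_held)
{
	if (!rtnl_held) {
		rtnl_lock();
		netdev->netdev_ops->ndo_stop(netdev);
		rtnl_unlock();
	} else {
		netdev->netdev_ops->ndo_stop(netdev);
	}
}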
/**
@@ -3618,6 +3632,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
* @dev: the netdev being configured
* @nlh: RTNL message
* @flags: bridge setlink flags
+ * @extack: netlink extended ack
*
* Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
* hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
@@ -3626,7 +3641,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
*/
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
- u16 __always_unused flags)
+ u16 __always_unused flags, struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_pf *pf = np->vsi->back;
@@ -3668,7 +3683,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
- netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n",
+ netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
mode, status, hw->adminq.sq_last_status);
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
@@ -3691,40 +3706,36 @@ static void ice_tx_timeout(struct net_device *netdev)
struct ice_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- u32 head, val = 0, i;
int hung_queue = -1;
+ u32 i;
pf->tx_timeout_count++;
- /* find the stopped queue the same way the stack does */
+ /* find the stopped queue the same way dev_watchdog() does */
for (i = 0; i < netdev->num_tx_queues; i++) {
- struct netdev_queue *q;
unsigned long trans_start;
+ struct netdev_queue *q;
q = netdev_get_tx_queue(netdev, i);
trans_start = q->trans_start;
if (netif_xmit_stopped(q) &&
time_after(jiffies,
- (trans_start + netdev->watchdog_timeo))) {
+ trans_start + netdev->watchdog_timeo)) {
hung_queue = i;
break;
}
}
- if (i == netdev->num_tx_queues) {
+ if (i == netdev->num_tx_queues)
netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
- } else {
+ else
/* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++) {
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
- if (hung_queue ==
- vsi->tx_rings[i]->q_index) {
+ for (i = 0; i < vsi->num_txq; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ if (hung_queue == vsi->tx_rings[i]->q_index) {
tx_ring = vsi->tx_rings[i];
break;
}
- }
- }
- }
/* Reset recovery level if enough time has elapsed after last timeout.
* Also ensure no new reset action happens before next timeout period.
@@ -3736,17 +3747,20 @@ static void ice_tx_timeout(struct net_device *netdev)
return;
if (tx_ring) {
- head = tx_ring->next_to_clean;
+ struct ice_hw *hw = &pf->hw;
+ u32 head, val = 0;
+
+ head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+ QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- val = rd32(&pf->hw,
+ val = rd32(hw,
GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
- tx_ring->vsi->hw_base_vector));
+ tx_ring->vsi->hw_base_vector));
- netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use,
- readl(tx_ring->tail), val);
+ head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
@@ -3780,7 +3794,7 @@ static void ice_tx_timeout(struct net_device *netdev)
* @netdev: network interface device structure
*
* The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
+ * active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the netdev watchdog is enabled,
* and the stack is notified that the interface is ready.
@@ -3813,7 +3827,7 @@ static int ice_open(struct net_device *netdev)
* @netdev: network interface device structure
*
* The stop entry point is called when an interface is de-activated by the OS,
- * and the netdevice enters the DOWN state. The hardware is still under the
+ * and the netdevice enters the DOWN state. The hardware is still under the
* driver's control, but the netdev interface is disabled.
*
* Returns success only - not allowed to fail
@@ -3842,14 +3856,14 @@ ice_features_check(struct sk_buff *skb,
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
- * being requested for this frame. We can rule out both by just
+ * being requested for this frame. We can rule out both by just
* checking for CHECKSUM_PARTIAL
*/
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
/* We cannot support GSO if the MSS is going to be less than
- * 64 bytes. If it is then we need to drop support for GSO.
+ * 64 bytes. If it is then we need to drop support for GSO.
*/
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
features &= ~NETIF_F_GSO_MASK;
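The two early exits in ice_features_check() above can be read as a small predicate: skip everything unless checksum offload is in play, and drop GSO when the MSS is below what the hardware can segment. A condensed sketch (the 64-byte floor is the constant from the hunk):

static netdev_features_t gso_mss_check_sketch(struct sk_buff *skb,
					      netdev_features_t features)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;		/* neither csum nor GSO requested */

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size < 64)
		features &= ~NETIF_F_GSO_MASK;	/* MSS too small to offload */

	return features;
}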
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7cc8aa18a22b..a1681853df2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
*
* Cleanup scheduling elements from SW DB
*/
-static void ice_sched_clear_port(struct ice_port_info *pi)
+void ice_sched_clear_port(struct ice_port_info *pi)
{
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return;
@@ -894,8 +894,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* This function removes the leaf node that was created by the FW
* during initialization
*/
-static void
-ice_rm_dflt_leaf_node(struct ice_port_info *pi)
+static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
struct ice_sched_node *node;
@@ -923,8 +922,7 @@ ice_rm_dflt_leaf_node(struct ice_port_info *pi)
* This function frees all the nodes except root and TC that were created by
* the FW during initialization
*/
-static void
-ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
+static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
struct ice_sched_node *node;
@@ -1339,7 +1337,7 @@ ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
* @num_nodes: pointer to num nodes array
*
* This function calculates the number of supported nodes needed to add this
- * VSI into tx tree including the VSI, parent and intermediate nodes in below
+ * VSI into Tx tree including the VSI, parent and intermediate nodes in below
* layers
*/
static void
@@ -1376,13 +1374,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
}
/**
- * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
+ * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
- * This function adds the VSI supported nodes into tx tree including the
+ * This function adds the VSI supported nodes into Tx tree including the
* VSI, its parent and intermediate nodes in below layers
*/
static enum ice_status
@@ -1527,7 +1525,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_sched_cfg_vsi - configure the new/exisiting VSI
+ * ice_sched_cfg_vsi - configure the new/existing VSI
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: TC number
@@ -1605,3 +1603,109 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return status;
}
+
+/**
+ * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function removes a single aggregator VSI info entry from the
+ * aggregator list.
+ */
+static void
+ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+{
+ struct ice_sched_agg_info *agg_info;
+ struct ice_sched_agg_info *atmp;
+
+ list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *vtmp;
+
+ list_for_each_entry_safe(agg_vsi_info, vtmp,
+ &agg_info->agg_vsi_list, list_entry)
+ if (agg_vsi_info->vsi_handle == vsi_handle) {
+ list_del(&agg_vsi_info->list_entry);
+ devm_kfree(ice_hw_to_dev(pi->hw),
+ agg_vsi_info);
+ return;
+ }
+ }
+}
+
+/**
+ * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @owner: LAN or RDMA
+ *
+ * This function removes the VSI and its LAN or RDMA children nodes from the
+ * scheduler tree.
+ */
+static enum ice_status
+ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_vsi_ctx *vsi_ctx;
+ u8 i, j = 0;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return status;
+ mutex_lock(&pi->sched_lock);
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ goto exit_sched_rm_vsi_cfg;
+
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ struct ice_sched_node *vsi_node, *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, i);
+ if (!tc_node)
+ continue;
+
+ vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
+ if (!vsi_node)
+ continue;
+
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner) {
+ ice_free_sched_node(pi, vsi_node->children[j]);
+
+ /* reset the counter again since the num
+ * children will be updated after node removal
+ */
+ j = 0;
+ } else {
+ j++;
+ }
+ }
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children) {
+ ice_free_sched_node(pi, vsi_node);
+ vsi_ctx->sched.vsi_node[i] = NULL;
+
+ /* clean up agg related vsi info if any */
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+ }
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ vsi_ctx->sched.max_lanq[i] = 0;
+ }
+ status = 0;
+
+exit_sched_rm_vsi_cfg:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its LAN children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
+}
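ice_sched_rm_vsi_cfg() above restarts its child scan from index 0 after every removal because freeing a node compacts the children array and shrinks num_children. A standalone sketch of that pattern (the node type and the free_node() callback, which is assumed to unlink the child and decrement num_children, are stand-ins):

struct sketch_node {
	struct sketch_node **children;
	unsigned int num_children;
	int owner;
};

static void rm_children_by_owner(struct sketch_node *parent, int owner,
				 void (*free_node)(struct sketch_node *))
{
	unsigned int j = 0;

	while (j < parent->num_children) {
		if (parent->children[j]->owner == owner) {
			free_node(parent->children[j]);
			j = 0;		/* array was compacted, rescan */
		} else {
			j++;
		}
	}
}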
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 5dc9cfa04c58..da5b4c166da8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -12,6 +12,7 @@
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ u16 vsi_handle;
};
struct ice_sched_agg_info {
@@ -25,6 +26,7 @@ struct ice_sched_agg_info {
/* FW AQ command calls */
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
@@ -39,4 +41,5 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 027eba4e13f8..533b989a23e1 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -46,7 +46,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
* @link_speed: variable containing the link_speed to be converted
*
* Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps. Else return
+ * If adv_link_support is true, then return link speed in Mbps. Else return
* link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
* needs to cast back to an enum virtchnl_link_speed in the case where
* adv_link_support is false, but when adv_link_support is true the caller can
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 40c9c6558956..2e5693107fa4 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -92,8 +92,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
-enum ice_status
-ice_init_def_sw_recp(struct ice_hw *hw)
+enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
{
struct ice_sw_recipe *recps;
u8 i;
@@ -130,7 +129,7 @@ ice_init_def_sw_recp(struct ice_hw *hw)
*
* NOTE: *req_desc is both an input/output parameter.
* The caller of this function first calls this function with *request_desc set
- * to 0. If the response from f/w has *req_desc set to 0, all the switch
+ * to 0. If the response from f/w has *req_desc set to 0, all the switch
* configuration information has been returned; if non-zero (meaning not all
* the information was returned), the caller should call this function again
* with *req_desc set to the previous value returned by f/w to get the
@@ -629,25 +628,36 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
/**
* ice_fill_sw_info - Helper function to populate lb_en and lan_en
* @hw: pointer to the hardware structure
- * @f_info: filter info structure to fill/update
+ * @fi: filter info structure to fill/update
*
* This helper function populates the lb_en and lan_en elements of the provided
* ice_fltr_info struct using the switch's type and characteristics of the
* switch rule being configured.
*/
-static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
+static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
- f_info->lb_en = false;
- f_info->lan_en = false;
- if ((f_info->flag & ICE_FLTR_TX) &&
- (f_info->fltr_act == ICE_FWD_TO_VSI ||
- f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
- f_info->fltr_act == ICE_FWD_TO_Q ||
- f_info->fltr_act == ICE_FWD_TO_QGRP)) {
- f_info->lb_en = true;
- if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
- is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
- f_info->lan_en = true;
+ fi->lb_en = false;
+ fi->lan_en = false;
+ if ((fi->flag & ICE_FLTR_TX) &&
+ (fi->fltr_act == ICE_FWD_TO_VSI ||
+ fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+ fi->fltr_act == ICE_FWD_TO_Q ||
+ fi->fltr_act == ICE_FWD_TO_QGRP)) {
+ fi->lb_en = true;
+ /* Do not set lan_en to TRUE if
+ * 1. The switch is a VEB AND
+ * 2. Either of the following is true:
+ * 2.1 The lookup is MAC with unicast addr for MAC, OR
+ * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
+ *
+ * In all other cases, the LAN enable has to be set to true.
+ */
+ if (!(hw->evb_veb &&
+ ((fi->lkup_type == ICE_SW_LKUP_MAC &&
+ is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+ is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+ fi->lan_en = true;
}
}
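The rewritten condition in ice_fill_sw_info() boils down to one predicate: LAN enable is suppressed only when the switch is a VEB and the filter is a unicast MAC or unicast MAC+VLAN lookup. Restated as a sketch:

static bool want_lan_en(bool is_veb, bool mac_ucast, bool mac_vlan_ucast)
{
	/* suppress LAN enable only for VEB + unicast MAC(+VLAN) lookups;
	 * every other Tx forwarding rule keeps lan_en set
	 */
	return !(is_veb && (mac_ucast || mac_vlan_ucast));
}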
@@ -817,7 +827,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer:
* 1. Large Action
- * 2. Look up tx rx
+ * 2. Look up Tx Rx
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
@@ -861,7 +871,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
- /* call the fill switch rule to fill the lookup tx rx structure */
+ /* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
@@ -1158,8 +1168,8 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Call AQ command to add or update previously created VSI list with new VSI.
*
* Helper function to do book keeping associated with adding filter information
- * The algorithm to do the booking keeping is described below :
- * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
+ * The algorithm to do the bookkeeping is described below:
+ * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
* if only one VSI has been added till now
* Allocate a new VSI list and add two VSIs
* to this list using switch rule command
@@ -1237,6 +1247,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle = new_fltr->vsi_handle;
enum ice_adminq_opc opcode;
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_CFG;
+
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
return 0;
@@ -1853,7 +1866,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
/* Update the previous switch rule to a new VSI list which
- * includes current VSI thats requested
+ * includes current VSI that is requested
*/
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index fe5bbabbb41e..49fc38094185 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
/**
* ice_setup_tx_ring - Allocate the Tx descriptors
- * @tx_ring: the tx ring to set up
+ * @tx_ring: the Tx ring to set up
*
* Return 0 on success, negative on error
*/
@@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
/**
* ice_setup_rx_ring - Allocate the Rx descriptors
- * @rx_ring: the rx ring to set up
+ * @rx_ring: the Rx ring to set up
*
* Return 0 on success, negative on error
*/
@@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
rx_ring->next_to_alloc = val;
/* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
+ * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
@@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
/**
* ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
+ * @rx_ring: Rx descriptor ring to store buffers on
* @old_buf: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
@@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
/**
* ice_fetch_rx_buf - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
* @rx_desc: descriptor containing info written by hardware
*
* This function allocates an skb on the fly, and populates it with the page
@@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
* ice_pull_tail - ice specific version of skb_pull_tail
* @skb: pointer to current skb being adjusted
*
- * This function is an ice specific version of __pskb_pull_tail. The
+ * This function is an ice specific version of __pskb_pull_tail. The
* main difference between this version and the original function is that
* this function can make several assumptions about the state of things
* that allow for significant optimizations versus the standard function.
@@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
+ * This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
@@ -904,7 +904,7 @@ checksum_fail:
/**
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_ring: Rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
* @ptype: the packet type decoded by hardware
@@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
/**
* ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
+ * @rx_ring: Rx ring in play
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
*
@@ -946,11 +946,11 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
*
* This function provides a "bounce buffer" approach to Rx interrupt
- * processing. The advantage to this is that on systems that have
+ * processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
* it by maintaining the mapping of the page to the system.
*
@@ -1103,11 +1103,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete_done(napi, work_done);
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
- return 0;
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+
+ return min(work_done, budget - 1);
}
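The same completion pattern is applied to igb, igbvf and igc later in this diff: stay in polling mode while work remains, re-arm the interrupt only if napi_complete_done() reports that NAPI really completed (busy-polling may keep the context alive), and never report the full budget as consumed. A generic sketch (the callback parameter is a placeholder for the driver-specific IRQ re-enable):

static int poll_done_sketch(struct napi_struct *napi, int budget,
			    int work_done, bool clean_complete,
			    void (*reenable_irq)(struct napi_struct *))
{
	if (!clean_complete)
		return budget;			/* keep polling */

	/* only re-enable the interrupt if NAPI actually completed */
	if (likely(napi_complete_done(napi, work_done)))
		reenable_irq(napi);

	return min(work_done, budget - 1);
}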
/* helper function for building cmd/type/offset */
@@ -1122,7 +1125,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
- * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
+ * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
@@ -1145,7 +1148,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
}
/**
- * ice_maybe_stop_tx - 1st level check for tx stop conditions
+ * ice_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
@@ -1155,6 +1158,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
+
return __ice_maybe_stop_tx(tx_ring, size);
}
@@ -1552,7 +1556,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* Finally, we add one to round up. Because 256 isn't an exact multiple of
* 3, we'll underestimate near each multiple of 12K. This is actually more
* accurate as we have 4K - 1 of wiggle room that we can fit into the last
- * segment. For our purposes this is accurate out to 1M which is orders of
+ * segment. For our purposes this is accurate out to 1M which is orders of
* magnitude greater than our largest possible GSO size.
*
* This would then be implemented as:
@@ -1568,7 +1572,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
}
/**
- * ice_xmit_desc_count - calculate number of tx descriptors needed
+ * ice_xmit_desc_count - calculate number of Tx descriptors needed
* @skb: send buffer
*
* Returns number of data descriptors needed for this skb.
@@ -1620,7 +1624,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
nr_frags -= ICE_MAX_BUF_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
- /* Initialize size to the negative value of gso_size minus 1. We
+ /* Initialize size to the negative value of gso_size minus 1. We
* use this as the worst case scenario in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f4dbc81c1988..0ea428104215 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -124,6 +124,8 @@ struct ice_phy_info {
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
+ u32 valid_functions;
+
/* TX/RX queues */
u16 num_rxq; /* Number/Total RX queues */
u16 rxq_first_id; /* First queue ID for RX queues */
@@ -150,7 +152,7 @@ struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
u32 num_allocd_vfs; /* Number of allocated VFs */
u32 vf_base_id; /* Logical ID of the first VF */
- u32 guaranteed_num_vsi;
+ u32 guar_num_vsi;
};
/* Device wide capabilities */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e71065f9d391..05ff4f910649 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -156,8 +156,6 @@ static void ice_free_vf_res(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
-/***********************enable_vf routines*****************************/
-
/**
* ice_dis_vf_mappings
* @vf: pointer to the VF structure
@@ -215,6 +213,15 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
@@ -228,15 +235,6 @@ void ice_free_vfs(struct ice_pf *pf)
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
}
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0;
@@ -454,7 +452,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
- * We don't want to reconfigure interrupts since AVF driver doesn't
+ * We don't want to reconfigure interrupts since AVF driver doesn't
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
@@ -1105,7 +1103,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
* ice_process_vflr_event - Free VF resources via IRQ calls
* @pf: pointer to the PF structure
*
- * called from the VLFR IRQ handler to
+ * called from the VFLR IRQ handler to
* free up VF resources and state variables
*/
void ice_process_vflr_event(struct ice_pf *pf)
@@ -1764,7 +1762,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Tx queue info from VF into VSI */
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
- /* copy Rx queue info from VF into vsi */
+ /* copy Rx queue info from VF into VSI */
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
@@ -1830,7 +1828,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
* @msg: pointer to the msg buffer
* @set: true if mac filters are being set, false otherwise
*
- * add guest mac address filter
+ * add guest MAC address filter
*/
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
@@ -1968,9 +1966,9 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
* @msg: pointer to the msg buffer
*
* VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, PF will reset the VF and
+ * different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to VF.
+ * available queue pairs via virtchnl message response to VF.
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
@@ -1991,7 +1989,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
- "VF %d tried to request %d queues. Ignoring.\n",
+ "VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
} else if (req_queues > ICE_MAX_QS_PER_VF) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 10131e0180f9..01470a8ee03a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,7 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
- u8 num_req_qs; /* num of queue pairs requested by VF */
+ u8 num_req_qs; /* num of queue pairs requested by VF */
};
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 8a28f3388f69..01fcfc6f3415 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -334,6 +334,7 @@
#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I210_RXPBSIZE_MASK 0x0000003F
+#define I210_RXPBSIZE_PB_30KB 0x0000001E
#define I210_RXPBSIZE_PB_32KB 0x00000020
#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
#define I210_TXPBSIZE_MASK 0xC0FFFFFF
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ca54e268d157..fe1592ae8769 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -515,7 +515,7 @@ struct igb_adapter {
/* OS defined structs */
struct pci_dev *pdev;
- spinlock_t stats64_lock;
+ struct mutex stats64_lock;
struct rtnl_link_stats64 stats64;
/* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 5acf3b743876..7426060b678f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2113,7 +2113,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
return -EOPNOTSUPP;
if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
@@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
int i, j;
char *p;
- spin_lock(&adapter->stats64_lock);
+ mutex_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
@@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
- spin_unlock(&adapter->stats64_lock);
+ mutex_unlock(&adapter->stats64_lock);
}
static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
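The stats64_lock conversion above swaps the spinlock for a mutex; presumably all of its users (ethtool stats, the watchdog task, igb_down() and ndo_get_stats64) run in process context, so a sleeping lock is acceptable around igb_update_stats(). A sketch of the converted ndo_get_stats64 path, with names taken from the hunks:

static void igb_get_stats64_sketch(struct igb_adapter *adapter,
				   struct rtnl_link_stats64 *stats)
{
	mutex_lock(&adapter->stats64_lock);	/* was spin_lock() */
	igb_update_stats(adapter);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	mutex_unlock(&adapter->stats64_lock);	/* was spin_unlock() */
}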
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5df88ad8ac81..87bdf1604ae2 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1850,13 +1850,12 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
* configuration' in respect to these parameters.
*/
- netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \
- idleslope %d sendslope %d hiCredit %d \
- locredit %d\n",
- (ring->cbs_enable) ? "enabled" : "disabled",
- (ring->launchtime_enable) ? "enabled" : "disabled", queue,
- ring->idleslope, ring->sendslope, ring->hicredit,
- ring->locredit);
+ netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
+ ring->cbs_enable ? "enabled" : "disabled",
+ ring->launchtime_enable ? "enabled" : "disabled",
+ queue,
+ ring->idleslope, ring->sendslope,
+ ring->hicredit, ring->locredit);
}
static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
@@ -1935,7 +1934,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
val = rd32(E1000_RXPBS);
val &= ~I210_RXPBSIZE_MASK;
- val |= I210_RXPBSIZE_PB_32KB;
+ val |= I210_RXPBSIZE_PB_30KB;
wr32(E1000_RXPBS, val);
/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
@@ -2204,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter)
del_timer_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
- spin_lock(&adapter->stats64_lock);
+ mutex_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- spin_unlock(&adapter->stats64_lock);
+ mutex_unlock(&adapter->stats64_lock);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -3841,7 +3840,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
- spin_lock_init(&adapter->stats64_lock);
+ mutex_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -5407,9 +5406,9 @@ no_wait:
}
}
- spin_lock(&adapter->stats64_lock);
+ mutex_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- spin_unlock(&adapter->stats64_lock);
+ mutex_unlock(&adapter->stats64_lock);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -6019,6 +6018,8 @@ static int igb_tx_map(struct igb_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -6147,8 +6148,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
else if (!tso)
igb_tx_csum(tx_ring, first);
- skb_tx_timestamp(skb);
-
if (igb_tx_map(tx_ring, first, hdr_len))
goto cleanup_tx_tstamp;
@@ -6236,10 +6235,10 @@ static void igb_get_stats64(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- spin_lock(&adapter->stats64_lock);
+ mutex_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
memcpy(stats, &adapter->stats64, sizeof(*stats));
- spin_unlock(&adapter->stats64_lock);
+ mutex_unlock(&adapter->stats64_lock);
}
/**
@@ -7753,11 +7752,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* If not enough Rx work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- igb_ring_irq_enable(q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ igb_ring_irq_enable(q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
/**
@@ -8770,9 +8771,11 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
rtnl_unlock();
#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
+ if (!runtime) {
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+ }
#endif
status = rd32(E1000_STATUS);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 2b95dc9c7a6a..fd3071f55bd3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -277,17 +277,53 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
unsigned long flags;
+ u32 lo, hi;
u64 ns;
spin_lock_irqsave(&igb->tmreg_lock, flags);
- ns = timecounter_read(&igb->tc);
+ ptp_read_system_prets(sts);
+ lo = rd32(E1000_SYSTIML);
+ ptp_read_system_postts(sts);
+ hi = rd32(E1000_SYSTIMH);
+
+ ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ unsigned long flags;
+ u32 lo, hi;
+ u64 ns;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ ptp_read_system_prets(sts);
+ rd32(E1000_SYSTIMR);
+ ptp_read_system_postts(sts);
+ lo = rd32(E1000_SYSTIML);
+ hi = rd32(E1000_SYSTIMH);
+
+ ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
@@ -296,16 +332,22 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
return 0;
}
-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
unsigned long flags;
spin_lock_irqsave(&igb->tmreg_lock, flags);
- igb_ptp_read_i210(igb, ts);
+ ptp_read_system_prets(sts);
+ rd32(E1000_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = rd32(E1000_SYSTIML);
+ ts->tv_sec = rd32(E1000_SYSTIMH);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
@@ -658,9 +700,12 @@ static void igb_ptp_overflow_check(struct work_struct *work)
struct igb_adapter *igb =
container_of(work, struct igb_adapter, ptp_overflow_work.work);
struct timespec64 ts;
+ u64 ns;
- igb->ptp_caps.gettime64(&igb->ptp_caps, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&igb->tc);
+ ts = ns_to_timespec64(ns);
pr_debug("igb overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
@@ -1126,7 +1171,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
@@ -1145,7 +1190,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
@@ -1173,7 +1218,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210;
adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
adapter->ptp_caps.verify = igb_ptp_verify_pin;
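The new gettimex64 handlers bracket only the register read that latches the hardware timer with system-clock snapshots, so the PTP core can correlate device time with system time. Reduced to a sketch (the read_reg() callback stands in for the SYSTIM register accesses; the cycles-to-nanoseconds conversion is omitted):

static int gettimex_sketch(struct ptp_system_timestamp *sts,
			   u32 (*read_reg)(unsigned int reg), u64 *cycles)
{
	u32 lo, hi;

	ptp_read_system_prets(sts);	/* system time just before the read */
	lo = read_reg(0);		/* latching read (SYSTIML / SYSTIMR) */
	ptp_read_system_postts(sts);	/* system time just after the read */
	hi = read_reg(1);		/* remaining, already-latched half */

	*cycles = ((u64)hi << 32) | lo;
	return 0;
}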
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 163e5838f7c2..a3cd7ac48d4b 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -241,7 +241,7 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
@@ -279,7 +279,7 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 820d49eb41ab..4eab83faec62 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1186,10 +1186,13 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
igbvf_clean_rx_irq(adapter, &work_done, budget);
- /* If not enough Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done == budget)
+ return budget;
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
if (adapter->requested_itr & 3)
igbvf_set_itr(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index cdf18a5d9e08..b1039dd3dd13 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -5,23 +5,12 @@
#define _IGC_H_
#include <linux/kobject.h>
-
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-
#include <linux/ethtool.h>
-
#include <linux/sctp.h>
-#define IGC_ERR(args...) pr_err("igc: " args)
-
-#define PFX "igc: "
-
-#include <linux/timecounter.h>
-#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
-
#include "igc_hw.h"
/* main */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 832da609d9a7..df40af759542 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -237,7 +237,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
- u32 ctrl_ext;
if (hw->phy.media_type != igc_media_type_copper) {
phy->type = igc_phy_none;
@@ -247,8 +246,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
phy->reset_delay_us = 100;
- ctrl_ext = rd32(IGC_CTRL_EXT);
-
/* set lan id */
hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
IGC_STATUS_FUNC_SHIFT;
@@ -287,8 +284,6 @@ out:
static s32 igc_get_invariants_base(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
- u32 link_mode = 0;
- u32 ctrl_ext = 0;
s32 ret_val = 0;
switch (hw->device_id) {
@@ -302,9 +297,6 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
hw->phy.media_type = igc_media_type_copper;
- ctrl_ext = rd32(IGC_CTRL_EXT);
- link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
-
/* mac initialization and operations */
ret_val = igc_init_mac_params_base(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9d85707e8a81..f20183037fb2 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -865,6 +865,8 @@ static int igc_tx_map(struct igc_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -959,8 +961,6 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- skb_tx_timestamp(skb);
-
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
@@ -1108,7 +1108,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
/* update pointers within the skb to store the data */
skb_reserve(skb, IGC_SKB_PAD);
- __skb_put(skb, size);
+ __skb_put(skb, size);
/* update buffer offset */
#if (PAGE_SIZE < 8192)
@@ -1160,9 +1160,9 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
(va + headlen) - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
+ rx_buffer->page_offset ^= truesize;
#else
- rx_buffer->page_offset += truesize;
+ rx_buffer->page_offset += truesize;
#endif
} else {
rx_buffer->pagecnt_bias++;
@@ -1668,8 +1668,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
tx_buffer->next_to_watch,
jiffies,
tx_buffer->next_to_watch->wb.status);
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
/* we are about to reset, no point in enabling stuff */
return true;
@@ -1700,20 +1700,6 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
}
/**
- * igc_ioctl - I/O control method
- * @netdev: network interface device structure
- * @ifreq: frequency
- * @cmd: command
- */
-static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
*/
@@ -2866,11 +2852,13 @@ static int igc_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* If not enough Rx work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- igc_ring_irq_enable(q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ igc_ring_irq_enable(q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
/**
@@ -3358,7 +3346,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
if (err)
goto err_set_queues;
@@ -3445,7 +3433,6 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
- .ndo_do_ioctl = igc_ioctl,
};
/* PCIe configuration access */
@@ -3532,26 +3519,23 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- IGC_ERR("Wrong DMA configuration, aborting\n");
+ dev_err(&pdev->dev, "igc: Wrong DMA config\n");
goto err_dma;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 143bdd5ee2a0..08d85e336bd4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -12,6 +12,7 @@
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
+#include <linux/phy.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
@@ -561,6 +562,7 @@ struct ixgbe_adapter {
struct net_device *netdev;
struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
+ struct mii_bus *mii_bus;
unsigned long state;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 732b1e6ecc43..acba067cc15a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2206,7 +2206,8 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_FILTER))
return -EOPNOTSUPP;
if (ixgbe_wol_exclusion(adapter, wol))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index fd1b0546fd67..ff85ce5791a3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -4,6 +4,7 @@
#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
+#include <linux/if_bridge.h>
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
@@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
struct tx_sa tsa;
- if (adapter->num_vfs)
+ if (adapter->num_vfs &&
+ adapter->bridge_mode != BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
/* find the first unused index */
@@ -1063,11 +1065,13 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
+ struct sec_path *sp;
struct tx_sa *tsa;
- if (unlikely(!first->skb->sp->len)) {
+ sp = skb_sec_path(first->skb);
+ if (unlikely(!sp->len)) {
netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
- __func__, first->skb->sp->len);
+ __func__, sp->len);
return 0;
}
@@ -1157,6 +1161,7 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
struct xfrm_state *xs = NULL;
struct ipv6hdr *ip6 = NULL;
struct iphdr *ip4 = NULL;
+ struct sec_path *sp;
void *daddr;
__be32 spi;
u8 *c_hdr;
@@ -1196,12 +1201,12 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
if (unlikely(!xs))
return;
- skb->sp = secpath_dup(skb->sp);
- if (unlikely(!skb->sp))
+ sp = secpath_set(skb);
+ if (unlikely(!sp))
return;
- skb->sp->xvec[skb->sp->len++] = xs;
- skb->sp->olen++;
+ sp->xvec[sp->len++] = xs;
+ sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
xo->status = CRYPTO_SUCCESS;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 113b38e0defb..daff8183534b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -39,6 +39,7 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"
@@ -6077,9 +6078,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* Disable Rx */
ixgbe_disable_rx(adapter);
- /* synchronize_sched() needed for pending XDP buffers to drain */
+ /* synchronize_rcu() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
- synchronize_sched();
+ synchronize_rcu();
ixgbe_irq_disable(adapter);
@@ -8269,6 +8270,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/*
* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
@@ -8646,8 +8649,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
}
}
- skb_tx_timestamp(skb);
-
#ifdef CONFIG_PCI_IOV
/*
* Use the l2switch_enable flag - would be false if the DMA
@@ -8695,7 +8696,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_IPSEC
- if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+ if (secpath_exists(skb) &&
+ !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
@@ -8789,6 +8791,15 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
u16 value;
int rc;
+ if (adapter->mii_bus) {
+ int regnum = addr;
+
+ if (devad != MDIO_DEVAD_NONE)
+ regnum |= (devad << 16) | MII_ADDR_C45;
+
+ return mdiobus_read(adapter->mii_bus, prtad, regnum);
+ }
+
if (prtad != hw->phy.mdio.prtad)
return -EINVAL;
rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
@@ -8803,6 +8814,15 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ if (adapter->mii_bus) {
+ int regnum = addr;
+
+ if (devad != MDIO_DEVAD_NONE)
+ regnum |= (devad << 16) | MII_ADDR_C45;
+
+ return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
+ }
+
if (prtad != hw->phy.mdio.prtad)
return -EINVAL;
return hw->phy.ops.write_reg(hw, addr, devad, value);
@@ -9979,7 +9999,8 @@ static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
}
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags)
+ struct nlmsghdr *nlh, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -10191,7 +10212,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
*/
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
#ifdef CONFIG_IXGBE_IPSEC
- if (!skb->sp)
+ if (!secpath_exists(skb))
#endif
features &= ~NETIF_F_TSO;
}
@@ -10476,7 +10497,7 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
ixgbe_disable_rxr_hw(adapter, rx_ring);
if (xdp_ring)
- synchronize_sched();
+ synchronize_rcu();
/* Rx/Tx/XDP Tx share the same napi context. */
napi_disable(&rx_ring->q_vector->napi);
@@ -10517,7 +10538,8 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
ixgbe_configure_rx_ring(adapter, rx_ring);
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
- clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+ if (xdp_ring)
+ clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
}
/**
@@ -11119,6 +11141,8 @@ skip_sriov:
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
+ ixgbe_mii_bus_init(hw);
+
return 0;
err_register:
@@ -11169,6 +11193,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
+ if (adapter->mii_bus)
+ mdiobus_unregister(adapter->mii_bus);
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
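
The mii_bus branches added to ixgbe_mdio_read()/ixgbe_mdio_write() above fold the clause-45 device address into the mdiobus register number. A stand-alone sketch of that encoding; MII_ADDR_C45 mirrors the bit-30 flag the kernel used at the time, encode_regnum() is a made-up helper and the register values are illustrative:

#include <stdio.h>

#define MII_ADDR_C45	(1 << 30)	/* mirrors the kernel's clause-45 flag */
#define MDIO_DEVAD_NONE	(-1)

static int encode_regnum(int devad, int addr)
{
	int regnum = addr;

	if (devad != MDIO_DEVAD_NONE)
		regnum |= (devad << 16) | MII_ADDR_C45;
	return regnum;
}

int main(void)
{
	/* clause 22: register 1, no device address */
	printf("c22 regnum: 0x%x\n", encode_regnum(MDIO_DEVAD_NONE, 1));
	/* clause 45: PMA/PMD (devad 1), register 0 */
	printf("c45 regnum: 0x%x\n", encode_regnum(1, 0));
	return 0;
}
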
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 919a7af84b42..cc4907f9ff02 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -3,6 +3,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/sched.h>
#include "ixgbe.h"
@@ -658,6 +659,304 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
return status;
}
+#define IXGBE_HW_READ_REG(addr) IXGBE_READ_REG(hw, addr)
+
+/**
+ * ixgbe_msca_cmd - Write the command register and poll for completion/timeout
+ * @hw: pointer to hardware structure
+ * @cmd: command register value to write
+ **/
+static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
+
+ return readx_poll_timeout(IXGBE_HW_READ_REG, IXGBE_MSCA, cmd,
+ !(cmd & IXGBE_MSCA_MDI_COMMAND), 10,
+ 10 * IXGBE_MDIO_COMMAND_TIMEOUT);
+}
+
+/**
+ * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @regnum: register number
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
+ int regnum, u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 data;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ if (regnum & MII_ADDR_C45) {
+ hwaddr |= regnum & GENMASK(21, 0);
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
+ } else {
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
+ IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
+ }
+
+ data = ixgbe_msca_cmd(hw, cmd);
+ if (data < 0)
+ goto mii_bus_read_done;
+
+ /* For a clause 45 access the address cycle just completed; we still
+ * need to do the read command. Otherwise just get the data
+ */
+ if (!(regnum & MII_ADDR_C45))
+ goto do_mii_bus_read;
+
+ cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
+ data = ixgbe_msca_cmd(hw, cmd);
+ if (data < 0)
+ goto mii_bus_read_done;
+
+do_mii_bus_read:
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
+
+mii_bus_read_done:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return data;
+}
+
+/**
+ * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
+ int regnum, u16 val, u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 err;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ if (regnum & MII_ADDR_C45) {
+ hwaddr |= regnum & GENMASK(21, 0);
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
+ } else {
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+ }
+
+ /* For clause 45 this is an address cycle, for clause 22 this is the
+ * entire transaction
+ */
+ err = ixgbe_msca_cmd(hw, cmd);
+ if (err < 0 || !(regnum & MII_ADDR_C45))
+ goto mii_bus_write_done;
+
+ cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
+ err = ixgbe_msca_cmd(hw, cmd);
+
+mii_bus_write_done:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return err;
+}
+
+/**
+ * ixgbe_mii_bus_read - Read a clause 22/45 register
+ * @bus: pointer to mii_bus structure
+ * @addr: address
+ * @regnum: register number
+ **/
+static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_write - Write a clause 22/45 register
+ * @bus: pointer to mii_bus structure
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
+ * @bus: pointer to mii_bus structure
+ * @addr: address
+ * @regnum: register number
+ **/
+static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
+ int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
+ * @bus: pointer to mii_bus structure
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr,
+ int regnum, u16 val)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+}
+
+/**
+ * ixgbe_get_first_secondary_devfn - get first device downstream of root port
+ * @devfn: PCI_DEVFN of root port on domain 0, bus 0
+ *
+ * Returns pci_dev pointer to PCI_DEVFN(0, 0) on subordinate side of root
+ * on domain 0, bus 0, devfn = 'devfn'
+ **/
+static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
+{
+ struct pci_dev *rp_pdev;
+ int bus;
+
+ rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
+ if (rp_pdev && rp_pdev->subordinate) {
+ bus = rp_pdev->subordinate->number;
+ return pci_get_domain_bus_and_slot(0, bus, 0);
+ }
+
+ return NULL;
+}
+
+/**
+ * ixgbe_x550em_a_has_mii - is this the first ixgbe x550em_a PCI function?
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if hw points to lowest numbered PCI B:D.F x550_em_a device in
+ * the SoC. There are up to 4 MACs sharing a single MDIO bus on the x550em_a,
+ * but we only want to register one MDIO bus.
+ **/
+static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *func0_pdev;
+
+ /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
+ * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0
+ * It's not valid for function 0 to be disabled while function 1 is up,
+ * so the lowest numbered ixgbe dev will be device 0 function 0 on one
+ * of those two root ports
+ */
+ func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
+ if (func0_pdev) {
+ if (func0_pdev == pdev)
+ return true;
+ else
+ return false;
+ }
+ func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
+ if (func0_pdev == pdev)
+ return true;
+
+ return false;
+}
+
+/**
+ * ixgbe_mii_bus_init - mii_bus structure setup
+ * @hw: pointer to hardware structure
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbe_mii_bus_init initializes a mii_bus structure in adapter
+ **/
+s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = &adapter->netdev->dev;
+ struct mii_bus *bus;
+
+ adapter->mii_bus = devm_mdiobus_alloc(dev);
+ if (!adapter->mii_bus)
+ return -ENOMEM;
+
+ bus = adapter->mii_bus;
+
+ switch (hw->device_id) {
+ /* C3000 SoCs */
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ if (!ixgbe_x550em_a_has_mii(hw))
+ goto ixgbe_no_mii_bus;
+ bus->read = &ixgbe_x550em_a_mii_bus_read;
+ bus->write = &ixgbe_x550em_a_mii_bus_write;
+ break;
+ default:
+ bus->read = &ixgbe_mii_bus_read;
+ bus->write = &ixgbe_mii_bus_write;
+ break;
+ }
+
+ /* Use the position of the device in the PCI hierarchy as the id */
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
+ pci_name(pdev));
+
+ bus->name = "ixgbe-mdio";
+ bus->priv = adapter;
+ bus->parent = dev;
+ bus->phy_mask = GENMASK(31, 0);
+
+ /* Support clause 22/45 natively. ixgbe_probe() sets MDIO_EMULATE_C22;
+ * unfortunately that causes some clause 22 frames to be sent with
+ * clause 45 addressing. We don't want that.
+ */
+ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
+
+ return mdiobus_register(bus);
+
+ixgbe_no_mii_bus:
+ devm_mdiobus_free(dev, bus);
+ adapter->mii_bus = NULL;
+ return -ENODEV;
+}
+
/**
* ixgbe_setup_phy_link_generic - Set and restart autoneg
* @hw: pointer to hardware structure
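
readx_poll_timeout() from <linux/iopoll.h> expects a read op that takes a single argument, which is why the ixgbe_phy.c hunk defines the IXGBE_HW_READ_REG(addr) wrapper around IXGBE_READ_REG(hw, addr). A simplified, hand-expanded sketch of what the ixgbe_msca_cmd() poll amounts to (the real macro sleeps with usleep_range() and tracks the timeout with ktime; this is illustrative only):

static s32 msca_poll_sketch(struct ixgbe_hw *hw)
{
	u32 cmd;
	int i;

	/* poll IXGBE_MSCA until the MDI_COMMAND bit clears or we give up */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		cmd = IXGBE_HW_READ_REG(IXGBE_MSCA);	/* one-argument read op */
		if (!(cmd & IXGBE_MSCA_MDI_COMMAND))
			return 0;			/* command completed */
		udelay(10);
	}
	return -ETIMEDOUT;
}
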
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 64e44e01c973..214b01085718 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -120,6 +120,8 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw);
+
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index b3e0d8bb5cbd..d81a50dc9535 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -443,22 +443,52 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
/**
- * ixgbe_ptp_gettime
+ * ixgbe_ptp_gettimex
* @ptp: the ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec to hold the PHC timestamp
+ * @sts: structure to hold the system time before and after reading the PHC
*
* read the timecounter and return the correct value in ns,
* after converting it into a struct timespec64.
*/
-static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
+ struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
- u64 ns;
+ u64 ns, stamp;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
- ns = timecounter_read(&adapter->hw_tc);
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ /* Upper 32 bits represent billions of cycles, lower 32 bits
+ * represent cycles. However, we use timespec64_to_ns for the
+ * correct math even though the units haven't been corrected
+ * yet.
+ */
+ ptp_read_system_prets(sts);
+ IXGBE_READ_REG(hw, IXGBE_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ ts->tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH);
+ stamp = timespec64_to_ns(ts);
+ break;
+ default:
+ ptp_read_system_prets(sts);
+ stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ ptp_read_system_postts(sts);
+ stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+ break;
+ }
+
+ ns = timecounter_cyc2time(&adapter->hw_tc, stamp);
+
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -567,10 +597,14 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
IXGBE_OVERFLOW_PERIOD);
- struct timespec64 ts;
+ unsigned long flags;
if (timeout) {
- ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
+ /* Update the timecounter */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ timecounter_read(&adapter->hw_tc);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
adapter->last_overflow_check = jiffies;
}
}
@@ -1216,7 +1250,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540;
@@ -1233,7 +1267,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
@@ -1249,7 +1283,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
adapter->ptp_setup_sdp = NULL;
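
On X550-class MACs the gettimex path above reads SYSTIMR to latch the time, then SYSTIML/SYSTIMH, bracketing the latching read with ptp_read_system_prets()/ptp_read_system_postts() so the PHC value can be correlated with system time. Stuffing the two words into tv_sec/tv_nsec and calling timespec64_to_ns() then amounts to "upper * 1e9 + lower"; a stand-alone illustration with made-up register values:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* made-up register reads: SYSTIMH (upper word) and SYSTIML (lower word) */
	uint32_t systimh = 3;		/* "billions of cycles" */
	uint32_t systiml = 250000000;	/* cycles */

	/* what timespec64_to_ns() computes for tv_sec=systimh, tv_nsec=systiml */
	uint64_t stamp = (uint64_t)systimh * NSEC_PER_SEC + systiml;

	printf("raw cyclecounter stamp = %llu\n", (unsigned long long)stamp);
	return 0;
}
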
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index e8a3231be0bf..5170dd9d8705 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -450,12 +450,14 @@ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
+ struct sec_path *sp;
struct tx_sa *tsa;
u16 sa_idx;
- if (unlikely(!first->skb->sp->len)) {
+ sp = skb_sec_path(first->skb);
+ if (unlikely(!sp->len)) {
netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
- __func__, first->skb->sp->len);
+ __func__, sp->len);
return 0;
}
@@ -546,6 +548,7 @@ void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
struct xfrm_state *xs = NULL;
struct ipv6hdr *ip6 = NULL;
struct iphdr *ip4 = NULL;
+ struct sec_path *sp;
void *daddr;
__be32 spi;
u8 *c_hdr;
@@ -585,12 +588,12 @@ void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
if (unlikely(!xs))
return;
- skb->sp = secpath_dup(skb->sp);
- if (unlikely(!skb->sp))
+ sp = secpath_set(skb);
+ if (unlikely(!sp))
return;
- skb->sp->xvec[skb->sp->len++] = xs;
- skb->sp->olen++;
+ sp->xvec[sp->len++] = xs;
+ sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
xo->status = CRYPTO_SUCCESS;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 5e47ede7e832..49e23afa05a2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1293,16 +1293,20 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
- /* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- if (adapter->rx_itr_setting == 1)
- ixgbevf_set_itr(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
- !test_bit(__IXGBEVF_REMOVING, &adapter->state))
- ixgbevf_irq_enable_queues(adapter,
- BIT(q_vector->v_idx));
- return 0;
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (adapter->rx_itr_setting == 1)
+ ixgbevf_set_itr(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter,
+ BIT(q_vector->v_idx));
+ }
+
+ return min(work_done, budget - 1);
}
/**
@@ -4016,6 +4020,8 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -4151,7 +4157,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->protocol = vlan_get_protocol(skb);
#ifdef CONFIG_IXGBEVF_IPSEC
- if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+ if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
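
The ixgbevf poll rework above follows the usual busy-poll-aware completion pattern: napi_complete_done() returns false while a busy-polling socket owns the NAPI instance, in which case interrupts must stay off, and a completed handler reports strictly less than the full budget. A generic sketch of that tail (names are placeholders, not ixgbevf symbols):

static int sketch_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;	/* = sketch_clean_rings(napi, budget); */

	if (work_done == budget)
		return budget;			/* still busy: stay in polling mode */

	if (likely(napi_complete_done(napi, work_done))) {
		/* not busy-polled: safe to re-arm the queue interrupt here */
	}

	return min(work_done, budget - 1);	/* never report the full budget */
}
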
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1e9bcbdc6a90..2f427271a793 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1499,23 +1499,16 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
struct ethtool_link_ksettings *cmd)
{
struct net_device *dev = mp->dev;
- u32 supported, advertising;
phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
*/
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
- supported &= ~SUPPORTED_1000baseT_Half;
- advertising &= ~ADVERTISED_1000baseT_Half;
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.advertising);
return 0;
}
@@ -3031,10 +3024,12 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
phy->duplex = 0;
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ linkmode_copy(phy->advertising, phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phy->advertising);
} else {
phy->autoneg = AUTONEG_DISABLE;
- phy->advertising = 0;
+ linkmode_zero(phy->advertising);
phy->speed = speed;
phy->duplex = duplex;
}
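
With phylib's supported/advertising masks now being linkmode bitmaps, the driver edits individual modes in place instead of round-tripping through the legacy u32 helpers. A small illustrative sketch of the bitmap helpers used above (not part of the patch):

#include <linux/linkmode.h>
#include <linux/phy.h>

static void sketch_trim_advertising(struct phy_device *phydev)
{
	/* drop a mode the MAC cannot handle */
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   phydev->advertising);
	/* keep autoneg itself advertised */
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
			 phydev->advertising);
}
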
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 61b23497f836..9d4568eb2297 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4248,8 +4248,7 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
/* The Armada 37x documents do not give limits for this other than
* it being an 8-bit register. */
- if (eee->tx_lpi_enabled &&
- (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
+ if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
return -EINVAL;
lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 12db256c8c9f..742f0c1f60df 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -31,6 +31,7 @@
* @resp: command response
* @link_info: link related information
* @event_cb: callback for linkchange events
+ * @event_cb_lock: lock for serializing callback with unregister
* @cmd_pend: flag set before new command is started
* flag cleared after command response is received
* @cgx: parent cgx port
@@ -43,6 +44,7 @@ struct lmac {
u64 resp;
struct cgx_link_user_info link_info;
struct cgx_event_cb event_cb;
+ spinlock_t event_cb_lock;
bool cmd_pend;
struct cgx *cgx;
u8 lmac_id;
@@ -55,6 +57,8 @@ struct cgx {
u8 cgx_id;
u8 lmac_count;
struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
struct list_head cgx_list;
};
@@ -66,6 +70,9 @@ static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
@@ -92,17 +99,21 @@ static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
return cgx->lmac_idmap[lmac_id];
}
-int cgx_get_cgx_cnt(void)
+int cgx_get_cgxcnt_max(void)
{
struct cgx *cgx_dev;
- int count = 0;
+ int idmax = -ENODEV;
list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
- count++;
+ if (cgx_dev->cgx_id > idmax)
+ idmax = cgx_dev->cgx_id;
+
+ if (idmax < 0)
+ return 0;
- return count;
+ return idmax + 1;
}
-EXPORT_SYMBOL(cgx_get_cgx_cnt);
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
int cgx_get_lmac_cnt(void *cgxd)
{
@@ -445,6 +456,9 @@ static inline void cgx_link_change_handler(u64 lstat,
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
+ /* Ensure callback doesn't get unregistered until we finish it */
+ spin_lock(&lmac->event_cb_lock);
+
if (!lmac->event_cb.notify_link_chg) {
dev_dbg(dev, "cgx port %d:%d Link change handler null",
cgx->cgx_id, lmac->lmac_id);
@@ -455,11 +469,13 @@ static inline void cgx_link_change_handler(u64 lstat,
dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
cgx->cgx_id, lmac->lmac_id,
linfo->link_up ? "UP" : "DOWN", linfo->speed);
- return;
+ goto err;
}
if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
dev_err(dev, "event notification failure\n");
+err:
+ spin_unlock(&lmac->event_cb_lock);
}
static inline bool cgx_cmdresp_is_linkevent(u64 event)
@@ -482,6 +498,60 @@ static inline bool cgx_event_is_linkevent(u64 event)
return false;
}
+static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
+ struct cgx *cgx)
+{
+ u64 req = 0;
+ u64 resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ if (!err)
+ *prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);
+
+ return err;
+}
+
+static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
+ struct cgx *cgx)
+{
+ u64 req = 0;
+ u64 resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ if (!err)
+ *prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);
+
+ return err;
+}
+
+int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
+{
+ struct cgx *cgx_dev;
+ int err;
+
+ if (!addr || !size)
+ return -EINVAL;
+
+ cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
+ if (!cgx_dev)
+ return -ENXIO;
+
+ err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
+ if (err)
+ return -EIO;
+
+ err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
+ if (err)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_mkex_prfl_info);
+
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
struct lmac *lmac = data;
@@ -548,6 +618,38 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
}
EXPORT_SYMBOL(cgx_lmac_evh_register);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
+{
+ struct lmac *lmac;
+ unsigned long flags;
+ struct cgx *cgx = cgxd;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lmac->event_cb_lock, flags);
+ lmac->event_cb.notify_link_chg = NULL;
+ lmac->event_cb.data = NULL;
+ spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_unregister);
+
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+ u64 req = 0;
+ u64 resp;
+
+ if (enable)
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+ else
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
u64 req = 0;
@@ -581,6 +683,34 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
return 0;
}
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+ struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+ struct device *dev = &cgx->pdev->dev;
+ int i, err;
+
+ /* Do Link up for all the lmacs */
+ for (i = 0; i < cgx->lmac_count; i++) {
+ err = cgx_fwi_link_change(cgx, i, true);
+ if (err)
+ dev_info(dev, "cgx port %d:%d Link up command failed\n",
+ cgx->cgx_id, i);
+ }
+}
+
+int cgx_lmac_linkup_start(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_linkup_start);
+
static int cgx_lmac_init(struct cgx *cgx)
{
struct lmac *lmac;
@@ -602,6 +732,7 @@ static int cgx_lmac_init(struct cgx *cgx)
lmac->cgx = cgx;
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
+ spin_lock_init(&lmac->event_cb_lock);
err = request_irq(pci_irq_vector(cgx->pdev,
CGX_LMAC_FWI + i * 9),
cgx_fwi_event_handler, 0, lmac->name, lmac);
@@ -624,6 +755,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
struct lmac *lmac;
int i;
+ if (cgx->cgx_cmd_workq) {
+ flush_workqueue(cgx->cgx_cmd_workq);
+ destroy_workqueue(cgx->cgx_cmd_workq);
+ cgx->cgx_cmd_workq = NULL;
+ }
+
/* Free all lmac related resources */
for (i = 0; i < cgx->lmac_count; i++) {
lmac = cgx->lmac_idmap[i];
@@ -679,8 +816,19 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & CGX_ID_MASK;
+
+ /* init wq for processing linkup requests */
+ INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ if (!cgx->cgx_cmd_workq) {
+ dev_err(dev, "alloc workqueue failed for cgx cmd");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
list_add(&cgx->cgx_list, &cgx_list);
- cgx->cgx_id = cgx_get_cgx_cnt() - 1;
cgx_link_usertable_init();
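
cgx_probe() now derives cgx_id from the BAR address and defers the firmware LINK_BRING_UP commands to a per-device workqueue; cgx_lmac_linkup_start() just queues the work. A generic sketch of that INIT_WORK/alloc_workqueue lifecycle, with placeholder names rather than the driver's:

#include <linux/workqueue.h>

struct sketch_dev {
	struct work_struct cmd_work;
	struct workqueue_struct *cmd_wq;
};

static void sketch_cmd_work(struct work_struct *work)
{
	struct sketch_dev *dev = container_of(work, struct sketch_dev, cmd_work);

	/* talk to firmware here, outside probe context */
	(void)dev;
}

static int sketch_setup(struct sketch_dev *dev)
{
	INIT_WORK(&dev->cmd_work, sketch_cmd_work);
	dev->cmd_wq = alloc_workqueue("sketch_cmd_wq", 0, 0);
	if (!dev->cmd_wq)
		return -ENOMEM;
	return 0;
}

static void sketch_kick(struct sketch_dev *dev)
{
	queue_work(dev->cmd_wq, &dev->cmd_work);	/* later, when needed */
}

static void sketch_teardown(struct sketch_dev *dev)
{
	flush_workqueue(dev->cmd_wq);
	destroy_workqueue(dev->cmd_wq);
}
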
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 0a66d2717442..206dc5dc1df8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -20,40 +20,41 @@
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
-#define MAX_CGX 3
+#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
+#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
/* Registers */
#define CGXX_CMRX_CFG 0x00
-#define CMR_EN BIT_ULL(55)
-#define DATA_PKT_TX_EN BIT_ULL(53)
-#define DATA_PKT_RX_EN BIT_ULL(54)
-#define CGX_LMAC_TYPE_SHIFT 40
-#define CGX_LMAC_TYPE_MASK 0xF
+#define CMR_EN BIT_ULL(55)
+#define DATA_PKT_TX_EN BIT_ULL(53)
+#define DATA_PKT_RX_EN BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT 40
+#define CGX_LMAC_TYPE_MASK 0xF
#define CGXX_CMRX_INT 0x040
-#define FW_CGX_INT BIT_ULL(1)
+#define FW_CGX_INT BIT_ULL(1)
#define CGXX_CMRX_INT_ENA_W1S 0x058
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LMACS 0x128
#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
-#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
-#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
-#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
-#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
+#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 0x200
-#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
-#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
#define CGXX_SPUX_CONTROL1 0x10000
-#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
+#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
-#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
@@ -94,11 +95,12 @@ struct cgx_event_cb {
extern struct pci_driver cgx_driver;
-int cgx_get_cgx_cnt(void);
+int cgx_get_cgxcnt_max(void);
int cgx_get_lmac_cnt(void *cgxd);
void *cgx_get_pdata(int cgx_id);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
@@ -108,4 +110,6 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
+int cgx_lmac_linkup_start(void *cgxd);
+int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index fa17af3f4ba7..fb3ba4968a9b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -78,8 +78,8 @@ enum cgx_cmd_id {
CGX_CMD_LINK_STATE_CHANGE,
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
- CGX_CMD_IRQ_ENABLE,
- CGX_CMD_IRQ_DISABLE,
+ CGX_CMD_GET_MKEX_PRFL_SIZE,
+ CGX_CMD_GET_MKEX_PRFL_ADDR
};
/* async event ids */
@@ -139,6 +139,16 @@ enum cgx_cmd_own {
*/
#define RESP_MAC_ADDR GENMASK_ULL(56, 9)
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_SIZE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_SIZE GENMASK_ULL(63, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9)
+
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
* status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
*
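
Like the other response fields in this header, the new MKEX profile size/address replies carry their payload in bits 63:9 of the scratch register, so FIELD_GET() over GENMASK_ULL(63, 9) boils down to a shift by 9. A stand-alone illustration; the mask macro is a local stand-in for GENMASK_ULL(63, 9) and the response word is made up:

#include <stdio.h>
#include <stdint.h>

#define RESP_PAYLOAD_63_9 (~0ULL << 9)	/* same bits as GENMASK_ULL(63, 9) */

int main(void)
{
	uint64_t resp = 0x0000000000400000ULL;	/* made-up scratch register value */
	uint64_t size = (resp & RESP_PAYLOAD_63_9) >> 9;

	printf("mkex profile size = %llu bytes\n", (unsigned long long)size);
	return 0;
}
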
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d39ada404c8f..ec50a21c5aaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,6 +143,14 @@ enum nix_scheduler {
NIX_TXSCH_LVL_CNT = 0x5,
};
+#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+
+/* Min/Max packet sizes, excluding FCS */
+#define NIC_HW_MIN_FRS 40
+#define NIC_HW_MAX_FRS 9212
+#define SDP_HW_MAX_FRS 65535
+
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
#define NIX_RX_ACTIONOP_UCAST (0x1ull)
@@ -169,7 +177,9 @@ enum nix_scheduler {
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
+#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
@@ -186,26 +196,4 @@ enum nix_scheduler {
#define DEFAULT_RSS_CONTEXT_GROUP 0
#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
-/* NIX flow tag, key type flags */
-#define FLOW_KEY_TYPE_PORT BIT(0)
-#define FLOW_KEY_TYPE_IPV4 BIT(1)
-#define FLOW_KEY_TYPE_IPV6 BIT(2)
-#define FLOW_KEY_TYPE_TCP BIT(3)
-#define FLOW_KEY_TYPE_UDP BIT(4)
-#define FLOW_KEY_TYPE_SCTP BIT(5)
-
-/* NIX flow tag algorithm indices, max is 31 */
-enum {
- FLOW_KEY_ALG_PORT,
- FLOW_KEY_ALG_IP,
- FLOW_KEY_ALG_TCP,
- FLOW_KEY_ALG_UDP,
- FLOW_KEY_ALG_SCTP,
- FLOW_KEY_ALG_TCP_UDP,
- FLOW_KEY_ALG_TCP_SCTP,
- FLOW_KEY_ALG_UDP_SCTP,
- FLOW_KEY_ALG_TCP_UDP_SCTP,
- FLOW_KEY_ALG_MAX,
-};
-
#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 85ba24a05774..d6f9ed8ea966 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(otx2_mbox_nonempty);
const char *otx2_mbox_id2name(u16 id)
{
switch (id) {
-#define M(_name, _id, _1, _2) case _id: return # _name;
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
MBOX_MESSAGES
#undef M
default:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a15a59c9a239..76a4575d18ff 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -120,54 +120,101 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
#define MBOX_MESSAGES \
/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
-M(READY, 0x001, msg_req, ready_msg_rsp) \
-M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
-M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
-M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
+M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
+M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
+M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
-M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
-M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
-M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
-M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
+M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
-M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
-M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
-M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
-M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
-M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
-M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
-M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
-M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
+M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
-M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
-M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
-M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
-M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
+ npa_lf_alloc_req, npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
+ npc_mcam_alloc_entry_rsp) \
+M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \
+ npc_mcam_free_entry_req, msg_rsp) \
+M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \
+ npc_mcam_write_entry_req, msg_rsp) \
+M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\
+ npc_mcam_shift_entry_rsp) \
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \
+ npc_mcam_alloc_counter_req, \
+ npc_mcam_alloc_counter_rsp) \
+M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \
+ npc_mcam_unmap_counter_req, msg_rsp) \
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \
+ npc_mcam_oper_counter_req, \
+ npc_mcam_oper_counter_rsp) \
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
+ npc_mcam_alloc_and_write_entry_req, \
+ npc_mcam_alloc_and_write_entry_rsp) \
+M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
+ msg_req, npc_get_kex_cfg_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
-M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
-M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
-M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
-M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \
-M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
-M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \
-M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \
-M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp) \
-M(NIX_VTAG_CFG, 0x8008, nix_vtag_config, msg_rsp) \
-M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, msg_rsp) \
-M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, msg_rsp) \
-M(NIX_SET_RX_MODE, 0x800b, nix_rx_mode, msg_rsp)
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
+ nix_lf_alloc_req, nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
+ hwctx_disable_req, msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
+ nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
+M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \
+M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \
+M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \
+M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \
+M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg, \
+ nix_mark_format_cfg, \
+ nix_mark_format_cfg_rsp) \
+M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
+M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
+ nix_lso_format_cfg, \
+ nix_lso_format_cfg_rsp) \
+M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
-M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
enum {
-#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
#undef M
@@ -191,6 +238,13 @@ struct msg_rsp {
struct mbox_msghdr hdr;
};
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+ RVU_INVALID_VF_ID = -256,
+};
+
struct ready_msg_rsp {
struct mbox_msghdr hdr;
u16 sclk_feq; /* SCLK frequency */
@@ -347,6 +401,8 @@ struct hwctx_disable_req {
u8 ctype;
};
+/* NIX mbox message formats */
+
/* NIX mailbox error codes
* Range 401 - 500.
*/
@@ -365,6 +421,12 @@ enum nix_af_status {
NIX_AF_INVAL_TXSCHQ_CFG = -412,
NIX_AF_SMQ_FLUSH_FAILED = -413,
NIX_AF_ERR_LF_RESET = -414,
+ NIX_AF_ERR_RSS_NOSPC_FIELD = -415,
+ NIX_AF_ERR_RSS_NOSPC_ALGO = -416,
+ NIX_AF_ERR_MARK_CFG_FAIL = -417,
+ NIX_AF_ERR_LSO_CFG_FAIL = -418,
+ NIX_AF_INVAL_NPA_PF_FUNC = -419,
+ NIX_AF_INVAL_SSO_PF_FUNC = -420,
};
/* For NIX LF context alloc and init */
@@ -392,6 +454,10 @@ struct nix_lf_alloc_rsp {
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
u8 mac_addr[ETH_ALEN];
+ u8 lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+ u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+ u16 cints; /* NIX_AF_CONST2::CINTS */
+ u16 qints; /* NIX_AF_CONST2::QINTS */
};
/* NIX AQ enqueue msg */
@@ -472,6 +538,7 @@ struct nix_txschq_config {
struct nix_vtag_config {
struct mbox_msghdr hdr;
+ /* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
u8 vtag_size;
/* cfg_type is '0' for tx vlan cfg
* cfg_type is '1' for rx vlan cfg
@@ -492,7 +559,7 @@ struct nix_vtag_config {
/* valid when cfg_type is '1' */
struct {
- /* rx vtag type index */
+ /* rx vtag type index, valid values are in 0..7 range */
u8 vtag_type;
/* rx vtag strip */
u8 strip_vtag :1;
@@ -505,15 +572,40 @@ struct nix_vtag_config {
struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
+#define NIX_FLOW_KEY_TYPE_PORT BIT(0)
+#define NIX_FLOW_KEY_TYPE_IPV4 BIT(1)
+#define NIX_FLOW_KEY_TYPE_IPV6 BIT(2)
+#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
+#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
+#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
+struct nix_rss_flowkey_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 alg_idx; /* Selected algo index */
+};
+
struct nix_set_mac_addr {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
};
+struct nix_mark_format_cfg {
+ struct mbox_msghdr hdr;
+ u8 offset;
+ u8 y_mask;
+ u8 y_val;
+ u8 r_mask;
+ u8 r_val;
+};
+
+struct nix_mark_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 mark_format_idx;
+};
+
struct nix_rx_mode {
struct mbox_msghdr hdr;
#define NIX_RX_MODE_UCAST BIT(0)
@@ -522,4 +614,182 @@ struct nix_rx_mode {
u16 mode;
};
+struct nix_rx_cfg {
+ struct mbox_msghdr hdr;
+#define NIX_RX_OL3_VERIFY BIT(0)
+#define NIX_RX_OL4_VERIFY BIT(1)
+ u8 len_verify; /* Outer L3/L4 len check */
+#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
+ u8 csum_verify; /* Outer L4 checksum verification */
+};
+
+struct nix_frs_cfg {
+ struct mbox_msghdr hdr;
+ u8 update_smq; /* Update SMQ's min/max lens */
+ u8 update_minlen; /* Set minlen also */
+ u8 sdp_link; /* Set SDP RX link */
+ u16 maxlen;
+ u16 minlen;
+};
+
+struct nix_lso_format_cfg {
+ struct mbox_msghdr hdr;
+ u64 field_mask;
+#define NIX_LSO_FIELD_MAX 8
+ u64 fields[NIX_LSO_FIELD_MAX];
+};
+
+struct nix_lso_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 lso_format_idx;
+};
+
+/* NPC mbox message structs */
+
+#define NPC_MCAM_ENTRY_INVALID 0xFFFF
+#define NPC_MCAM_INVALID_MAP 0xFFFF
+
+/* NPC mailbox error codes
+ * Range 701 - 800.
+ */
+enum npc_af_status {
+ NPC_MCAM_INVALID_REQ = -701,
+ NPC_MCAM_ALLOC_DENIED = -702,
+ NPC_MCAM_ALLOC_FAILED = -703,
+ NPC_MCAM_PERM_DENIED = -704,
+};
+
+struct npc_mcam_alloc_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MAX_NONCONTIG_ENTRIES 256
+ u8 contig; /* Contiguous entries ? */
+#define NPC_MCAM_ANY_PRIO 0
+#define NPC_MCAM_LOWER_PRIO 1
+#define NPC_MCAM_HIGHER_PRIO 2
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u16 ref_entry;
+ u16 count; /* Number of entries requested */
+};
+
+struct npc_mcam_alloc_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 entry; /* Entry allocated or start index if contiguous.
+ * Invalid in case of non-contiguous.
+ */
+ u16 count; /* Number of entries allocated */
+ u16 free_count; /* Number of entries available */
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+};
+
+struct npc_mcam_free_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* Entry index to be freed */
+ u8 all; /* If set, free all entries allocated to this PFVF */
+};
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
+ u64 kw[NPC_MAX_KWS_IN_KEY];
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ u64 action;
+ u64 vtag_action;
+};
+
+struct npc_mcam_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 entry; /* MCAM entry to write this match key */
+ u16 cntr; /* Counter for this MCAM entry */
+ u8 intf; /* Rx or Tx interface */
+ u8 enable_entry;/* Enable this MCAM entry ? */
+ u8 set_cntr; /* Set counter for this entry ? */
+};
+
+/* Enable/Disable a given entry */
+struct npc_mcam_ena_dis_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+};
+
+struct npc_mcam_shift_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MCAM_MAX_SHIFTS 64
+ u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 new_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 shift_count; /* Number of entries to shift */
+};
+
+struct npc_mcam_shift_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */
+};
+
+struct npc_mcam_alloc_counter_req {
+ struct mbox_msghdr hdr;
+ u8 contig; /* Contiguous counters ? */
+#define NPC_MAX_NONCONTIG_COUNTERS 64
+ u16 count; /* Number of counters requested */
+};
+
+struct npc_mcam_alloc_counter_rsp {
+ struct mbox_msghdr hdr;
+ u16 cntr; /* Counter allocated or start index if contiguous.
+ * Invalid in case of non-contiguous.
+ */
+ u16 count; /* Number of counters allocated */
+ u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
+};
+
+struct npc_mcam_oper_counter_req {
+ struct mbox_msghdr hdr;
+ u16 cntr; /* Free a counter or clear/fetch its stats */
+};
+
+struct npc_mcam_oper_counter_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* valid only while fetching counter's stats */
+};
+
+struct npc_mcam_unmap_counter_req {
+ struct mbox_msghdr hdr;
+ u16 cntr;
+ u16 entry; /* Entry and counter to be unmapped */
+ u8 all; /* Unmap all entries using this counter ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 ref_entry;
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u8 intf; /* Rx or Tx interface */
+ u8 enable_entry;/* Enable this MCAM entry ? */
+ u8 alloc_cntr; /* Allocate counter and map ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 cntr;
+};
+
+struct npc_get_kex_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u64 rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */
+ u64 tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */
+#define NPC_MAX_INTF 2
+#define NPC_MAX_LID 8
+#define NPC_MAX_LT 16
+#define NPC_MAX_LD 2
+#define NPC_MAX_LFL 16
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ u64 kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+#define MKEX_NAME_LEN 128
+ u8 mkex_pfl_name[MKEX_NAME_LEN];
+};
+
#endif /* MBOX_H */
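
The extra lowercase token added to each M() entry lets the same table generate the MBOX_MSG_* enum, otx2_mbox_id2name(), and the rvu_mbox_handler_<fn_name>() dispatch in rvu_process_mbox_msg(). A trimmed, stand-alone illustration of the X-macro expansions, using two fictional entries instead of the real table:

#include <stdio.h>

/* Two made-up entries standing in for the real MBOX_MESSAGES table */
#define EXAMPLE_MESSAGES					\
M(READY,  0x001, ready,  int, int)				\
M(VF_FLR, 0x006, vf_flr, int, int)

/* Expansion 1: message-ID enum */
enum {
#define M(_name, _id, _fn_name, _req, _rsp) MBOX_MSG_ ## _name = _id,
EXAMPLE_MESSAGES
#undef M
};

/* Expansion 2: id -> name lookup, as in otx2_mbox_id2name() */
static const char *id2name(int id)
{
	switch (id) {
#define M(_name, _id, _fn_name, _req, _rsp) case _id: return #_name;
	EXAMPLE_MESSAGES
#undef M
	default:
		return "INVALID";
	}
}

int main(void)
{
	printf("%d -> %s\n", MBOX_MSG_VF_FLR, id2name(MBOX_MSG_VF_FLR));
	return 0;
}
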
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index f98b0113def3..8d6d90fdfb73 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -259,4 +259,28 @@ struct nix_rx_action {
#endif
};
+/* NIX Receive Vtag Action Structure */
+#define VTAG0_VALID_BIT BIT_ULL(15)
+#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+
+struct npc_mcam_kex {
+ /* MKEX Profile Header */
+ u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */
+ u8 name[MKEX_NAME_LEN]; /* MKEX Profile name */
+ u64 cpu_model; /* Format as profiled by CPU hardware */
+ u64 kpu_version; /* KPU firmware/profile version */
+ u64 reserved; /* Reserved for extension */
+
+ /* MKEX Profile Data */
+ u64 keyx_cfg[NPC_MAX_INTF]; /* NPC_AF_INTF(0..1)_KEX_CFG */
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ u64 kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+} __packed;
+
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index dc28fa2b9481..e581091c09c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -29,6 +29,16 @@ static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *));
+enum {
+ TYPE_AFVF,
+ TYPE_AFPF,
+};
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
@@ -42,6 +52,10 @@ MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);
+static char *mkex_profile; /* MKEX profile name */
+module_param(mkex_profile, charp, 0000);
+MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+
/* Poll a RVU block's register 'offset', for a 'zero'
* or 'nonzero' at bits specified by 'mask'
*/
@@ -153,17 +167,17 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
u16 match = 0;
int lf;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
for (lf = 0; lf < block->lf.max; lf++) {
if (block->fn_map[lf] == pcifunc) {
if (slot == match) {
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return lf;
}
match++;
}
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return -ENODEV;
}
@@ -337,6 +351,28 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
return &rvu->pf[rvu_get_pf(pcifunc)];
}
+static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
+{
+ int pf, vf, nvfs;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ if (pf >= rvu->hw->total_pfs)
+ return false;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return true;
+
+ /* Check if VF is within number of VFs attached to this PF */
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ nvfs = (cfg >> 12) & 0xFF;
+ if (vf >= nvfs)
+ return false;
+
+ return true;
+}
+
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
struct rvu_block *block;
@@ -597,6 +633,8 @@ static void rvu_free_hw_resources(struct rvu *rvu)
dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
+
+ mutex_destroy(&rvu->rsrc_lock);
}
static int rvu_setup_hw_resources(struct rvu *rvu)
@@ -752,7 +790,7 @@ init:
if (!rvu->hwvf)
return -ENOMEM;
- spin_lock_init(&rvu->rsrc_lock);
+ mutex_init(&rvu->rsrc_lock);
err = rvu_setup_msix_resources(rvu);
if (err)
@@ -777,17 +815,26 @@ init:
err = rvu_npc_init(rvu);
if (err)
- return err;
+ goto exit;
+
+ err = rvu_cgx_init(rvu);
+ if (err)
+ goto exit;
err = rvu_npa_init(rvu);
if (err)
- return err;
+ goto cgx_err;
err = rvu_nix_init(rvu);
if (err)
- return err;
+ goto cgx_err;
return 0;
+
+cgx_err:
+ rvu_cgx_exit(rvu);
+exit:
+ return err;
}
/* NPA and NIX admin queue APIs */
@@ -830,7 +877,7 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
return 0;
}
-static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
{
return 0;
@@ -858,6 +905,22 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
return 0;
}
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf;
+
+ if (!is_pf_func_valid(rvu, pcifunc))
+ return false;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Check if this PFFUNC has a LF of type blktype attached */
+ if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+ return false;
+
+ return true;
+}
+
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
int pcifunc, int slot)
{
@@ -926,7 +989,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
struct rvu_block *block;
int blkid;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
/* Check for partial resource detach */
if (detach && detach->partial)
@@ -956,11 +1019,11 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
rvu_detach_block(rvu, pcifunc, block->type);
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return 0;
}
-static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
struct rsrc_detach *detach,
struct msg_rsp *rsp)
{
@@ -1108,7 +1171,7 @@ fail:
return -ENOSPC;
}
-static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
struct rsrc_attach *attach,
struct msg_rsp *rsp)
{
@@ -1119,7 +1182,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
if (!attach->modify)
rvu_detach_rsrcs(rvu, NULL, pcifunc);
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
/* Check if the request can be accommodated */
err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
@@ -1163,7 +1226,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
}
exit:
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return err;
}
@@ -1231,7 +1294,7 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
-static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
struct msix_offset_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1280,22 +1343,51 @@ static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
return 0;
}
-static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 vf, numvfs;
+ u64 cfg;
+
+ vf = pcifunc & RVU_PFVF_FUNC_MASK;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+ numvfs = (cfg >> 12) & 0xFF;
+
+ if (vf && vf <= numvfs)
+ __rvu_flr_handler(rvu, pcifunc);
+ else
+ return RVU_INVALID_VF_ID;
+
+ return 0;
+}
+
+static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
+ struct rvu *rvu = pci_get_drvdata(mbox->pdev);
+
	/* Check if valid, if not reply with an invalid msg */
if (req->sig != OTX2_MBOX_REQ_SIG)
goto bad_message;
switch (req->id) {
-#define M(_name, _id, _req_type, _rsp_type) \
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
case _id: { \
struct _rsp_type *rsp; \
int err; \
\
rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
- &rvu->mbox, devid, \
+ mbox, devid, \
sizeof(struct _rsp_type)); \
+ /* some handlers should complete even if reply */ \
+ /* could not be allocated */ \
+ if (!rsp && \
+ _id != MBOX_MSG_DETACH_RESOURCES && \
+ _id != MBOX_MSG_NIX_TXSCH_FREE && \
+ _id != MBOX_MSG_VF_FLR) \
+ return -ENOMEM; \
if (rsp) { \
rsp->hdr.id = _id; \
rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
@@ -1303,9 +1395,9 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
rsp->hdr.rc = 0; \
} \
\
- err = rvu_mbox_handler_ ## _name(rvu, \
- (struct _req_type *)req, \
- rsp); \
+ err = rvu_mbox_handler_ ## _fn_name(rvu, \
+ (struct _req_type *)req, \
+ rsp); \
if (rsp && err) \
rsp->hdr.rc = err; \
\
@@ -1313,29 +1405,38 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
}
MBOX_MESSAGES
#undef M
- break;
+
bad_message:
default:
- otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
- req->id);
+ otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
return -ENODEV;
}
}
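
Illustration only, not part of the patch: the MBOX_MESSAGES/M() pattern used above is an X-macro, where one message table is expanded at several call sites with different definitions of M(); here it generates one switch case per message id and calls the matching rvu_mbox_handler_<fn_name>(). A stripped-down sketch of the idiom, with a made-up two-entry table and hypothetical handler names:

	#include <stdio.h>

	/* Hypothetical message table: M(NAME, id, fn_name) */
	#define DEMO_MESSAGES			\
		M(READY,  0x001, ready)		\
		M(ATTACH, 0x002, attach_resources)

	static int handler_ready(void)            { return 0; }
	static int handler_attach_resources(void) { return 0; }

	static int dispatch(int id)
	{
		switch (id) {
	#define M(_name, _id, _fn_name)			\
		case _id:				\
			return handler_ ## _fn_name();
		DEMO_MESSAGES
	#undef M
		default:
			return -1;	/* unknown message */
		}
	}

	int main(void)
	{
		printf("0x001 -> %d, 0x999 -> %d\n", dispatch(0x001), dispatch(0x999));
		return 0;
	}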
-static void rvu_mbox_handler(struct work_struct *work)
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
- struct rvu_work *mwork = container_of(work, struct rvu_work, work);
struct rvu *rvu = mwork->rvu;
+ int offset, err, id, devid;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *req_hdr;
struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
struct otx2_mbox *mbox;
- int offset, id, err;
- u16 pf;
- mbox = &rvu->mbox;
- pf = mwork - rvu->mbox_wrk;
- mdev = &mbox->dev[pf];
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
+
+ devid = mwork - mw->mbox_wrk;
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[devid];
/* Process received mbox messages */
req_hdr = mdev->mbase + mbox->rx_start;
@@ -1347,10 +1448,21 @@ static void rvu_mbox_handler(struct work_struct *work)
for (id = 0; id < req_hdr->num_msgs; id++) {
msg = mdev->mbase + offset;
- /* Set which PF sent this message based on mbox IRQ */
- msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
- msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
- err = rvu_process_mbox_msg(rvu, pf, msg);
+ /* Set which PF/VF sent this message based on mbox IRQ */
+ switch (type) {
+ case TYPE_AFPF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+ break;
+ case TYPE_AFVF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
+ break;
+ }
+
+ err = rvu_process_mbox_msg(mbox, devid, msg);
if (!err) {
offset = mbox->rx_start + msg->next_msgoff;
continue;
@@ -1358,31 +1470,57 @@ static void rvu_mbox_handler(struct work_struct *work)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
- err, otx2_mbox_id2name(msg->id), msg->id, pf,
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, devid,
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
- err, otx2_mbox_id2name(msg->id), msg->id, pf);
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, devid);
}
- /* Send mbox responses to PF */
- otx2_mbox_msg_send(mbox, pf);
+ /* Send mbox responses to VF/PF */
+ otx2_mbox_msg_send(mbox, devid);
+}
+
+static inline void rvu_afpf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFPF);
}
-static void rvu_mbox_up_handler(struct work_struct *work)
+static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFVF);
+}
+
+static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+{
struct rvu *rvu = mwork->rvu;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
struct otx2_mbox *mbox;
- int offset, id;
- u16 pf;
+ int offset, id, devid;
+
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
- mbox = &rvu->mbox_up;
- pf = mwork - rvu->mbox_wrk_up;
- mdev = &mbox->dev[pf];
+ devid = mwork - mw->mbox_wrk_up;
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[devid];
rsp_hdr = mdev->mbase + mbox->rx_start;
if (rsp_hdr->num_msgs == 0) {
@@ -1423,128 +1561,182 @@ end:
mdev->msgs_acked++;
}
- otx2_mbox_reset(mbox, 0);
+ otx2_mbox_reset(mbox, devid);
}
-static int rvu_mbox_init(struct rvu *rvu)
+static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
- struct rvu_hwinfo *hw = rvu->hw;
- void __iomem *hwbase = NULL;
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFVF);
+}
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *))
+{
+ void __iomem *hwbase = NULL, *reg_base;
+ int err, i, dir, dir_up;
struct rvu_work *mwork;
+ const char *name;
u64 bar4_addr;
- int err, pf;
- rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- hw->total_pfs);
- if (!rvu->mbox_wq)
+ switch (type) {
+ case TYPE_AFPF:
+ name = "rvu_afpf_mailbox";
+ bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+ dir = MBOX_DIR_AFPF;
+ dir_up = MBOX_DIR_AFPF_UP;
+ reg_base = rvu->afreg_base;
+ break;
+ case TYPE_AFVF:
+ name = "rvu_afvf_mailbox";
+ bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+ dir = MBOX_DIR_PFVF;
+ dir_up = MBOX_DIR_PFVF_UP;
+ reg_base = rvu->pfreg_base;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mw->mbox_wq = alloc_workqueue(name,
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ num);
+ if (!mw->mbox_wq)
return -ENOMEM;
- rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
- sizeof(struct rvu_work), GFP_KERNEL);
- if (!rvu->mbox_wrk) {
+ mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk) {
err = -ENOMEM;
goto exit;
}
- rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
- sizeof(struct rvu_work), GFP_KERNEL);
- if (!rvu->mbox_wrk_up) {
+ mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk_up) {
err = -ENOMEM;
goto exit;
}
- /* Map mbox region shared with PFs */
- bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
/* Mailbox is a reserved memory (in RAM) region shared between
* RVU devices, shouldn't be mapped as device memory to allow
* unaligned accesses.
*/
- hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
+ hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
if (!hwbase) {
dev_err(rvu->dev, "Unable to map mailbox region\n");
err = -ENOMEM;
goto exit;
}
- err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
- MBOX_DIR_AFPF, hw->total_pfs);
+ err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
if (err)
goto exit;
- err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
- MBOX_DIR_AFPF_UP, hw->total_pfs);
+ err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
+ reg_base, dir_up, num);
if (err)
goto exit;
- for (pf = 0; pf < hw->total_pfs; pf++) {
- mwork = &rvu->mbox_wrk[pf];
+ for (i = 0; i < num; i++) {
+ mwork = &mw->mbox_wrk[i];
mwork->rvu = rvu;
- INIT_WORK(&mwork->work, rvu_mbox_handler);
- }
+ INIT_WORK(&mwork->work, mbox_handler);
- for (pf = 0; pf < hw->total_pfs; pf++) {
- mwork = &rvu->mbox_wrk_up[pf];
+ mwork = &mw->mbox_wrk_up[i];
mwork->rvu = rvu;
- INIT_WORK(&mwork->work, rvu_mbox_up_handler);
+ INIT_WORK(&mwork->work, mbox_up_handler);
}
return 0;
exit:
if (hwbase)
iounmap((void __iomem *)hwbase);
- destroy_workqueue(rvu->mbox_wq);
+ destroy_workqueue(mw->mbox_wq);
return err;
}
-static void rvu_mbox_destroy(struct rvu *rvu)
+static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
- if (rvu->mbox_wq) {
- flush_workqueue(rvu->mbox_wq);
- destroy_workqueue(rvu->mbox_wq);
- rvu->mbox_wq = NULL;
+ if (mw->mbox_wq) {
+ flush_workqueue(mw->mbox_wq);
+ destroy_workqueue(mw->mbox_wq);
+ mw->mbox_wq = NULL;
}
- if (rvu->mbox.hwbase)
- iounmap((void __iomem *)rvu->mbox.hwbase);
+ if (mw->mbox.hwbase)
+ iounmap((void __iomem *)mw->mbox.hwbase);
- otx2_mbox_destroy(&rvu->mbox);
- otx2_mbox_destroy(&rvu->mbox_up);
+ otx2_mbox_destroy(&mw->mbox);
+ otx2_mbox_destroy(&mw->mbox_up);
}
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr)
{
- struct rvu *rvu = (struct rvu *)rvu_irq;
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
+ int i;
+
+ for (i = first; i < mdevs; i++) {
+ /* start from 0 */
+ if (!(intr & BIT_ULL(i - first)))
+ continue;
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+ }
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
u64 intr;
- u8 pf;
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
/* Clear interrupts */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
/* Sync with mbox memory region */
- smp_wmb();
+ rmb();
- for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
- if (intr & (1ULL << pf)) {
- mbox = &rvu->mbox;
- mdev = &mbox->dev[pf];
- hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
- queue_work(rvu->mbox_wq,
- &rvu->mbox_wrk[pf].work);
- mbox = &rvu->mbox_up;
- mdev = &mbox->dev[pf];
- hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
- queue_work(rvu->mbox_wq,
- &rvu->mbox_wrk_up[pf].work);
- }
+ rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+
+ /* Handle VF interrupts */
+ if (vfs > 64) {
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
+ vfs -= 64;
}
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
+
return IRQ_HANDLED;
}
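
Illustration only, not part of the patch: in the handler above, each set bit in the 64-bit interrupt word names one mailbox peer, the bit position is taken relative to the first device covered by that register, and VFs 64 and up are reported in a second register. A small standalone sketch of that bit-to-index walk, with made-up register contents:

	#include <stdint.h>
	#include <stdio.h>

	/* Queue (here: print) work for every device whose bit is set in 'intr'.
	 * 'first' is the device index corresponding to bit 0 of this register.
	 */
	static void queue_pending(int first, int count, uint64_t intr)
	{
		for (int i = 0; i < count; i++)
			if (intr & (1ULL << i))
				printf("queue mbox work for device %d\n", first + i);
	}

	int main(void)
	{
		uint64_t intx0 = 0x0000000000000005ULL;	/* hypothetical INTX(0): devices 0 and 2 */
		uint64_t intx1 = 0x0000000000000001ULL;	/* hypothetical INTX(1): device 64 */

		queue_pending(0, 64, intx0);
		queue_pending(64, 64, intx1);
		return 0;
	}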
@@ -1561,6 +1753,216 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
INTR_MASK(hw->total_pfs) & ~1ULL);
}
+static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+{
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int err;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->type);
+ if (!num_lfs)
+ return;
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (lf < 0)
+ continue;
+
+ /* Cleanup LF and reset it */
+ if (block->addr == BLKADDR_NIX0)
+ rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
+ else if (block->addr == BLKADDR_NPA)
+ rvu_npa_lf_teardown(rvu, pcifunc, lf);
+
+ err = rvu_lf_reset(rvu, block, lf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, lf);
+ }
+ }
+}
+
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ mutex_lock(&rvu->flr_lock);
+ /* Reset order should reflect inter-block dependencies:
+ * 1. Reset any packet/work sources (NIX, CPT, TIM)
+ * 2. Flush and reset SSO/SSOW
+ * 3. Cleanup pools (NPA)
+ */
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ mutex_unlock(&rvu->flr_lock);
+}
+
+static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
+{
+ int reg = 0;
+
+ /* pcifunc = 0(PF0) | (vf + 1) */
+ __rvu_flr_handler(rvu, vf + 1);
+
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+
+ /* Signal FLR finish and enable IRQ */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static void rvu_flr_handler(struct work_struct *work)
+{
+ struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = flrwork->rvu;
+ u16 pcifunc, numvfs, vf;
+ u64 cfg;
+ int pf;
+
+ pf = flrwork - rvu->flr_wrk;
+ if (pf >= rvu->hw->total_pfs) {
+ rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
+ return;
+ }
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ numvfs = (cfg >> 12) & 0xFF;
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+
+ for (vf = 0; vf < numvfs; vf++)
+ __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+
+ __rvu_flr_handler(rvu, pcifunc);
+
+ /* Signal FLR finish */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
+
+ /* Enable interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
+}
+
+static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
+{
+ int dev, vf, reg = 0;
+ u64 intr;
+
+ if (start_vf >= 64)
+ reg = 1;
+
+ intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ return;
+
+ for (vf = 0; vf < numvfs; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf + rvu->hw->total_pfs;
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
+ /* Clear and disable the interrupt */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+ }
+}
+
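
Illustration only, not part of the patch: the single flr_wrk array is shared by PFs and the AF's own VFs, with slots 0..total_pfs-1 belonging to PFs and slot total_pfs+vf to AF VF 'vf'; rvu_flr_handler() later recovers the VF index from the work-item position. A toy sketch of that index round trip (sizes are made up):

	#include <stdio.h>

	int main(void)
	{
		int total_pfs = 16, vf = 3;            /* hypothetical sizes */
		int dev = total_pfs + vf;              /* slot queued for AF VF3 */

		if (dev >= total_pfs)
			printf("slot %d -> AF VF%d\n", dev, dev - total_pfs);
		else
			printf("slot %d -> PF%d\n", dev, dev);
		return 0;
	}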
+static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
+ if (!intr)
+ goto afvf_flr;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+			/* PF is already dead, do only AF related operations */
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
+ BIT_ULL(pf));
+ /* Disable the interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ BIT_ULL(pf));
+ }
+ }
+
+afvf_flr:
+ rvu_afvf_queue_flr_work(rvu, 0, 64);
+ if (rvu->vfs > 64)
+ rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
+{
+ int vf;
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (vf = 0; vf < 64; vf++) {
+ if (intr & (1ULL << vf)) {
+			/* clear the trpend due to ME (master enable) */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
+ /* clear interrupt */
+ rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
+ }
+ }
+}
+
+/* Handles ME interrupts from VFs of AF */
+static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfset;
+ u64 intr;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ for (vfset = 0; vfset <= 1; vfset++) {
+ intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
+ if (intr)
+ rvu_me_handle_vfset(rvu, vfset, intr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handles ME interrupts from PFs */
+static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+			/* clear the trpend due to ME (master enable) */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
+ BIT_ULL(pf));
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
+ BIT_ULL(pf));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
@@ -1569,6 +1971,14 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ /* Disable the PF FLR interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Disable the PF ME interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
for (irq = 0; irq < rvu->num_vec; irq++) {
if (rvu->irq_allocated[irq])
free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
@@ -1578,9 +1988,25 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu->num_vec = 0;
}
+static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[0];
+ int offset;
+
+ pfvf = &rvu->pf[0];
+ offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+ /* Make sure there are enough MSIX vectors configured so that
+ * VF interrupts can be handled. Offset equal to zero means
+	 * that PF vectors are not configured and would overlap AF vectors.
+ */
+ return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
+ offset;
+}
+
static int rvu_register_interrupts(struct rvu *rvu)
{
- int ret;
+ int ret, offset, pf_vec_start;
rvu->num_vec = pci_msix_vec_count(rvu->pdev);
@@ -1620,13 +2046,331 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Enable mailbox interrupts from all PFs */
rvu_enable_mbox_intr(rvu);
+ /* Register FLR interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ "RVUAF FLR");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for FLR\n");
+ goto fail;
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
+
+	/* Enable FLR interrupt for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Register ME interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ "RVUAF ME");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
+ rvu_me_pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for ME\n");
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+
+	/* Enable ME interrupt for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu))
+ return 0;
+
+ /* Get PF MSIX vectors offset. */
+ pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+ /* Register MBOX0 interrupt. */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox0\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+ * simply increment current offset by 1.
+ */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox1\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register FLR interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ /* Register ME interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
return 0;
fail:
- pci_free_irq_vectors(rvu->pdev);
+ rvu_unregister_interrupts(rvu);
+ return ret;
+}
+
+static void rvu_flr_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->flr_wq) {
+ flush_workqueue(rvu->flr_wq);
+ destroy_workqueue(rvu->flr_wq);
+ rvu->flr_wq = NULL;
+ }
+}
+
+static int rvu_flr_init(struct rvu *rvu)
+{
+ int dev, num_devs;
+ u64 cfg;
+ int pf;
+
+	/* Enable FLR for all PFs */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
+ cfg | BIT_ULL(22));
+ }
+
+ rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ 1);
+ if (!rvu->flr_wq)
+ return -ENOMEM;
+
+ num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
+ rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->flr_wrk) {
+ destroy_workqueue(rvu->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (dev = 0; dev < num_devs; dev++) {
+ rvu->flr_wrk[dev].rvu = rvu;
+ INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
+ }
+
+ mutex_init(&rvu->flr_lock);
+
+ return 0;
+}
+
+static void rvu_disable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+static void rvu_enable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ /* Clear any pending interrupts and enable AF VF interrupts for
+ * the first 64 VFs.
+ */
+ /* Mbox */
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* FLR */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* Same for remaining VFs, if any. */
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ INTR_MASK(vfs - 64));
+
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
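
Illustration only, not part of the patch: the enable/disable helpers above program up to 64 VFs per register, splitting anything beyond 64 into the second register of each pair; INTR_MASK() (defined elsewhere in the driver) yields a mask with one bit per VF. A sketch under the assumption that the macro behaves like "count low bits set, saturating at 64":

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed behaviour of the driver's INTR_MASK() helper. */
	static uint64_t intr_mask(int count)
	{
		return count >= 64 ? ~0ULL : (1ULL << count) - 1;
	}

	int main(void)
	{
		int vfs = 96;	/* hypothetical VF count */

		printf("reg0 mask = %016llx\n", (unsigned long long)intr_mask(vfs));
		if (vfs > 64)
			printf("reg1 mask = %016llx\n",
			       (unsigned long long)intr_mask(vfs - 64));
		return 0;
	}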
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
+
+static int lbk_get_num_chans(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ int ret = -EIO;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
+ NULL);
+ if (!pdev)
+ goto err;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ /* Read number of available LBK channels from LBK(0)_CONST register. */
+ ret = (readq(base + 0x10) >> 32) & 0xffff;
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+err:
return ret;
}
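
Illustration only, not part of the patch: lbk_get_num_chans() above pulls the channel count out of bits 32..47 of the LBK(0)_CONST register mapped at BAR0 offset 0x10. The field extraction is just a shift and mask; a sketch with a made-up register value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t lbk_const = 0x0000004000000000ULL;	/* hypothetical LBK(0)_CONST */
		int chans = (lbk_const >> 32) & 0xffff;		/* channel count field */

		printf("LBK channels: %d\n", chans);		/* -> 64 */
		return 0;
	}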
+static int rvu_enable_sriov(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ int err, chans, vfs;
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
+ dev_warn(&pdev->dev,
+ "Skipping SRIOV enablement since not enough IRQs are available\n");
+ return 0;
+ }
+
+ chans = lbk_get_num_chans();
+ if (chans < 0)
+ return chans;
+
+ vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Limit VFs in case we have more VFs than LBK channels available. */
+ if (vfs > chans)
+ vfs = chans;
+
+ /* AF's VFs work in pairs and talk over consecutive loopback channels.
+	 * Thus we want to enable the maximum even number of VFs. If an
+	 * odd number of VFs is available, the last VF on the list
+	 * remains disabled.
+ */
+ if (vfs & 0x1) {
+ dev_warn(&pdev->dev,
+ "Number of VFs should be even. Enabling %d out of %d.\n",
+ vfs - 1, vfs);
+ vfs--;
+ }
+
+ if (!vfs)
+ return 0;
+
+	/* Save the number of VFs for reference in the VF interrupt handlers.
+	 * Since interrupts might start arriving during SRIOV enablement,
+	 * the ordinary API cannot be used to get the number of enabled VFs.
+ */
+ rvu->vfs = vfs;
+
+ err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
+ rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
+ if (err)
+ return err;
+
+ rvu_enable_afvf_intr(rvu);
+ /* Make sure IRQs are enabled before SRIOV. */
+ mb();
+
+ err = pci_enable_sriov(pdev, vfs);
+ if (err) {
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ return err;
+ }
+
+ return 0;
+}
+
+static void rvu_disable_sriov(struct rvu *rvu)
+{
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ pci_disable_sriov(rvu->pdev);
+}
+
+static void rvu_update_module_params(struct rvu *rvu)
+{
+ const char *default_pfl_name = "default";
+
+ strscpy(rvu->mkex_pfl_name,
+ mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+}
+
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -1680,6 +2424,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ /* Store module params in rvu structure */
+ rvu_update_module_params(rvu);
+
/* Check which blocks the HW supports */
rvu_check_block_implemented(rvu);
@@ -1689,24 +2436,35 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_release_regions;
- err = rvu_mbox_init(rvu);
+ /* Init mailbox btw AF and PFs */
+ err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
+ rvu->hw->total_pfs, rvu_afpf_mbox_handler,
+ rvu_afpf_mbox_up_handler);
if (err)
goto err_hwsetup;
- err = rvu_cgx_probe(rvu);
+ err = rvu_flr_init(rvu);
if (err)
goto err_mbox;
err = rvu_register_interrupts(rvu);
if (err)
- goto err_cgx;
+ goto err_flr;
+
+ /* Enable AF's VFs (if any) */
+ err = rvu_enable_sriov(rvu);
+ if (err)
+ goto err_irq;
return 0;
-err_cgx:
- rvu_cgx_wq_destroy(rvu);
+err_irq:
+ rvu_unregister_interrupts(rvu);
+err_flr:
+ rvu_flr_wq_destroy(rvu);
err_mbox:
- rvu_mbox_destroy(rvu);
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
+ rvu_cgx_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
err_release_regions:
@@ -1725,8 +2483,10 @@ static void rvu_remove(struct pci_dev *pdev)
struct rvu *rvu = pci_get_drvdata(pdev);
rvu_unregister_interrupts(rvu);
- rvu_cgx_wq_destroy(rvu);
- rvu_mbox_destroy(rvu);
+ rvu_flr_wq_destroy(rvu);
+ rvu_cgx_exit(rvu);
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
+ rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 2c0580cd2807..c9d60b0554c0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -11,6 +11,7 @@
#ifndef RVU_H
#define RVU_H
+#include <linux/pci.h>
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"
@@ -18,6 +19,9 @@
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_96XX 0xB200
+
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
#define PCI_PF_REG_BAR_NUM 2
@@ -64,7 +68,7 @@ struct nix_mcast {
struct qmem *mcast_buf;
int replay_pkind;
int next_free_mce;
- spinlock_t mce_lock; /* Serialize MCE updates */
+ struct mutex mce_lock; /* Serialize MCE updates */
};
struct nix_mce_list {
@@ -74,15 +78,27 @@ struct nix_mce_list {
};
struct npc_mcam {
- spinlock_t lock; /* MCAM entries and counters update lock */
+ struct rsrc_bmap counters;
+ struct mutex lock; /* MCAM entries and counters update lock */
+ unsigned long *bmap; /* bitmap, 0 => bmap_entries */
+ unsigned long *bmap_reverse; /* Reverse bitmap, bmap_entries => 0 */
+ u16 bmap_entries; /* Number of unreserved MCAM entries */
+ u16 bmap_fcnt; /* MCAM entries free count */
+ u16 *entry2pfvf_map;
+ u16 *entry2cntr_map;
+ u16 *cntr2pfvf_map;
+ u16 *cntr_refcnt;
u8 keysize; /* MCAM keysize 112/224/448 bits */
u8 banks; /* Number of MCAM banks */
u8 banks_per_entry;/* Number of keywords in key */
u16 banksize; /* Number of MCAM entries in each bank */
u16 total_entries; /* Total number of MCAM entries */
- u16 entries; /* Total minus reserved for NIX LFs */
u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */
u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */
+ u16 lprio_count;
+ u16 lprio_start;
+ u16 hprio_count;
+ u16 hprio_end;
};
/* Structure for per RVU func info ie PF/VF */
@@ -122,18 +138,35 @@ struct rvu_pfvf {
u16 tx_chan_base;
u8 rx_chan_cnt; /* total number of RX channels */
u8 tx_chan_cnt; /* total number of TX channels */
+ u16 maxlen;
+ u16 minlen;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
/* Broadcast pkt replication info */
u16 bcast_mce_idx;
struct nix_mce_list bcast_mce_list;
+
+ /* VLAN offload */
+ struct mcam_entry entry;
+ int rxvlan_index;
+ bool rxvlan;
};
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
- u16 *pfvf_map;
+#define NIX_TXSCHQ_TL1_CFG_DONE BIT_ULL(0)
+#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
+#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
+#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
+ u32 *pfvf_map;
+};
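
Illustration only, not part of the patch: the pfvf_map entries are widened to u32 so each one carries both the owning PF_FUNC (low 16 bits) and per-queue flags (high 16 bits); the TXSCH_MAP* macros above do the packing and unpacking. A standalone sketch of the same encoding, with hypothetical values:

	#include <stdint.h>
	#include <stdio.h>

	#define MAP(func, flags)   ((uint32_t)(((func) & 0xFFFF) | ((flags) << 16)))
	#define MAP_FUNC(map)      ((map) & 0xFFFF)
	#define MAP_FLAGS(map)     ((map) >> 16)
	#define TL1_CFG_DONE       (1U << 0)

	int main(void)
	{
		uint32_t map = MAP(0x0803, TL1_CFG_DONE);	/* hypothetical PF_FUNC 0x0803 */

		printf("func=0x%04x flags=0x%x\n", MAP_FUNC(map), MAP_FLAGS(map));
		return 0;
	}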
+
+struct nix_mark_format {
+ u8 total;
+ u8 in_use;
+ u32 *cfg;
};
struct npc_pkind {
@@ -141,9 +174,23 @@ struct npc_pkind {
u32 *pfchan_map;
};
+struct nix_flowkey {
+#define NIX_FLOW_KEY_ALG_MAX 32
+ u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
+ int in_use;
+};
+
+struct nix_lso {
+ u8 total;
+ u8 in_use;
+};
+
struct nix_hw {
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
+ struct nix_flowkey flowkey;
+ struct nix_mark_format mark_format;
+ struct nix_lso lso;
};
struct rvu_hwinfo {
@@ -164,6 +211,16 @@ struct rvu_hwinfo {
struct npc_mcam mcam;
};
+struct mbox_wq_info {
+ struct otx2_mbox mbox;
+ struct rvu_work *mbox_wrk;
+
+ struct otx2_mbox mbox_up;
+ struct rvu_work *mbox_wrk_up;
+
+ struct workqueue_struct *mbox_wq;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -172,14 +229,17 @@ struct rvu {
struct rvu_hwinfo *hw;
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
- spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+ int vfs; /* Number of VFs attached to RVU */
/* Mbox */
- struct otx2_mbox mbox;
- struct rvu_work *mbox_wrk;
- struct otx2_mbox mbox_up;
- struct rvu_work *mbox_wrk_up;
- struct workqueue_struct *mbox_wq;
+ struct mbox_wq_info afpf_wq_info;
+ struct mbox_wq_info afvf_wq_info;
+
+ /* PF FLR */
+ struct rvu_work *flr_wrk;
+ struct workqueue_struct *flr_wq;
+ struct mutex flr_lock; /* Serialize FLRs */
/* MSI-X */
u16 num_vec;
@@ -190,7 +250,7 @@ struct rvu {
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
u8 cgx_mapped_pfs;
- u8 cgx_cnt; /* available cgx ports */
+ u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
* every cgx lmac port
@@ -201,6 +261,8 @@ struct rvu {
struct workqueue_struct *cgx_evh_wq;
spinlock_t cgx_evq_lock; /* cgx event queue lock */
struct list_head cgx_evq_head; /* cgx event queue head */
+
+ char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -223,9 +285,22 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
+static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x00) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+}
+
/* Function Prototypes
* RVU
*/
+static inline int is_afvf(u16 pcifunc)
+{
+ return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+}
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
@@ -236,6 +311,7 @@ int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
@@ -266,89 +342,110 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
-int rvu_cgx_probe(struct rvu *rvu);
-void rvu_cgx_wq_destroy(struct rvu *rvu);
+int rvu_cgx_init(struct rvu *rvu);
+int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp);
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
-int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NIX APIs */
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
int rvu_nix_init(struct rvu *rvu);
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, u32 cfg);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
-int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
struct nix_rss_flowkey_cfg *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg_rsp *rsp);
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+ struct nix_mark_format_cfg *req,
+ struct nix_mark_format_cfg_rsp *rsp);
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+ struct nix_lso_format_cfg *req,
+ struct nix_lso_format_cfg_rsp *rsp);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -360,9 +457,48 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, bool allmulti);
void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+ struct npc_mcam_free_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+ struct npc_mcam_write_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+ struct npc_mcam_shift_entry_req *req,
+ struct npc_mcam_shift_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+ struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct npc_mcam_oper_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_and_write_entry_req *req,
+ struct npc_mcam_alloc_and_write_entry_rsp *rsp);
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+ struct npc_get_kex_cfg_rsp *rsp);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 188185c15b4a..7d7133c5f799 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -20,14 +20,14 @@ struct cgx_evq_entry {
struct cgx_link_event link_event;
};
-#define M(_name, _id, _req_type, _rsp_type) \
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
static struct _req_type __maybe_unused \
-*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
{ \
struct _req_type *req; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
- &rvu->mbox_up, devid, sizeof(struct _req_type), \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
sizeof(struct _rsp_type)); \
if (!req) \
return NULL; \
@@ -52,7 +52,7 @@ static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
- if (cgx_id >= rvu->cgx_cnt)
+ if (cgx_id >= rvu->cgx_cnt_max)
return NULL;
return rvu->cgx_idmap[cgx_id];
@@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
- int cgx_cnt = rvu->cgx_cnt;
+ int cgx_cnt_max = rvu->cgx_cnt_max;
int cgx, lmac_cnt, lmac;
int pf = PF_CGXMAP_BASE;
int size, free_pkind;
- if (!cgx_cnt)
+ if (!cgx_cnt_max)
return 0;
- if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
return -EINVAL;
/* Alloc map table
* An additional entry is required since PF id starts from 1 and
* hence entry at offset 0 is invalid.
*/
- size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
- rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+ size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
if (!rvu->pf2cgxlmac_map)
return -ENOMEM;
- /* Initialize offset 0 with an invalid cgx and lmac id */
- rvu->pf2cgxlmac_map[0] = 0xFF;
+ /* Initialize all entries with an invalid cgx and lmac id */
+ memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Reverse map table */
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
- cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+ cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
rvu->cgx_mapped_pfs = 0;
- for (cgx = 0; cgx < cgx_cnt; cgx++) {
+ for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+ if (!rvu_cgx_pdata(cgx, rvu))
+ continue;
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -177,12 +179,12 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
}
/* Send mbox message to PF */
- msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
+ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
if (!msg)
continue;
msg->link_info = *linfo;
- otx2_mbox_msg_send(&rvu->mbox_up, pfid);
- err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
if (err)
dev_warn(rvu->dev, "notification to pf %d failed\n",
pfid);
@@ -216,7 +218,7 @@ static void cgx_evhandler_task(struct work_struct *work)
} while (1);
}
-static void cgx_lmac_event_handler_init(struct rvu *rvu)
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
struct cgx_event_cb cb;
int cgx, lmac, err;
@@ -228,14 +230,16 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
if (!rvu->cgx_evh_wq) {
dev_err(rvu->dev, "alloc workqueue failed");
- return;
+ return -ENOMEM;
}
cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
cb.data = rvu;
- for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
@@ -244,9 +248,11 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
cgx, lmac);
}
}
+
+ return 0;
}
-void rvu_cgx_wq_destroy(struct rvu *rvu)
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
if (rvu->cgx_evh_wq) {
flush_workqueue(rvu->cgx_evh_wq);
@@ -255,25 +261,28 @@ void rvu_cgx_wq_destroy(struct rvu *rvu)
}
}
-int rvu_cgx_probe(struct rvu *rvu)
+int rvu_cgx_init(struct rvu *rvu)
{
- int i, err;
+ int cgx, err;
+ void *cgxd;
- /* find available cgx ports */
- rvu->cgx_cnt = cgx_get_cgx_cnt();
- if (!rvu->cgx_cnt) {
+	/* CGX port IDs start from 0 and are not necessarily contiguous.
+ * Hence we allocate resources based on the maximum port id value.
+ */
+ rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+ if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
return -ENODEV;
}
- rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
- GFP_KERNEL);
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+ sizeof(void *), GFP_KERNEL);
if (!rvu->cgx_idmap)
return -ENOMEM;
/* Initialize the cgxdata table */
- for (i = 0; i < rvu->cgx_cnt; i++)
- rvu->cgx_idmap[i] = cgx_get_pdata(i);
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+ rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
/* Map CGX LMAC interfaces to RVU PFs */
err = rvu_map_cgx_lmac_pf(rvu);
@@ -281,7 +290,47 @@ int rvu_cgx_probe(struct rvu *rvu)
return err;
/* Register for CGX events */
- cgx_lmac_event_handler_init(rvu);
+ err = cgx_lmac_event_handler_init(rvu);
+ if (err)
+ return err;
+
+	/* Ensure event handler registration is completed before
+	 * we turn on the links.
+ */
+ mb();
+
+ /* Do link up for all CGX ports */
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ err = cgx_lmac_linkup_start(cgxd);
+ if (err)
+ dev_err(rvu->dev,
+ "Link up process failed to start on cgx %d\n",
+ cgx);
+ }
+
+ return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+ int cgx, lmac;
+ void *cgxd;
+
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+ cgx_lmac_evh_unregister(cgxd, lmac);
+ }
+
+ /* Ensure event handler unregister is completed */
+ mb();
+
+ rvu_cgx_wq_destroy(rvu);
return 0;
}
@@ -303,21 +352,21 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
return 0;
}
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
@@ -354,7 +403,7 @@ int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
@@ -368,7 +417,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
@@ -387,7 +436,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
@@ -407,7 +456,7 @@ int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
@@ -451,21 +500,21 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
return 0;
}
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
return 0;
}
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp)
{
u8 cgx_id, lmac_id;
@@ -500,14 +549,14 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
lmac_id, en);
}
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a5ab7eff2301..4a7609fd6dd0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -43,6 +43,19 @@ enum mc_buf_cnt {
MC_BUF_CNT_2048,
};
+enum nix_makr_fmt_indexes {
+ NIX_MARK_CFG_IP_DSCP_RED,
+ NIX_MARK_CFG_IP_DSCP_YELLOW,
+ NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
+ NIX_MARK_CFG_IP_ECN_RED,
+ NIX_MARK_CFG_IP_ECN_YELLOW,
+ NIX_MARK_CFG_IP_ECN_YELLOW_RED,
+ NIX_MARK_CFG_VLAN_DEI_RED,
+ NIX_MARK_CFG_VLAN_DEI_YELLOW,
+ NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
+ NIX_MARK_CFG_MAX,
+};
+
/* For now considering MC resources needed for broadcast
* pkt replication only. i.e 256 HWVFs + 12 PFs.
*/
@@ -55,6 +68,17 @@ struct mce {
u16 pcifunc;
};
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return false;
+ return true;
+}
+
int rvu_get_nixlf_count(struct rvu *rvu)
{
struct rvu_block *block;
@@ -94,11 +118,29 @@ static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
return NULL;
}
+static void nix_rx_sync(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+	/* Sync all in-flight RX packets to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "NIX RX software sync failed\n");
+
+	/* As per an HW erratum in 9xxx A0 silicon, HW may clear the SW_SYNC[ENA]
+ * bit too early. Hence wait for 50us more.
+ */
+ if (is_rvu_9xxx_A0(rvu))
+ usleep_range(50, 60);
+}
+
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
int lvl, u16 pcifunc, u16 schq)
{
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
+ u16 map_func;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -109,12 +151,19 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
if (schq >= txsch->schq.max)
return false;
- spin_lock(&rvu->rsrc_lock);
- if (txsch->pfvf_map[schq] != pcifunc) {
- spin_unlock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
+ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+ mutex_unlock(&rvu->rsrc_lock);
+
+ /* For TL1 schq, sharing across VF's of same PF is ok */
+ if (lvl == NIX_TXSCH_LVL_TL1 &&
+ rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
return false;
- }
- spin_unlock(&rvu->rsrc_lock);
+
+ if (lvl != NIX_TXSCH_LVL_TL1 &&
+ map_func != pcifunc)
+ return false;
+
return true;
}
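
The map_func check above relies on each pfvf_map entry carrying both the owning PCIFUNC and per-queue flags in one 32-bit word. A minimal sketch of the assumed packing (the real TXSCH_MAP/TXSCH_MAP_FUNC/TXSCH_MAP_FLAGS macros live in rvu.h, which is not part of this hunk; function in the low 16 bits, flags in the upper 16):

#include <stdint.h>

/* Hypothetical stand-ins for the rvu.h macros used in this patch. */
#define TXSCH_MAP(func, flags)   (((flags) << 16) | ((func) & 0xFFFF))
#define TXSCH_MAP_FUNC(map)      ((map) & 0xFFFF)
#define TXSCH_MAP_FLAGS(map)     ((map) >> 16)

static inline uint32_t txsch_map_example(uint16_t pcifunc, uint16_t flags)
{
	/* Pack owner and flags into one pfvf_map word. */
	uint32_t map = TXSCH_MAP(pcifunc, (uint32_t)flags);

	/* Round-trips back to the same owner and flags. */
	return (TXSCH_MAP_FUNC(map) == pcifunc &&
		TXSCH_MAP_FLAGS(map) == flags) ? map : 0;
}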
@@ -122,7 +171,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
u8 cgx_id, lmac_id;
- int pkind, pf;
+ int pkind, pf, vf;
int err;
pf = rvu_get_pf(pcifunc);
@@ -148,6 +197,14 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_npc_set_pkind(rvu, pkind, pfvf);
break;
case NIX_INTF_TYPE_LBK:
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
+ pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
+ NIX_CHAN_LBK_CHX(0, vf + 1);
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, false);
break;
}
@@ -168,14 +225,21 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
+ pfvf->maxlen = NIC_HW_MIN_FRS;
+ pfvf->minlen = NIC_HW_MIN_FRS;
return 0;
}
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int err;
+ pfvf->maxlen = 0;
+ pfvf->minlen = 0;
+ pfvf->rxvlan = false;
+
/* Remove this PF_FUNC from bcast pkt replication list */
err = nix_update_bcast_mce_list(rvu, pcifunc, false);
if (err) {
@@ -234,17 +298,21 @@ static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
/* TCP's flags field */
field.layer = NIX_TXLAYER_OL4;
field.offset = 12;
- field.sizem1 = 0; /* not needed */
+ field.sizem1 = 1; /* 2 bytes */
field.alg = NIX_LSOALG_TCP_FLAGS;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
}
-static void nix_setup_lso(struct rvu *rvu, int blkaddr)
+static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
u64 cfg, idx, fidx = 0;
+ /* Get max HW supported format indices */
+ cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
+ nix_hw->lso.total = cfg;
+
/* Enable LSO */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
/* For TSO, set first and middle segment flags to
@@ -254,7 +322,10 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr)
cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
- /* Configure format fields for TCPv4 segmentation offload */
+ /* Setup default static LSO formats
+ *
+ * Configure format fields for TCPv4 segmentation offload
+ */
idx = NIX_LSO_FORMAT_IDX_TSOV4;
nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
@@ -264,6 +335,7 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
+ nix_hw->lso.in_use++;
/* Configure format fields for TCPv6 segmentation offload */
idx = NIX_LSO_FORMAT_IDX_TSOV6;
@@ -276,6 +348,7 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
+ nix_hw->lso.in_use++;
}
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
@@ -388,9 +461,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
bool ena;
u64 cfg;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
+ if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
@@ -400,9 +472,14 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return NIX_AF_ERR_AQ_ENQUEUE;
}
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Skip NIXLF check for broadcast MCE entry init */
+ if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+ if (!pfvf->nixlf || nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+ }
switch (req->ctype) {
case NIX_AQ_CTYPE_RQ:
@@ -447,7 +524,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
/* Check if SQ pointed SMQ belongs to this PF/VF or not */
if (req->ctype == NIX_AQ_CTYPE_SQ &&
- req->op != NIX_AQ_INSTOP_WRITE) {
+ ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+ (req->op == NIX_AQ_INSTOP_WRITE &&
+ req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
pcifunc, req->sq.smq))
return NIX_AF_ERR_AQ_ENQUEUE;
@@ -637,25 +716,25 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
-int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
-int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp)
{
return nix_lf_hwctx_disable(rvu, req);
}
-int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp)
{
- int nixlf, qints, hwctx_size, err, rc = 0;
+ int nixlf, qints, hwctx_size, intf, err, rc = 0;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
@@ -676,6 +755,24 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
+ if (req->npa_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
+ return NIX_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
+ if (req->sso_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
+ return NIX_AF_INVAL_SSO_PF_FUNC;
+ }
+
/* If RSS is being enabled, check if requested config is valid.
* RSS table size should be power of two, otherwise
* RSS_GRP::OFFSET + adder might go beyond that group or
@@ -777,21 +874,20 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
(u64)pfvf->nix_qints_ctx->iova);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
+ /* Setup VLANX TPIDs.
+ * Use VLAN1 for 802.1Q
+ * and VLAN0 for 802.1AD.
+ */
+ cfg = (0x8100ULL << 16) | 0x88A8ULL;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
/* Enable LMTST for this NIX LF */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
- /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC
- * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's
- * PCIFUNC itself.
- */
- if (req->npa_func == RVU_DEFAULT_PF_FUNC)
- cfg = pcifunc;
- else
+ /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
+ if (req->npa_func)
cfg = req->npa_func;
-
- if (req->sso_func == RVU_DEFAULT_PF_FUNC)
- cfg |= (u64)pcifunc << 16;
- else
+ if (req->sso_func)
cfg |= (u64)req->sso_func << 16;
cfg |= (u64)req->xqe_sz << 33;
@@ -800,10 +896,14 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
/* Config Rx pkt length, csum checks and apad enable / disable */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
- err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
+ intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ err = nix_interface_init(rvu, pcifunc, intf, nixlf);
if (err)
goto free_mem;
+ /* Disable NPC entries as NIXLF's contexts are not initialized yet */
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+
goto exit;
free_mem:
@@ -823,10 +923,18 @@ exit:
rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ /* Get HW supported stat count */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
+ rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
+ /* Get count of CQ IRQs and error IRQs supported per LF */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ rsp->qints = ((cfg >> 12) & 0xFFF);
+ rsp->cints = ((cfg >> 24) & 0xFFF);
return rc;
}
-int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -860,6 +968,41 @@ int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+ struct nix_mark_format_cfg *req,
+ struct nix_mark_format_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, rc;
+ u32 cfg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ cfg = (((u32)req->offset & 0x7) << 16) |
+ (((u32)req->y_mask & 0xF) << 12) |
+ (((u32)req->y_val & 0xF) << 8) |
+ (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
+
+ rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
+ if (rc < 0) {
+ dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
+ rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ return NIX_AF_ERR_MARK_CFG_FAIL;
+ }
+
+ rsp->mark_format_idx = rc;
+ return 0;
+}
+
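
The cfg word built in the handler above packs the mbox fields into fixed nibbles before reserving a mark format index. A small illustration of that packing, using only the shifts visible in the handler (the helper name is made up for this sketch):

#include <stdint.h>

/* Mirrors rvu_mbox_handler_nix_mark_format_cfg():
 * offset -> bits 18:16, y_mask -> 15:12, y_val -> 11:8,
 * r_mask -> 7:4, r_val -> 3:0.
 */
static uint32_t nix_pack_mark_format(uint8_t offset, uint8_t y_mask,
				     uint8_t y_val, uint8_t r_mask,
				     uint8_t r_val)
{
	return ((uint32_t)(offset & 0x7) << 16) |
	       ((uint32_t)(y_mask & 0xF) << 12) |
	       ((uint32_t)(y_val  & 0xF) <<  8) |
	       ((uint32_t)(r_mask & 0xF) <<  4) |
	       ((uint32_t)(r_val  & 0xF));
}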
/* Disable shaping of pkts by a scheduler queue
* at a given scheduler level.
*/
@@ -918,7 +1061,74 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
-int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+static int
+rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
+ u16 *schq_list, u16 *schq_cnt)
+{
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u16 schq_base;
+ u32 *pfvf_map;
+ int pf, intf;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -ENODEV;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+ pfvf_map = txsch->pfvf_map;
+ pf = rvu_get_pf(pcifunc);
+
+ /* static allocation as two TL1's per link */
+ intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+ switch (intf) {
+ case NIX_INTF_TYPE_CGX:
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+ schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
+ break;
+ case NIX_INTF_TYPE_LBK:
+ schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (schq_base + 1 > txsch->schq.max)
+ return -ENODEV;
+
+ /* init pfvf_map as we store flags */
+ if (pfvf_map[schq_base] == U32_MAX) {
+ pfvf_map[schq_base] =
+ TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+ pfvf_map[schq_base + 1] =
+ TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+
+ /* Onetime reset for TL1 */
+ nix_reset_tx_linkcfg(rvu, blkaddr,
+ NIX_TXSCH_LVL_TL1, schq_base);
+ nix_reset_tx_shaping(rvu, blkaddr,
+ NIX_TXSCH_LVL_TL1, schq_base);
+
+ nix_reset_tx_linkcfg(rvu, blkaddr,
+ NIX_TXSCH_LVL_TL1, schq_base + 1);
+ nix_reset_tx_shaping(rvu, blkaddr,
+ NIX_TXSCH_LVL_TL1, schq_base + 1);
+ }
+
+ if (schq_list && schq_cnt) {
+ schq_list[0] = schq_base;
+ schq_list[1] = schq_base + 1;
+ *schq_cnt = 2;
+ }
+
+ return 0;
+}
+
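
The static TL1 assignment above reserves two scheduler queues per link. A worked sketch of the index math, assuming (for illustration only) MAX_LMAC_PER_CGX is 4 and two CGX blocks:

#include <stdio.h>

#define MAX_LMAC_PER_CGX 4	/* assumed value, illustration only */

int main(void)
{
	int cgx_cnt_max = 2;	/* assumed number of CGX blocks */
	int cgx_id, lmac_id;

	/* Two TL1 schqs per CGX LMAC link, as in rvu_get_tl1_schqs(). */
	for (cgx_id = 0; cgx_id < cgx_cnt_max; cgx_id++)
		for (lmac_id = 0; lmac_id < MAX_LMAC_PER_CGX; lmac_id++)
			printf("cgx%d lmac%d -> TL1 %d,%d\n", cgx_id, lmac_id,
			       (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2,
			       (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2 + 1);

	/* LBK links start right after all CGX links. */
	printf("LBK -> TL1 %d,%d\n",
	       cgx_cnt_max * MAX_LMAC_PER_CGX * 2,
	       cgx_cnt_max * MAX_LMAC_PER_CGX * 2 + 1);
	return 0;
}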
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp)
{
@@ -928,6 +1138,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
int blkaddr, rc = 0;
+ u32 *pfvf_map;
u16 schq;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -939,17 +1150,27 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
if (!nix_hw)
return -EINVAL;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
req_schq = req->schq_contig[lvl] + req->schq[lvl];
+ pfvf_map = txsch->pfvf_map;
+
+ if (!req_schq)
+ continue;
/* There are only 28 TL1s */
- if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
- goto err;
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+ if (req->schq_contig[lvl] ||
+ req->schq[lvl] > 2 ||
+ rvu_get_tl1_schqs(rvu, blkaddr,
+ pcifunc, NULL, NULL))
+ goto err;
+ continue;
+ }
/* Check if request is valid */
- if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ if (req_schq > MAX_TXSCHQ_PER_FUNC)
goto err;
/* If contiguous queues are needed, check for availability */
@@ -965,16 +1186,32 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
rsp->schq_contig[lvl] = req->schq_contig[lvl];
+ pfvf_map = txsch->pfvf_map;
rsp->schq[lvl] = req->schq[lvl];
- schq = 0;
+ if (!req->schq[lvl] && !req->schq_contig[lvl])
+ continue;
+
+ /* Handle TL1 specially as its allocation
+ * is restricted to 2 TL1s per link
+ */
+
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+ rsp->schq_contig[lvl] = 0;
+ rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
+ &rsp->schq_list[lvl][0],
+ &rsp->schq[lvl]);
+ continue;
+ }
+
/* Alloc contiguous queues first */
if (req->schq_contig[lvl]) {
schq = rvu_alloc_rsrc_contig(&txsch->schq,
req->schq_contig[lvl]);
for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
- txsch->pfvf_map[schq] = pcifunc;
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
rsp->schq_contig_list[lvl][idx] = schq;
@@ -985,7 +1222,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
/* Alloc non-contiguous queues */
for (idx = 0; idx < req->schq[lvl]; idx++) {
schq = rvu_alloc_rsrc(&txsch->schq);
- txsch->pfvf_map[schq] = pcifunc;
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
rsp->schq_list[lvl][idx] = schq;
@@ -995,7 +1232,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
err:
rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return rc;
}
@@ -1020,14 +1257,14 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
return NIX_AF_ERR_AF_LF_INVALID;
/* Disable TL2/3 queue links before SMQ flush*/
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
continue;
txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
- if (txsch->pfvf_map[schq] != pcifunc)
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
}
@@ -1036,7 +1273,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
/* Flush SMQs */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
for (schq = 0; schq < txsch->schq.max; schq++) {
- if (txsch->pfvf_map[schq] != pcifunc)
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
/* Do SMQ flush and set enqueue xoff */
@@ -1054,15 +1291,21 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
/* Now free scheduler queues to free pool */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ /* Free all SCHQs except TL1 as
+ * TL1 is shared across all VFs of an RVU PF
+ */
+ if (lvl == NIX_TXSCH_LVL_TL1)
+ continue;
+
txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
- if (txsch->pfvf_map[schq] != pcifunc)
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
rvu_free_rsrc(&txsch->schq, schq);
txsch->pfvf_map[schq] = 0;
}
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
@@ -1073,11 +1316,81 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
return 0;
}
-int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+static int nix_txschq_free_one(struct rvu *rvu,
+ struct nix_txsch_free_req *req)
+{
+ int lvl, schq, nixlf, blkaddr, rc;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u32 *pfvf_map;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lvl = req->schq_lvl;
+ schq = req->schq;
+ txsch = &nix_hw->txsch[lvl];
+
+ /* Don't allow freeing TL1 */
+ if (lvl > NIX_TXSCH_LVL_TL2 ||
+ schq >= txsch->schq.max)
+ goto err;
+
+ pfvf_map = txsch->pfvf_map;
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
+ mutex_unlock(&rvu->rsrc_lock);
+ goto err;
+ }
+
+ /* Flush if it is an SMQ. Onus of disabling
+ * TL2/3 queue links before SMQ flush is on the user
+ */
+ if (lvl == NIX_TXSCH_LVL_SMQ) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+ /* Wait for flush to complete */
+ rc = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+ if (rc) {
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+ }
+ }
+
+ /* Free the resource */
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = 0;
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+err:
+ return NIX_AF_ERR_TLX_INVALID;
+}
+
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp)
{
- return nix_txschq_free(rvu, req->hdr.pcifunc);
+ if (req->flags & TXSCHQ_FREE_ALL)
+ return nix_txschq_free(rvu, req->hdr.pcifunc);
+ else
+ return nix_txschq_free_one(rvu, req);
}
static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
@@ -1118,16 +1431,73 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
return true;
}
-int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+static int
+nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
+{
+ u16 schq_list[2], schq_cnt, schq;
+ int blkaddr, idx, err = 0;
+ u16 map_func, map_flags;
+ struct nix_hw *nix_hw;
+ u64 reg, regval;
+ u32 *pfvf_map;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ err = rvu_get_tl1_schqs(rvu, blkaddr,
+ pcifunc, schq_list, &schq_cnt);
+ if (err)
+ goto unlock;
+
+ for (idx = 0; idx < schq_cnt; idx++) {
+ schq = schq_list[idx];
+ map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+ map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+ /* Check if config is already done or this is a PF */
+ if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
+ continue;
+
+ /* default configuration */
+ reg = NIX_AF_TL1X_TOPOLOGY(schq);
+ regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+ rvu_write64(rvu, blkaddr, reg, regval);
+ reg = NIX_AF_TL1X_SCHEDULE(schq);
+ regval = TXSCH_TL1_DFLT_RR_QTM;
+ rvu_write64(rvu, blkaddr, reg, regval);
+ reg = NIX_AF_TL1X_CIR(schq);
+ regval = 0;
+ rvu_write64(rvu, blkaddr, reg, regval);
+
+ map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+ pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+ }
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
{
+ u16 schq, pcifunc = req->hdr.pcifunc;
struct rvu_hwinfo *hw = rvu->hw;
- u16 pcifunc = req->hdr.pcifunc;
u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
+ u16 map_func, map_flags;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
+ u32 *pfvf_map;
int nixlf;
if (req->lvl >= NIX_TXSCH_LVL_CNT ||
@@ -1147,6 +1517,16 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
return NIX_AF_ERR_AF_LF_INVALID;
txsch = &nix_hw->txsch[req->lvl];
+ pfvf_map = txsch->pfvf_map;
+
+ /* VF is only allowed to trigger
+ * setting default cfg on TL1
+ */
+ if (pcifunc & RVU_PFVF_FUNC_MASK &&
+ req->lvl == NIX_TXSCH_LVL_TL1) {
+ return nix_tl1_default_cfg(rvu, pcifunc);
+ }
+
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
regval = req->regval[idx];
@@ -1164,6 +1544,21 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
regval |= ((u64)nixlf << 24);
}
+ /* Mark config as done for TL1 by PF */
+ if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
+ schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+ map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+ map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+ pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+ mutex_unlock(&rvu->rsrc_lock);
+ }
+
rvu_write64(rvu, blkaddr, reg, regval);
/* Check for SMQ flush, if so, poll for its completion */
@@ -1181,35 +1576,22 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
struct nix_vtag_config *req)
{
- u64 regval = 0;
-
-#define NIX_VTAGTYPE_MAX 0x8ull
-#define NIX_VTAGSIZE_MASK 0x7ull
-#define NIX_VTAGSTRIP_CAP_MASK 0x30ull
+ u64 regval = req->vtag_size;
- if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
- req->vtag_size > VTAGSIZE_T8)
+ if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
return -EINVAL;
- regval = rvu_read64(rvu, blkaddr,
- NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
-
- if (req->rx.strip_vtag && req->rx.capture_vtag)
- regval |= BIT_ULL(4) | BIT_ULL(5);
- else if (req->rx.strip_vtag)
+ if (req->rx.capture_vtag)
+ regval |= BIT_ULL(5);
+ if (req->rx.strip_vtag)
regval |= BIT_ULL(4);
- else
- regval &= ~(BIT_ULL(4) | BIT_ULL(5));
-
- regval &= ~NIX_VTAGSIZE_MASK;
- regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
rvu_write64(rvu, blkaddr,
NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
return 0;
}
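
The rewritten nix_rx_vtag_cfg() above now builds the per-type register value from scratch rather than read-modify-write. A minimal sketch of the resulting layout, taking the bit positions straight from the code above (vtag size in the low bits, strip at bit 4, capture at bit 5):

#include <stdint.h>

/* Per-vtag-type Rx config word as assembled in nix_rx_vtag_cfg() above. */
static uint64_t nix_rx_vtag_regval(uint8_t vtag_size, int strip, int capture)
{
	uint64_t regval = vtag_size;	/* size field, low bits */

	if (capture)
		regval |= 1ULL << 5;	/* capture the stripped tag */
	if (strip)
		regval |= 1ULL << 4;	/* strip the tag from the pkt */
	return regval;
}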
-int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp)
{
@@ -1243,7 +1625,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
struct nix_aq_enq_req aq_req;
int err;
- aq_req.hdr.pcifunc = pcifunc;
+ aq_req.hdr.pcifunc = 0;
aq_req.ctype = NIX_AQ_CTYPE_MCE;
aq_req.op = op;
aq_req.qidx = mce;
@@ -1294,7 +1676,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
return 0;
/* Add a new one to the list, at the tail */
- mce = kzalloc(sizeof(*mce), GFP_ATOMIC);
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
if (!mce)
return -ENOMEM;
mce->idx = idx;
@@ -1317,6 +1699,10 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
struct rvu_pfvf *pfvf;
int blkaddr;
+ /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
+ if (is_afvf(pcifunc))
+ return 0;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return 0;
@@ -1340,7 +1726,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
return -EINVAL;
}
- spin_lock(&mcast->mce_lock);
+ mutex_lock(&mcast->mce_lock);
err = nix_update_mce_list(mce_list, pcifunc, idx, add);
if (err)
@@ -1370,7 +1756,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
}
end:
- spin_unlock(&mcast->mce_lock);
+ mutex_unlock(&mcast->mce_lock);
return err;
}
@@ -1455,7 +1841,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
BIT_ULL(63) | (mcast->replay_pkind << 24) |
BIT_ULL(20) | MC_BUF_CNT);
- spin_lock_init(&mcast->mce_lock);
+ mutex_init(&mcast->mce_lock);
return nix_setup_bcast_tables(rvu, nix_hw);
}
@@ -1499,14 +1885,66 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
* PF/VF pcifunc mapping info.
*/
txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
- sizeof(u16), GFP_KERNEL);
+ sizeof(u32), GFP_KERNEL);
if (!txsch->pfvf_map)
return -ENOMEM;
+ memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
}
return 0;
}
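
The memset above initialises every 32-bit pfvf_map entry to the U32_MAX sentinel that rvu_get_tl1_schqs() later tests, simply by filling each byte with 0xFF. A minimal demonstration of that equivalence:

#include <stdint.h>
#include <string.h>
#include <assert.h>

int main(void)
{
	uint32_t map[4];

	/* Filling every byte with 0xFF yields UINT32_MAX per u32 entry. */
	memset(map, 0xFF, sizeof(map));
	assert(map[0] == UINT32_MAX);
	return 0;
}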
-int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, u32 cfg)
+{
+ int fmt_idx;
+
+ for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
+ if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
+ return fmt_idx;
+ }
+ if (fmt_idx >= nix_hw->mark_format.total)
+ return -ERANGE;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
+ nix_hw->mark_format.cfg[fmt_idx] = cfg;
+ nix_hw->mark_format.in_use++;
+ return fmt_idx;
+}
+
+static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr)
+{
+ u64 cfgs[] = {
+ [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
+ [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
+ [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
+ [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
+ [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
+ [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
+ [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
+ [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
+ [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
+ };
+ int i, rc;
+ u64 total;
+
+ total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
+ nix_hw->mark_format.total = (u8)total;
+ nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
+ GFP_KERNEL);
+ if (!nix_hw->mark_format.cfg)
+ return -ENOMEM;
+ for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
+ rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
+ if (rc < 0)
+ dev_err(rvu->dev, "Err %d in setup mark format %d\n",
+ rc, i);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1537,190 +1975,287 @@ int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
}
/* Returns the ALG index to be set into NPC_RX_ACTION */
-static int get_flowkey_alg_idx(u32 flow_cfg)
-{
- u32 ip_cfg;
-
- flow_cfg &= ~FLOW_KEY_TYPE_PORT;
- ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
- if (flow_cfg == ip_cfg)
- return FLOW_KEY_ALG_IP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
- return FLOW_KEY_ALG_TCP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
- return FLOW_KEY_ALG_UDP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
- return FLOW_KEY_ALG_SCTP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
- return FLOW_KEY_ALG_TCP_UDP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
- return FLOW_KEY_ALG_TCP_SCTP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
- return FLOW_KEY_ALG_UDP_SCTP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
- FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
- return FLOW_KEY_ALG_TCP_UDP_SCTP;
-
- return FLOW_KEY_ALG_PORT;
-}
-
-int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
- struct nix_rss_flowkey_cfg *req,
- struct msg_rsp *rsp)
+static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
- struct rvu_hwinfo *hw = rvu->hw;
- u16 pcifunc = req->hdr.pcifunc;
- int alg_idx, nixlf, blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ int i;
- alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
+ /* Scan over existing algo entries to find a match */
+ for (i = 0; i < nix_hw->flowkey.in_use; i++)
+ if (nix_hw->flowkey.flowkey[i] == flow_cfg)
+ return i;
- rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
- alg_idx, req->mcam_index);
- return 0;
+ return -ERANGE;
}
-static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
- struct nix_rx_flowkey_alg *field = NULL;
- int idx, key_type;
+ int idx, nr_field, key_off, field_marker, keyoff_marker;
+ int max_key_off, max_bit_pos, group_member;
+ struct nix_rx_flowkey_alg *field;
+ struct nix_rx_flowkey_alg tmp;
+ u32 key_type, valid_key;
if (!alg)
- return;
+ return -EINVAL;
- /* FIELD0: IPv4
- * FIELD1: IPv6
- * FIELD2: TCP/UDP/SCTP/ALL
- * FIELD3: Unused
- * FIELD4: Unused
- *
- * Each of the 32 possible flow key algorithm definitions should
+#define FIELDS_PER_ALG 5
+#define MAX_KEY_OFF 40
+ /* Clear all fields */
+ memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
+
+ /* Each of the 32 possible flow key algorithm definitions should
* fall into above incremental config (except ALG0). Otherwise a
* single NPC MCAM entry is not sufficient for supporting RSS.
*
* If a different definition or combination needed then NPC MCAM
* has to be programmed to filter such pkts and it's action should
* point to this definition to calculate flowtag or hash.
+ *
+ * The `for loop` goes over _all_ protocol fields and the following
+ * variables depict the state machine's forward progress logic.
+ *
+ * keyoff_marker - Enabled when hash byte length needs to be accounted
+ * in field->key_offset update.
+ * field_marker - Enabled when a new field needs to be selected.
+ * group_member - Enabled when protocol is part of a group.
*/
- for (idx = 0; idx < 32; idx++) {
- key_type = flow_cfg & BIT_ULL(idx);
- if (!key_type)
- continue;
+
+ keyoff_marker = 0; max_key_off = 0; group_member = 0;
+ nr_field = 0; key_off = 0; field_marker = 1;
+ field = &tmp; max_bit_pos = fls(flow_cfg);
+ for (idx = 0;
+ idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
+ key_off < MAX_KEY_OFF; idx++) {
+ key_type = BIT(idx);
+ valid_key = flow_cfg & key_type;
+ /* Found a field marker, reset the field values */
+ if (field_marker)
+ memset(&tmp, 0, sizeof(tmp));
+
switch (key_type) {
- case FLOW_KEY_TYPE_PORT:
- field = &alg[0];
+ case NIX_FLOW_KEY_TYPE_PORT:
field->sel_chan = true;
/* This should be set to 1, when SEL_CHAN is set */
field->bytesm1 = 1;
+ field_marker = true;
+ keyoff_marker = true;
break;
- case FLOW_KEY_TYPE_IPV4:
- field = &alg[0];
+ case NIX_FLOW_KEY_TYPE_IPV4:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP;
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
field->ltype_mask = 0xF; /* Match only IPv4 */
+ field_marker = true;
+ keyoff_marker = false;
break;
- case FLOW_KEY_TYPE_IPV6:
- field = &alg[1];
+ case NIX_FLOW_KEY_TYPE_IPV6:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP6;
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
field->ltype_mask = 0xF; /* Match only IPv6 */
+ field_marker = true;
+ keyoff_marker = true;
break;
- case FLOW_KEY_TYPE_TCP:
- case FLOW_KEY_TYPE_UDP:
- case FLOW_KEY_TYPE_SCTP:
- field = &alg[2];
+ case NIX_FLOW_KEY_TYPE_TCP:
+ case NIX_FLOW_KEY_TYPE_UDP:
+ case NIX_FLOW_KEY_TYPE_SCTP:
field->lid = NPC_LID_LD;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
- if (key_type == FLOW_KEY_TYPE_TCP)
+ if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
field->ltype_match |= NPC_LT_LD_TCP;
- else if (key_type == FLOW_KEY_TYPE_UDP)
+ group_member = true;
+ } else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
+ valid_key) {
field->ltype_match |= NPC_LT_LD_UDP;
- else if (key_type == FLOW_KEY_TYPE_SCTP)
+ group_member = true;
+ } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
+ valid_key) {
field->ltype_match |= NPC_LT_LD_SCTP;
- field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
+ group_member = true;
+ }
field->ltype_mask = ~field->ltype_match;
+ if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
+ /* Handle the case where any of the group items
+ * is enabled in the group but not the final one
+ */
+ if (group_member) {
+ valid_key = true;
+ group_member = false;
+ }
+ field_marker = true;
+ keyoff_marker = true;
+ } else {
+ field_marker = false;
+ keyoff_marker = false;
+ }
break;
}
- if (field)
- field->ena = 1;
- field = NULL;
+ field->ena = 1;
+
+ /* Found a valid flow key type */
+ if (valid_key) {
+ field->key_offset = key_off;
+ memcpy(&alg[nr_field], field, sizeof(*field));
+ max_key_off = max(max_key_off, field->bytesm1 + 1);
+
+ /* Found a field marker, get the next field */
+ if (field_marker)
+ nr_field++;
+ }
+
+ /* Found a keyoff marker, update the new key_off */
+ if (keyoff_marker) {
+ key_off += max_key_off;
+ max_key_off = 0;
+ }
}
+ /* Processed all the flow key types */
+ if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
+ return 0;
+ else
+ return NIX_AF_ERR_RSS_NOSPC_FIELD;
}
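
To make the field_marker/keyoff_marker progression above concrete: for a flowkey config of IPv4|IPv6|TCP, the IPv4 and IPv6 fields both extract at key offset 0 (only one of them can match a given packet) and the L4 ports land after the 32-byte IPv6 slot. A small sketch that reproduces just that offset arithmetic (the bit positions of the NIX_FLOW_KEY_TYPE_* flags are assumed here):

#include <stdio.h>

/* Assumed bit positions mirroring NIX_FLOW_KEY_TYPE_* */
#define KEY_IPV4  (1 << 1)
#define KEY_IPV6  (1 << 2)
#define KEY_TCP   (1 << 3)

int main(void)
{
	unsigned int cfg = KEY_IPV4 | KEY_IPV6 | KEY_TCP;
	int key_off = 0, max_key_off = 0;

	if (cfg & KEY_IPV4) {			/* 8 hash bytes at offset 0 */
		printf("IPv4 SIP+DIP at key offset %d\n", key_off);
		if (8 > max_key_off)
			max_key_off = 8;	/* keyoff_marker not set yet */
	}
	if (cfg & KEY_IPV6) {			/* 32 hash bytes, same offset */
		printf("IPv6 SIP+DIP at key offset %d\n", key_off);
		if (32 > max_key_off)
			max_key_off = 32;
		key_off += max_key_off;		/* keyoff_marker advances here */
		max_key_off = 0;
	}
	if (cfg & KEY_TCP)			/* 4 bytes of L4 ports */
		printf("TCP ports at key offset %d\n", key_off);
	return 0;
}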
-static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
-#define FIELDS_PER_ALG 5
- u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
- u32 flowkey_cfg, minkey_cfg;
- int alg, fid;
+ u64 field[FIELDS_PER_ALG];
+ struct nix_hw *hw;
+ int fid, rc;
- memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
+ hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!hw)
+ return -EINVAL;
- /* Only incoming channel number */
- flowkey_cfg = FLOW_KEY_TYPE_PORT;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
+ /* No room to add a new flow hash algorithm */
+ if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
+ return NIX_AF_ERR_RSS_NOSPC_ALGO;
- /* For a incoming pkt if none of the fields match then flowkey
- * will be zero, hence tag generated will also be zero.
- * RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
- * be used to queue the packet.
- */
+ /* Generate algo fields for the given flow_cfg */
+ rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
+ if (rc)
+ return rc;
+
+ /* Update ALGX_FIELDX register with generated fields */
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
+ fid), field[fid]);
+
+ /* Store the flow_cfg for further lookup */
+ rc = hw->flowkey.in_use;
+ hw->flowkey.flowkey[rc] = flow_cfg;
+ hw->flowkey.in_use++;
+
+ return rc;
+}
+
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct nix_rss_flowkey_cfg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int alg_idx, nixlf, blkaddr;
+ struct nix_hw *nix_hw;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
+ /* Failed to get algo index from the existing list, reserve a new one */
+ if (alg_idx < 0) {
+ alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
+ req->flowkey_cfg);
+ if (alg_idx < 0)
+ return alg_idx;
+ }
+ rsp->alg_idx = alg_idx;
+ rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+ alg_idx, req->mcam_index);
+ return 0;
+}
+
+static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+{
+ u32 flowkey_cfg, minkey_cfg;
+ int alg, fid, rc;
+
+ /* Disable all flow key algx fieldx */
+ for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+ 0);
+ }
/* IPv4/IPv6 SIP/DIPs */
- flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
+ flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
minkey_cfg = flowkey_cfg;
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_UDP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
- FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
- flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
- for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
- for (fid = 0; fid < FIELDS_PER_ALG; fid++)
- rvu_write64(rvu, blkaddr,
- NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
- field[alg][fid]);
- }
+ return 0;
}
-int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
@@ -1742,10 +2277,13 @@ int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
return 0;
}
-int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
bool allmulti = false, disable_promisc = false;
@@ -1775,9 +2313,303 @@ int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
else
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, allmulti);
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
+ return 0;
+}
+
+static void nix_find_link_frs(struct rvu *rvu,
+ struct nix_frs_cfg *req, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct rvu_pfvf *pfvf;
+ int maxlen, minlen;
+ int numvfs, hwvf;
+ int vf;
+
+ /* Update with requester's min/max lengths */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ pfvf->maxlen = req->maxlen;
+ if (req->update_minlen)
+ pfvf->minlen = req->minlen;
+
+ maxlen = req->maxlen;
+ minlen = req->update_minlen ? req->minlen : 0;
+
+ /* Get this PF's numVFs and starting hwvf */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ /* For each VF, compare requested max/minlen */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+ }
+
+ /* Compare requested max/minlen with PF's max/minlen */
+ pfvf = &rvu->pf[pf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+
+ /* Update the request with the max/min of the PF and its VFs */
+ req->maxlen = maxlen;
+ if (req->update_minlen)
+ req->minlen = minlen;
+}
+
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ int blkaddr, schq, link = -1;
+ struct nix_txsch *txsch;
+ u64 cfg, lmac_fifo_len;
+ struct nix_hw *nix_hw;
+ u8 cgx = 0, lmac = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ /* Check if requester wants to update SMQ's */
+ if (!req->update_smq)
+ goto rx_frscfg;
+
+ /* Update min/maxlen in each of the SMQ attached to this PF/VF */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ mutex_lock(&rvu->rsrc_lock);
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+ cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
+ if (req->update_minlen)
+ cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+rx_frscfg:
+ /* Check if config is for SDP link */
+ if (req->sdp_link) {
+ if (!hw->sdp_links)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+ link = hw->cgx_links + hw->lbk_links;
+ goto linkcfg;
+ }
+
+ /* Check if the request is from CGX mapped RVU PF */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ /* Get CGX and LMAC to which this PF is mapped and find link */
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+ link = (cgx * hw->lmac_per_cgx) + lmac;
+ } else if (pf == 0) {
+ /* For VFs of PF0 ingress is LBK port, so config LBK link */
+ link = hw->cgx_links;
+ }
+
+ if (link < 0)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+
+ nix_find_link_frs(rvu, req, pcifunc);
+
+linkcfg:
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+ cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+ if (req->update_minlen)
+ cfg = (cfg & ~0xFFFFULL) | req->minlen;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
+
+ if (req->sdp_link || pf == 0)
+ return 0;
+
+ /* Update transmit credits for CGX links */
+ lmac_fifo_len =
+ CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
+ cfg &= ~(0xFFFFFULL << 12);
+ cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
+ rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+ rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
+
+ return 0;
+}
+
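
The read-modify-write sequences in the handler above place the frame sizes into fixed register fields: maxlen goes into SMQX_CFG bits 23:8 and RX_LINKX_CFG bits 31:16, minlen into the low bits of each. A hedged sketch of those updates, with the field positions inferred only from the masks and shifts used above:

#include <stdint.h>

/* Frame-size field updates as in rvu_mbox_handler_nix_set_hw_frs(). */
static uint64_t smq_cfg_set_frs(uint64_t cfg, uint16_t maxlen, uint8_t minlen)
{
	cfg = (cfg & ~(0xFFFFULL << 8)) | ((uint64_t)maxlen << 8); /* bits 23:8 */
	cfg = (cfg & ~0x7FULL) | (minlen & 0x7F);                  /* bits 6:0  */
	return cfg;
}

static uint64_t rx_link_cfg_set_frs(uint64_t cfg, uint16_t maxlen, uint16_t minlen)
{
	cfg = (cfg & ~(0xFFFFULL << 16)) | ((uint64_t)maxlen << 16); /* bits 31:16 */
	cfg = (cfg & ~0xFFFFULL) | minlen;                           /* bits 15:0  */
	return cfg;
}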
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
+ struct npc_mcam_free_entry_req free_req = { };
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ /* LBK VFs do not have separate MCAM UCAST entry hence
+ * skip allocating rxvlan for them
+ */
+ if (is_afvf(pcifunc))
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->rxvlan)
+ return 0;
+
+ /* alloc new mcam entry */
+ alloc_req.hdr.pcifunc = pcifunc;
+ alloc_req.count = 1;
+
+ err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (err)
+ return err;
+
+ /* update entry to enable rxvlan offload */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0) {
+ err = NIX_AF_ERR_AF_LF_INVALID;
+ goto free_entry;
+ }
+
+ nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0) {
+ err = NIX_AF_ERR_AF_LF_INVALID;
+ goto free_entry;
+ }
+
+ pfvf->rxvlan_index = alloc_rsp.entry_list[0];
+ /* all it means is that rxvlan_index is valid */
+ pfvf->rxvlan = true;
+
+ err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ if (err)
+ goto free_entry;
+
+ return 0;
+free_entry:
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.entry = alloc_rsp.entry_list[0];
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
+ pfvf->rxvlan = false;
+ return err;
+}
+
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int nixlf, blkaddr;
+ u64 cfg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
+ /* Set the interface configuration */
+ if (req->len_verify & BIT(0))
+ cfg |= BIT_ULL(41);
+ else
+ cfg &= ~BIT_ULL(41);
+
+ if (req->len_verify & BIT(1))
+ cfg |= BIT_ULL(40);
+ else
+ cfg &= ~BIT_ULL(40);
+
+ if (req->csum_verify & BIT(0))
+ cfg |= BIT_ULL(37);
+ else
+ cfg &= ~BIT_ULL(37);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
+
return 0;
}
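
The handler above maps the two len_verify bits and the csum_verify bit onto fixed NIX_AF_LFX_RX_CFG bits. A compact sketch of that mapping, with the bit numbers taken straight from the BIT_ULL() usage above:

#include <stdint.h>

/* len_verify bit0 -> CFG bit 41, len_verify bit1 -> CFG bit 40,
 * csum_verify bit0 -> CFG bit 37, as in rvu_mbox_handler_nix_set_rx_cfg().
 */
static uint64_t nix_rx_cfg_update(uint64_t cfg, uint8_t len_verify,
				  uint8_t csum_verify)
{
	cfg = (len_verify & 0x1) ? (cfg | (1ULL << 41)) : (cfg & ~(1ULL << 41));
	cfg = (len_verify & 0x2) ? (cfg | (1ULL << 40)) : (cfg & ~(1ULL << 40));
	cfg = (csum_verify & 0x1) ? (cfg | (1ULL << 37)) : (cfg & ~(1ULL << 37));
	return cfg;
}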
+static void nix_link_config(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int cgx, lmac_cnt, slink, link;
+ u64 tx_credits;
+
+ /* Set default min/max packet lengths allowed on NIX Rx links.
+ *
+ * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
+ * as undersize and report them to SW as error pkts, hence
+ * setting it to 40 bytes.
+ */
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
+ if (hw->sdp_links) {
+ link = hw->cgx_links + hw->lbk_links;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
+ /* Set credits for Tx links assuming max packet length allowed.
+ * This will be reconfigured based on MTU set for PF/VF.
+ */
+ for (cgx = 0; cgx < hw->cgx; cgx++) {
+ lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ slink = cgx * hw->lmac_per_cgx;
+ for (link = slink; link < (slink + lmac_cnt); link++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link),
+ tx_credits);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_EXPR_CREDIT(link),
+ tx_credits);
+ }
+ }
+
+ /* Set Tx credits for LBK link */
+ slink = hw->cgx_links;
+ for (link = slink; link < (slink + hw->lbk_links); link++) {
+ tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
+ /* Enable credits and set credit pkt count to max allowed */
+ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
+ }
+}
+
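
nix_link_config() above seeds each Tx link with credits for the largest allowed frame. A sketch of the credit word it writes, treating the per-LMAC FIFO length and max frame size as parameters rather than the driver constants (byte credits in bits 12 and up, a 0x1FF packet-credit count at bits 10:2, credit enable at bit 1):

#include <stdint.h>

/* Tx link credit word as built in nix_link_config(). */
static uint64_t nix_tx_link_credits(uint32_t lmac_fifo_len, uint32_t max_frs)
{
	uint64_t credits = (lmac_fifo_len - max_frs) / 16;  /* 16B units */

	return (credits << 12) |	/* byte credits       */
	       (0x1FF << 2)    |	/* max packet credits */
	       (1ULL << 1);		/* credit enable      */
}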
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
int idx, err;
@@ -1796,8 +2628,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
/* Check if CGX devices are ready */
- for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
- if (status & (BIT_ULL(16 + idx)))
+ for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+ /* Skip when cgx port is not available */
+ if (!rvu_cgx_pdata(idx, rvu) ||
+ (status & (BIT_ULL(16 + idx))))
continue;
dev_err(rvu->dev,
"CGX%d didn't respond to NIX X2P calibration\n", idx);
@@ -1830,10 +2664,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Set admin queue endianness */
cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
- cfg |= BIT_ULL(1);
+ cfg |= BIT_ULL(8);
rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
- cfg &= ~BIT_ULL(1);
+ cfg &= ~BIT_ULL(8);
rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif
@@ -1870,6 +2704,14 @@ int rvu_nix_init(struct rvu *rvu)
return 0;
block = &hw->block[blkaddr];
+ /* As per a HW erratum in 9xxx A0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
+ if (is_rvu_9xxx_A0(rvu))
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
if (err)
@@ -1891,9 +2733,6 @@ int rvu_nix_init(struct rvu *rvu)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
- /* Configure segmentation offload formats */
- nix_setup_lso(rvu, blkaddr);
-
if (blkaddr == BLKADDR_NIX0) {
hw->nix0 = devm_kzalloc(rvu->dev,
sizeof(struct nix_hw), GFP_KERNEL);
@@ -1904,24 +2743,51 @@ int rvu_nix_init(struct rvu *rvu)
if (err)
return err;
+ err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
+ if (err)
+ return err;
+
err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
if (err)
return err;
- /* Config Outer L2, IP, TCP and UDP's NPC layer info.
+ /* Configure segmentation offload formats */
+ nix_setup_lso(rvu, hw->nix0, blkaddr);
+
+ /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
* This helps HW protocol checker to identify headers
* and validate length and checksums.
*/
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
(NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
+ (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
+ (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
+ (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
+ 0x0F);
+
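
Each NIX_AF_RX_DEF_* write above uses the same (layer id, layer type, flag mask) encoding so the HW protocol checker can locate the corresponding header. A one-line helper showing the packing common to all of the writes above:

#include <stdint.h>

/* (lid << 8) | (ltype << 4) | flag_mask, as used for every NIX_AF_RX_DEF_* reg */
static inline uint64_t nix_rx_def(uint8_t lid, uint8_t ltype, uint8_t fmask)
{
	return ((uint64_t)lid << 8) | ((uint64_t)ltype << 4) | (fmask & 0xF);
}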
+ err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+ if (err)
+ return err;
- nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+ /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
+ nix_link_config(rvu, blkaddr);
}
return 0;
}
@@ -1955,5 +2821,139 @@ void rvu_nix_freemem(struct rvu *rvu)
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
qmem_free(rvu->dev, mcast->mcast_buf);
+ mutex_destroy(&mcast->mce_lock);
+ }
+}
+
+static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (*nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ if (err)
+ return err;
+
+ rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ if (err)
+ return err;
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ return 0;
+}
+
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+ int err;
+
+ ctx_req.hdr.pcifunc = pcifunc;
+
+ /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+ nix_rx_sync(rvu, blkaddr);
+ nix_txschq_free(rvu, pcifunc);
+
+ if (pfvf->sq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_SQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "SQ ctx disable failed\n");
+ }
+
+ if (pfvf->rq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_RQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "RQ ctx disable failed\n");
+ }
+
+ if (pfvf->cq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_CQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+
+ nix_ctx_free(rvu, pfvf);
+}
+
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+ struct nix_lso_format_cfg *req,
+ struct nix_lso_format_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, idx, f;
+ u64 reg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ /* Find existing matching LSO format, if any */
+ for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
+ for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
+ reg = rvu_read64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, f));
+ if (req->fields[f] != (reg & req->field_mask))
+ break;
+ }
+
+ if (f == NIX_LSO_FIELD_MAX)
+ break;
+ }
+
+ if (idx < nix_hw->lso.in_use) {
+ /* Match found */
+ rsp->lso_format_idx = idx;
+ return 0;
+ }
+
+ if (nix_hw->lso.in_use == nix_hw->lso.total)
+ return NIX_AF_ERR_LSO_CFG_FAIL;
+
+ rsp->lso_format_idx = nix_hw->lso.in_use++;
+
+ for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
+ req->fields[f]);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 7531fdc54fa1..c0e165dfc403 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -241,14 +241,14 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
-int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
-int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp)
{
@@ -273,7 +273,7 @@ static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
pfvf->npa_qints_ctx = NULL;
}
-int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp)
{
@@ -372,7 +372,7 @@ exit:
return rc;
}
-int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -470,3 +470,20 @@ void rvu_npa_freemem(struct rvu *rvu)
block = &hw->block[blkaddr];
rvu_aq_free(rvu, block->aq);
}
+
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+
+ /* Disable all pools */
+ ctx_req.hdr.pcifunc = pcifunc;
+ ctx_req.ctype = NPA_AQ_CTYPE_POOL;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ /* Disable all auras */
+ ctx_req.ctype = NPA_AQ_CTYPE_AURA;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ npa_ctx_free(rvu, pfvf);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 23ff47f7efc5..15f70273e29c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -15,6 +16,7 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
+#include "cgx.h"
#include "npc_profile.h"
#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
@@ -26,13 +28,10 @@
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
-struct mcam_entry {
-#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
- u64 kw[NPC_MAX_KWS_IN_KEY];
- u64 kw_mask[NPC_MAX_KWS_IN_KEY];
- u64 action;
- u64 vtag_action;
-};
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc);
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
@@ -256,6 +255,46 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
}
+static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, u16 dest)
+{
+ int dbank = npc_get_bank(mcam, dest);
+ int sbank = npc_get_bank(mcam, src);
+ u64 cfg, sreg, dreg;
+ int bank, i;
+
+ src &= (mcam->banksize - 1);
+ dest &= (mcam->banksize - 1);
+
+ /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
+ for (bank = 0; bank < mcam->banks_per_entry; bank++) {
+ sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
+ dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
+ for (i = 0; i < 6; i++) {
+ cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
+ rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
+ }
+ }
+
+ /* Copy action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);
+
+ /* Copy TAG action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);
+
+ /* Enable or disable */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
+}
+
static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index)
{
@@ -269,12 +308,17 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry entry = { {0} };
struct nix_rx_action action;
int blkaddr, index, kwi;
u64 mac = 0;
+ /* AF's VFs work in promiscuous mode */
+ if (is_afvf(pcifunc))
+ return;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
@@ -308,22 +352,33 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
NIX_INTF_RX, &entry, true);
+
+	/* Add VLAN matching, set up the action and save the entry for later */
+ entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
+ entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
+
+ entry.vtag_action = VTAG0_VALID_BIT |
+ FIELD_PREP(VTAG0_TYPE_MASK, 0) |
+ FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(VTAG0_RELPTR_MASK, 12);
+
+ memcpy(&pfvf->entry, &entry, sizeof(entry));
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, bool allmulti)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, ucast_idx, index, kwi;
struct mcam_entry entry = { {0} };
- struct nix_rx_action action;
- int blkaddr, index, kwi;
+ struct nix_rx_action action = { };
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
- if (blkaddr < 0)
+ /* Only PF or AF VF can add a promiscuous entry */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
return;
- /* Only PF or AF VF can add a promiscuous entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
@@ -338,16 +393,29 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
entry.kw_mask[kwi] = BIT_ULL(40);
}
- *(u64 *)&action = 0x00;
- action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for promisc also
+ */
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
NIX_INTF_RX, &entry, true);
}
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -362,7 +430,17 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
}
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
@@ -390,9 +468,28 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
- /* Check for L2B bit and LMAC channel */
- entry.kw[0] = BIT_ULL(25) | chan;
- entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL;
+	/* Check for L2B bit and LMAC channel.
+	 * NOTE: The MKEX default profile is a reduced, stop-gap layout that
+	 * trades a few bits for extra capability. Per HRM, NPC_PARSE_KEX_S
+	 * carries L2B at BIT_POS[25]; in this reduced profile it moves to
+	 * BIT_POS[13]. ERRCODE and ERRLEV are dropped, otherwise we would
+	 * lose capability features needed for CoS (from an ODP PoV),
+	 * e.g. VLAN, DSCP.
+	 *
+	 * Reduced layout of the MKEX default profile
+	 * (covers CHAN, L2/3{B/M}, LA, LB, LC, LD):
+	 *
+	 * BIT_POS[31:28] : LD
+	 * BIT_POS[27:24] : LC
+	 * BIT_POS[23:20] : LB
+	 * BIT_POS[19:16] : LA
+	 * BIT_POS[15:12] : L3B, L3M, L2B, L2M
+	 * BIT_POS[11:00] : CHAN
+	 */
+ entry.kw[0] = BIT_ULL(13) | chan;
+ entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;
*(u64 *)&action = 0x00;
#ifdef MCAST_MCE
@@ -454,51 +551,110 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ /* If PF's promiscuous entry is enabled,
+ * Set RSS action for that entry as well
+ */
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
+ *(u64 *)&action);
+ }
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
-void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_rx_action action;
- int blkaddr, index, bank;
+ int index, bank, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- /* Disable ucast MCAM match entry of this PF/VF */
+ /* Ucast MCAM match entry of this PF/VF */
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
- /* For PF, disable promisc and bcast MCAM match entries */
- if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_BCAST_ENTRY);
- /* For bcast, disable only if it's action is not
- * packet replication, incase if action is replication
- * then this PF's nixlf is removed from bcast replication
- * list.
- */
- bank = npc_get_bank(mcam, index);
- index &= (mcam->banksize - 1);
- *(u64 *)&action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
- if (action.op != NIX_RX_ACTIONOP_MCAST)
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+ /* For PF, ena/dis promisc and bcast MCAM match entries */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+	/* For bcast, enable/disable only if its action is not
+	 * packet replication; in case the action is replication,
+ * then this PF's nixlf is removed from bcast replication
+ * list.
+ */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
+ if (action.op != NIX_RX_ACTIONOP_MCAST)
+ npc_enable_mcam_entry(rvu, mcam,
+ blkaddr, index, enable);
+ if (enable)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
+ else
rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
- }
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+}
+
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
+}
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Disable and free all MCAM entries mapped to this 'pcifunc' */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+
+ /* Free all MCAM counters mapped to this 'pcifunc' */
+ npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+
+ mutex_unlock(&mcam->lock);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}
-#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
-#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg) \
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+ ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+
static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
@@ -514,28 +670,171 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
*/
for (lid = 0; lid < lid_count; lid++) {
for (ltype = 0; ltype < 16; ltype++) {
- LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL);
- LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL);
- LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL);
- LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL);
-
- LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL);
- LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL);
- LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL);
- LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL);
+ SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
+ SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
+ SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
+ SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);
+
+ SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
+ SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
+ SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
+ SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
}
}
- /* If we plan to extract Outer IPv4 tuple for TCP/UDP pkts
- * then 112bit key is not sufficient
- */
if (mcam->keysize != NPC_MCAM_KEY_X2)
return;
- /* Start placing extracted data/flags from 64bit onwards, for now */
- /* Extract DMAC from the packet */
- cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET;
- LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+ /* Default MCAM KEX profile */
+ /* Layer A: Ethernet: */
+
+ /* DMAC: 6 bytes, KW1[47:0] */
+ cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
+
+ /* Layer B: Single VLAN (CTAG) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);
+
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
+
+ /* Layer C: IPv4 */
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
+ /* TOS: 1 byte, KW1[63:56] */
+ cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);
+
+ /* Layer D:UDP */
+ /* SPORT: 2 bytes, KW3[15:0] */
+ cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
+ /* DPORT: 2 bytes, KW3[31:16] */
+ cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);
+
+ /* Layer D:TCP */
+ /* SPORT: 2 bytes, KW3[15:0] */
+ cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
+ /* DPORT: 2 bytes, KW3[31:16] */
+ cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
+}
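
As a quick sanity check of the KEX_LD_CFG() encoding used by the default profile above, here is a minimal stand-alone sketch (not part of the patch; the macro body is copied from the hunk above and the key offset 8 is NPC_PARSE_RESULT_DMAC_OFFSET):

	#include <stdio.h>

	#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs)	\
		(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) |	\
		 ((flags_ena) << 6) | ((key_ofs) & 0x3F))

	int main(void)
	{
		/* DMAC extractor of the default profile: 6 bytes (bytesm1 = 5),
		 * header offset 0, enabled, no flags, key offset 8.
		 * (5 << 16) | (1 << 7) | 8 = 0x50088
		 */
		printf("0x%x\n", KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, 8));
		return 0;
	}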
+
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex)
+{
+ int lid, lt, ld, fl;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
+ mkex->keyx_cfg[NIX_INTF_RX]);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
+ mkex->keyx_cfg[NIX_INTF_TX]);
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+ mkex->kex_ld_flags[ld]);
+
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_RX]
+ [lid][lt][ld]);
+
+ SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_TX]
+ [lid][lt][ld]);
+ }
+ }
+ }
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+ SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
+ mkex->intf_ld_flags[NIX_INTF_RX]
+ [ld][fl]);
+
+ SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
+ mkex->intf_ld_flags[NIX_INTF_TX]
+ [ld][fl]);
+ }
+ }
+}
+
+/* strtoull of "mkexprof" with base:36 */
+#define MKEX_SIGN 0x19bbfdbd15f
+#define MKEX_END_SIGN 0xdeadbeef
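
MKEX_SIGN really is just the profile-name string interpreted as a base-36 number; a minimal user-space check (illustrative only, not part of the patch):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* "mkexprof" parsed in base 36 gives 0x19bbfdbd15f */
		unsigned long long sign = strtoull("mkexprof", NULL, 36);

		printf("0x%llx\n", sign);	/* prints 0x19bbfdbd15f */
		return 0;
	}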
+
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
+{
+ const char *mkex_profile = rvu->mkex_pfl_name;
+ struct device *dev = &rvu->pdev->dev;
+ void __iomem *mkex_prfl_addr = NULL;
+ struct npc_mcam_kex *mcam_kex;
+ u64 prfl_addr;
+ u64 prfl_sz;
+
+	/* If the user has not selected an mkex profile */
+ if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
+ goto load_default;
+
+ if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
+ goto load_default;
+
+ if (!prfl_addr || !prfl_sz)
+ goto load_default;
+
+ mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
+ if (!mkex_prfl_addr)
+ goto load_default;
+
+ mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
+
+ while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
+ /* Compare with mkex mod_param name string */
+ if (mcam_kex->mkex_sign == MKEX_SIGN &&
+ !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
+			/* Due to erratum 35786 in A0 pass silicon,
+ * parse nibble enable configuration has to be
+ * identical for both Rx and Tx interfaces.
+ */
+ if (is_rvu_9xxx_A0(rvu) &&
+ mcam_kex->keyx_cfg[NIX_INTF_RX] !=
+ mcam_kex->keyx_cfg[NIX_INTF_TX])
+ goto load_default;
+
+ /* Program selected mkex profile */
+ npc_program_mkex_profile(rvu, blkaddr, mcam_kex);
+
+ goto unmap;
+ }
+
+ mcam_kex++;
+ prfl_sz -= sizeof(struct npc_mcam_kex);
+ }
+ dev_warn(dev, "Failed to load requested profile: %s\n",
+ rvu->mkex_pfl_name);
+
+load_default:
+ dev_info(rvu->dev, "Using default mkex profile\n");
+ /* Config packet data and flags extraction into PARSE result */
+ npc_config_ldata_extract(rvu, blkaddr);
+
+unmap:
+ if (mkex_prfl_addr)
+ iounmap(mkex_prfl_addr);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
@@ -690,13 +989,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
- int rsvd;
+ int rsvd, err;
u64 cfg;
/* Get HW limits */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
mcam->banks = (cfg >> 44) & 0xF;
mcam->banksize = (cfg >> 28) & 0xFFFF;
+ mcam->counters.max = (cfg >> 48) & 0xFFFF;
/* Actual number of MCAM entries vary by entry size */
cfg = (rvu_read64(rvu, blkaddr,
@@ -728,20 +1028,82 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
return -ENOMEM;
}
- mcam->entries = mcam->total_entries - rsvd;
- mcam->nixlf_offset = mcam->entries;
+ mcam->bmap_entries = mcam->total_entries - rsvd;
+ mcam->nixlf_offset = mcam->bmap_entries;
mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
- spin_lock_init(&mcam->lock);
+ /* Allocate bitmaps for managing MCAM entries */
+ mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap)
+ return -ENOMEM;
+
+ mcam->bmap_reverse = devm_kcalloc(rvu->dev,
+ BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap_reverse)
+ return -ENOMEM;
+
+ mcam->bmap_fcnt = mcam->bmap_entries;
+
+ /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+ mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2pfvf_map)
+ return -ENOMEM;
+
+ /* Reserve 1/8th of MCAM entries at the bottom for low priority
+ * allocations and another 1/8th at the top for high priority
+ * allocations.
+ */
+ mcam->lprio_count = mcam->bmap_entries / 8;
+ if (mcam->lprio_count > BITS_PER_LONG)
+ mcam->lprio_count = round_down(mcam->lprio_count,
+ BITS_PER_LONG);
+ mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
+ mcam->hprio_count = mcam->lprio_count;
+ mcam->hprio_end = mcam->hprio_count;
+
+ /* Allocate bitmap for managing MCAM counters and memory
+ * for saving counter to RVU PFFUNC allocation mapping.
+ */
+ err = rvu_alloc_bitmap(&mcam->counters);
+ if (err)
+ return err;
+
+ mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr2pfvf_map)
+ goto free_mem;
+
+ /* Alloc memory for MCAM entry to counter mapping and for tracking
+ * counter's reference count.
+ */
+ mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2cntr_map)
+ goto free_mem;
+
+ mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr_refcnt)
+ goto free_mem;
+
+ mutex_init(&mcam->lock);
return 0;
+
+free_mem:
+ kfree(mcam->counters.bmap);
+ return -ENOMEM;
}
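
To make the zone sizing above concrete (figures assumed for illustration, not taken from the patch): with bmap_entries = 4096, lprio_count = 4096 / 8 = 512, which is already a multiple of BITS_PER_LONG so no rounding down is needed; lprio_start = 4096 - 512 = 3584 and hprio_end = 512, leaving entries 512..3583 as the middle zone that non-priority allocations prefer.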
int rvu_npc_init(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
u64 keyz = NPC_MCAM_KEY_X2;
- int blkaddr, err;
+ int blkaddr, entry, bank, err;
+ u64 cfg, nibble_ena;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
@@ -749,6 +1111,14 @@ int rvu_npc_init(struct rvu *rvu)
return -ENODEV;
}
+ /* First disable all MCAM entries, to stop traffic towards NIXLFs */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
+ for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
+ }
+
/* Allocate resource bimap for pkind*/
pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
NPC_AF_CONST1) >> 12) & 0xFF;
@@ -771,29 +1141,41 @@ int rvu_npc_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ /* Config Inner IPV4 NPC layer info */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
+ (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
* - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
+ * - Inner IPv4 header checksum validation.
+ * - Set non zero checksum error code value
*/
rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
- BIT_ULL(6) | BIT_ULL(2));
+ BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
+ BIT_ULL(2) | BIT_ULL(1));
/* Set RX and TX side MCAM search key size.
- * Also enable parse key extract nibbles suchthat except
- * layer E to H, rest of the key is included for MCAM search.
+ * LA..LD (ltype only) + Channel
*/
+ nibble_ena = 0x49247;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+ ((keyz & 0x3) << 32) | nibble_ena);
+	/* Due to erratum 35786 in A0 pass silicon, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (!is_rvu_9xxx_A0(rvu))
+ nibble_ena = (1ULL << 19) - 1;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+ ((keyz & 0x3) << 32) | nibble_ena);
err = npc_mcam_rsrcs_init(rvu, blkaddr);
if (err)
return err;
- /* Config packet data and flags extraction into PARSE result */
- npc_config_ldata_extract(rvu, blkaddr);
+ /* Configure MKEX profile */
+ npc_load_mkex_profile(rvu, blkaddr);
/* Set TX miss action to UCAST_DEFAULT i.e
* transmit the packet on NIX LF SQ's default channel.
@@ -811,6 +1193,1020 @@ int rvu_npc_init(struct rvu *rvu)
void rvu_npc_freemem(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
kfree(pkind->rsrc.bmap);
+ kfree(mcam->counters.bmap);
+ mutex_destroy(&mcam->lock);
+}
+
+static int npc_mcam_verify_entry(struct npc_mcam *mcam,
+ u16 pcifunc, int entry)
+{
+ /* Verify if entry is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (entry >= mcam->bmap_entries)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->entry2pfvf_map[entry])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static int npc_mcam_verify_counter(struct npc_mcam *mcam,
+ u16 pcifunc, int cntr)
+{
+ /* Verify if counter is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (cntr >= mcam->counters.max)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->cntr2pfvf_map[cntr])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u16 bank = npc_get_bank(mcam, entry);
+
+ /* Set mapping and increment counter's refcnt */
+ mcam->entry2cntr_map[entry] = cntr;
+ mcam->cntr_refcnt[cntr]++;
+ /* Enable stats */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
+ BIT_ULL(9) | cntr);
+}
+
+static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
+ struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u16 bank = npc_get_bank(mcam, entry);
+
+ /* Remove mapping and reduce counter's refcnt */
+ mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr]--;
+ /* Disable stats */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00);
+}
+
+/* Sets MCAM entry in bitmap as used. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __set_bit(entry, mcam->bmap);
+ __set_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt--;
+}
+
+/* Sets MCAM entry in bitmap as free. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __clear_bit(entry, mcam->bmap);
+ __clear_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt++;
+}
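
The forward/reverse bitmap pairing above is an index mirror; for example (assuming bmap_entries = 4096, an illustrative figure), npc_mcam_set_bit(mcam, 100) sets bit 100 in mcam->bmap and bit 4096 - 100 - 1 = 3995 in mcam->bmap_reverse, so a find_next_zero_bit() walk over bmap_reverse starting at 0 effectively scans MCAM entries from the highest index downwards.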
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc)
+{
+ u16 index, cntr;
+
+ /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2pfvf_map[index] == pcifunc) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ /* Free the entry in bitmap */
+ npc_mcam_clear_bit(mcam, index);
+ /* Disable the entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[index];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam,
+ blkaddr, index,
+ cntr);
+ }
+ }
+}
+
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc)
+{
+ u16 cntr;
+
+ /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr] = 0;
+ rvu_free_rsrc(&mcam->counters, cntr);
+ /* This API is expected to be called after freeing
+			 * MCAM entries, which in turn will remove
+ * 'entry to counter' mapping.
+ * No need to do it again.
+ */
+ }
+ }
+}
+
+/* Find area of contiguous free entries of size 'nr'.
+ * If not found, return the largest contiguous free area available.
+ */
+static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
+ u16 nr, u16 *max_area)
+{
+ u16 max_area_start = 0;
+ u16 index, next, end;
+
+ *max_area = 0;
+
+again:
+ index = find_next_zero_bit(map, size, start);
+ if (index >= size)
+ return max_area_start;
+
+ end = ((index + nr) >= size) ? size : index + nr;
+ next = find_next_bit(map, end, index);
+ if (*max_area < (next - index)) {
+ *max_area = next - index;
+ max_area_start = index;
+ }
+
+ if (next < end) {
+ start = next + 1;
+ goto again;
+ }
+
+ return max_area_start;
+}
+
+/* Find number of free MCAM entries available
+ * within range, i.e. between 'start' and 'end'.
+ */
+static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
+{
+ u16 index, next;
+ u16 fcnt = 0;
+
+again:
+ if (start >= end)
+ return fcnt;
+
+ index = find_next_zero_bit(map, end, start);
+ if (index >= end)
+ return fcnt;
+
+ next = find_next_bit(map, end, index);
+ if (next <= end) {
+ fcnt += next - index;
+ start = next + 1;
+ goto again;
+ }
+
+ fcnt += end - index;
+ return fcnt;
+}
+
+static void
+npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
+ struct npc_mcam_alloc_entry_req *req,
+ u16 *start, u16 *end, bool *reverse)
+{
+ u16 fcnt;
+
+ if (req->priority == NPC_MCAM_HIGHER_PRIO)
+ goto hprio;
+
+ /* For a low priority entry allocation
+ * - If reference entry is not in hprio zone then
+ * search range: ref_entry to end.
+ * - If reference entry is in hprio zone and if
+	 *   request can be accommodated in the non-hprio zone then
+ * search range: 'start of middle zone' to 'end'
+	 *   - else search in reverse, so that fewer hprio zone
+	 *     entries are allocated.
+ */
+
+ *reverse = false;
+ *start = req->ref_entry + 1;
+ *end = mcam->bmap_entries;
+
+ if (req->ref_entry >= mcam->hprio_end)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->bmap_entries);
+ if (fcnt > req->count)
+ *start = mcam->hprio_end;
+ else
+ *reverse = true;
+ return;
+
+hprio:
+ /* For a high priority entry allocation, search is always
+ * in reverse to preserve hprio zone entries.
+ * - If reference entry is not in lprio zone then
+ * search range: 0 to ref_entry.
+ * - If reference entry is in lprio zone and if
+	 *   request can be accommodated in the middle zone then
+ * search range: 'hprio_end' to 'lprio_start'
+ */
+
+ *reverse = true;
+ *start = 0;
+ *end = req->ref_entry;
+
+ if (req->ref_entry <= mcam->lprio_start)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->lprio_start);
+ if (fcnt < req->count)
+ return;
+ *start = mcam->hprio_end;
+ *end = mcam->lprio_start;
+}
+
+static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+ u16 fcnt, hp_fcnt, lp_fcnt;
+ u16 start, end, index;
+ int entry, next_start;
+ bool reverse = false;
+ unsigned long *bmap;
+ u16 max_contig;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if there are any free entries */
+ if (!mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ /* MCAM entries are divided into high priority, middle and
+	 * low priority zones. The idea is to avoid allocating the top-most
+	 * and bottom-most entries as far as possible, which increases the
+	 * probability of honouring priority allocation requests.
+ *
+ * Two bitmaps are used for mcam entry management,
+ * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'.
+ * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'.
+ *
+ * Reverse bitmap is used to allocate entries
+ * - when a higher priority entry is requested
+	 *  - when few free entries are available.
+	 * Out of the available free entries, lower priority ones are always
+	 * chosen when a 'high vs low' question arises.
+ */
+
+ /* Get the search range for priority allocation request */
+ if (req->priority) {
+ npc_get_mcam_search_range_priority(mcam, req,
+ &start, &end, &reverse);
+ goto alloc;
+ }
+
+ /* Find out the search range for non-priority allocation request
+ *
+ * Get MCAM free entry count in middle zone.
+ */
+ lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->lprio_start,
+ mcam->bmap_entries);
+ hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
+ fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;
+
+	/* Check if the request can be accommodated in the middle zone */
+ if (fcnt > req->count) {
+ start = mcam->hprio_end;
+ end = mcam->lprio_start;
+ } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
+ /* Expand search zone from half of hprio zone to
+ * half of lprio zone.
+ */
+ start = mcam->hprio_end / 2;
+ end = mcam->bmap_entries - (mcam->lprio_count / 2);
+ reverse = true;
+ } else {
+ /* Not enough free entries, search all entries in reverse,
+ * so that low priority ones will get used up.
+ */
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ }
+
+alloc:
+ if (reverse) {
+ bmap = mcam->bmap_reverse;
+ start = mcam->bmap_entries - start;
+ end = mcam->bmap_entries - end;
+ index = start;
+ start = end;
+ end = index;
+ } else {
+ bmap = mcam->bmap;
+ }
+
+ if (req->contig) {
+ /* Allocate requested number of contiguous entries, if
+ * unsuccessful find max contiguous entries available.
+ */
+ index = npc_mcam_find_zero_area(bmap, end, start,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ if (reverse)
+ rsp->entry = mcam->bmap_entries - index - max_contig;
+ else
+ rsp->entry = index;
+ } else {
+ /* Allocate requested number of non-contiguous entries,
+ * if unsuccessful allocate as many as possible.
+ */
+ rsp->count = 0;
+ next_start = start;
+ for (entry = 0; entry < req->count; entry++) {
+ index = find_next_zero_bit(bmap, end, next_start);
+ if (index >= end)
+ break;
+
+ next_start = start + (index - start) + 1;
+
+ /* Save the entry's index */
+ if (reverse)
+ index = mcam->bmap_entries - index - 1;
+ entry_list[entry] = index;
+ rsp->count++;
+ }
+ }
+
+	/* If allocating the requested number of entries is unsuccessful,
+	 * expand the search range to the full bitmap length and retry.
+ */
+ if (!req->priority && (rsp->count < req->count) &&
+ ((end - start) != mcam->bmap_entries)) {
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ goto alloc;
+ }
+
+ /* For priority entry allocation requests, if allocation is
+ * failed then expand search to max possible range and retry.
+ */
+ if (req->priority && rsp->count < req->count) {
+ if (req->priority == NPC_MCAM_LOWER_PRIO &&
+ (start != (req->ref_entry + 1))) {
+ start = req->ref_entry + 1;
+ end = mcam->bmap_entries;
+ reverse = false;
+ goto alloc;
+ } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
+ ((end - start) != req->ref_entry)) {
+ start = 0;
+ end = req->ref_entry;
+ reverse = true;
+ goto alloc;
+ }
+ }
+
+ /* Copy MCAM entry indices into mbox response entry_list.
+	 * Requester always expects indices in ascending order, so
+	 * reverse the list if the reverse bitmap is used for allocation.
+ */
+ if (!req->contig && rsp->count) {
+ index = 0;
+ for (entry = rsp->count - 1; entry >= 0; entry--) {
+ if (reverse)
+ rsp->entry_list[index++] = entry_list[entry];
+ else
+ rsp->entry_list[entry] = entry_list[entry];
+ }
+ }
+
+ /* Mark the allocated entries as used and set nixlf mapping */
+ for (entry = 0; entry < rsp->count; entry++) {
+ index = req->contig ?
+ (rsp->entry + entry) : rsp->entry_list[entry];
+ npc_mcam_set_bit(mcam, index);
+ mcam->entry2pfvf_map[index] = pcifunc;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ /* Update available free count in mbox response */
+ rsp->free_count = mcam->bmap_fcnt;
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ rsp->entry = NPC_MCAM_ENTRY_INVALID;
+ rsp->free_count = 0;
+
+ /* Check if ref_entry is within range */
+ if (req->priority && req->ref_entry >= mcam->bmap_entries)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* ref_entry can't be '0' if requested priority is high.
+ * Can't be last entry if requested priority is low.
+ */
+ if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+ ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+ req->priority == NPC_MCAM_LOWER_PRIO))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated indices needs to be sent to requester,
+ * max number of non-contiguous entries per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Alloc request from PFFUNC with no NIXLF attached should be denied */
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_ALLOC_DENIED;
+
+ return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
+}
+
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+ struct npc_mcam_free_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc = 0;
+ u16 cntr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Free request from PFFUNC with no NIXLF attached, ignore */
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ if (req->all)
+ goto free_all;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ mcam->entry2pfvf_map[req->entry] = 0;
+ npc_mcam_clear_bit(mcam, req->entry);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[req->entry];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, cntr);
+
+ goto exit;
+
+free_all:
+ /* Free up all entries allocated to requesting PFFUNC */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+ struct npc_mcam_write_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ if (req->set_cntr &&
+ npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->set_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+
+ rc = 0;
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+ struct npc_mcam_shift_entry_req *req,
+ struct npc_mcam_shift_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 old_entry, new_entry;
+ u16 index, cntr;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < req->shift_count; index++) {
+ old_entry = req->curr_entry[index];
+ new_entry = req->new_entry[index];
+
+ /* Check if both old and new entries are valid and
+		 * belong to this PFFUNC or not.
+ */
+ rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
+ if (rc)
+ break;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
+ if (rc)
+ break;
+
+ /* new_entry should not have a counter mapped */
+ if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
+ rc = NPC_MCAM_PERM_DENIED;
+ break;
+ }
+
+ /* Disable the new_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
+
+ /* Copy rule from old entry to new entry */
+ npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
+
+ /* Copy counter mapping, if any */
+ cntr = mcam->entry2cntr_map[old_entry];
+ if (cntr != NPC_MCAM_INVALID_MAP) {
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ old_entry, cntr);
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ new_entry, cntr);
+ }
+
+ /* Enable new_entry and disable old_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
+ }
+
+ /* If shift has failed then report the failed index */
+ if (index != req->shift_count) {
+ rc = NPC_MCAM_PERM_DENIED;
+ rsp->failed_entry_idx = index;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 max_contig, cntr;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* If the request is from a PFFUNC with no NIXLF attached, ignore */
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated counter IDs needs to be sent to requester,
+ * max number of non-contiguous counters per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if unused counters are available or not */
+ if (!rvu_rsrc_free_count(&mcam->counters)) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ rsp->count = 0;
+
+ if (req->contig) {
+ /* Allocate requested number of contiguous counters, if
+ * unsuccessful find max contiguous entries available.
+ */
+ index = npc_mcam_find_zero_area(mcam->counters.bmap,
+ mcam->counters.max, 0,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ rsp->cntr = index;
+ for (cntr = index; cntr < (index + max_contig); cntr++) {
+ __set_bit(cntr, mcam->counters.bmap);
+ mcam->cntr2pfvf_map[cntr] = pcifunc;
+ }
+ } else {
+ /* Allocate requested number of non-contiguous counters,
+ * if unsuccessful allocate as many as possible.
+ */
+ for (cntr = 0; cntr < req->count; cntr++) {
+ index = rvu_alloc_rsrc(&mcam->counters);
+ if (index < 0)
+ break;
+ rsp->cntr_list[cntr] = index;
+ rsp->count++;
+ mcam->cntr2pfvf_map[index] = pcifunc;
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (err) {
+ mutex_unlock(&mcam->lock);
+ return err;
+ }
+
+ /* Mark counter as free/unused */
+ mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
+ rvu_free_rsrc(&mcam->counters, req->cntr);
+
+ /* Disable all MCAM entry's stats which are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ entry = index + 1;
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+ struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (rc)
+ goto exit;
+
+ /* Unmap the MCAM entry and counter */
+ if (!req->all) {
+ rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
+ if (rc)
+ goto exit;
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+ goto exit;
+ }
+
+ /* Disable all MCAM entry's stats which are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ entry = index + 1;
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct npc_mcam_oper_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_and_write_entry_req *req,
+ struct npc_mcam_alloc_and_write_entry_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req;
+ struct npc_mcam_alloc_counter_rsp cntr_rsp;
+ struct npc_mcam_alloc_entry_req entry_req;
+ struct npc_mcam_alloc_entry_rsp entry_rsp;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 entry = NPC_MCAM_ENTRY_INVALID;
+ u16 cntr = NPC_MCAM_ENTRY_INVALID;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Try to allocate a MCAM entry */
+ entry_req.hdr.pcifunc = req->hdr.pcifunc;
+ entry_req.contig = true;
+ entry_req.priority = req->priority;
+ entry_req.ref_entry = req->ref_entry;
+ entry_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
+ &entry_req, &entry_rsp);
+ if (rc)
+ return rc;
+
+ if (!entry_rsp.count)
+ return NPC_MCAM_ALLOC_FAILED;
+
+ entry = entry_rsp.entry;
+
+ if (!req->alloc_cntr)
+ goto write_entry;
+
+ /* Now allocate counter */
+ cntr_req.hdr.pcifunc = req->hdr.pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (rc) {
+ /* Free allocated MCAM entry */
+ mutex_lock(&mcam->lock);
+ mcam->entry2pfvf_map[entry] = 0;
+ npc_mcam_clear_bit(mcam, entry);
+ mutex_unlock(&mcam->lock);
+ return rc;
+ }
+
+ cntr = cntr_rsp.cntr;
+
+write_entry:
+ mutex_lock(&mcam->lock);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->alloc_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
+ mutex_unlock(&mcam->lock);
+
+ rsp->entry = entry;
+ rsp->cntr = cntr;
+
+ return 0;
+}
+
+#define GET_KEX_CFG(intf) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))
+
+#define GET_KEX_FLAGS(ld) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))
+
+#define GET_KEX_LD(intf, lid, lt, ld) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))
+
+#define GET_KEX_LDFLAGS(intf, ld, fl) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
+
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+ struct npc_get_kex_cfg_rsp *rsp)
+{
+ int lid, lt, ld, fl;
+
+ rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
+ rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
+ rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
+ }
+ }
+ }
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+ rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
+ rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
+ }
+ }
+ memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
+ return 0;
+}
+
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+ bool enable;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (!pfvf->rxvlan)
+ return 0;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_UCAST_ENTRY);
+ pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
+ NIX_INTF_RX, &pfvf->entry, enable);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9c08c3650c02..04fd1f135011 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3732,19 +3732,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int skge_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, skge_debug_show, inode->i_private);
-}
-
-static const struct file_operations skge_debug_fops = {
- .owner = THIS_MODULE,
- .open = skge_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(skge_debug);
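
For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> expands to roughly the boilerplate being deleted here (a sketch of the macro shown for clarity, not quoted from a specific kernel version):

	#define DEFINE_SHOW_ATTRIBUTE(__name)					\
	static int __name ## _open(struct inode *inode, struct file *file)	\
	{									\
		return single_open(file, __name ## _show, inode->i_private);	\
	}									\
										\
	static const struct file_operations __name ## _fops = {		\
		.owner		= THIS_MODULE,					\
		.open		= __name ## _open,				\
		.read		= seq_read,					\
		.llseek		= seq_lseek,					\
		.release	= single_release,				\
	}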
/*
* Use network device events to create/remove/rename
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 697d9b374f5e..f3a5fa84860f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2485,13 +2485,11 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
skb_copy_hash(skb, re->skb);
- skb->vlan_proto = re->skb->vlan_proto;
- skb->vlan_tci = re->skb->vlan_tci;
+ __vlan_hwaccel_copy_tag(skb, re->skb);
pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
length, PCI_DMA_FROMDEVICE);
- re->skb->vlan_proto = 0;
- re->skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(re->skb);
skb_clear_hash(re->skb);
re->skb->ip_summed = CHECKSUM_NONE;
skb_put(skb, length);
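
The two helpers used above come from <linux/if_vlan.h>; at the time of this conversion they are roughly equivalent to the open-coded assignments being removed (a hedged sketch, not the authoritative implementation):

	static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
	{
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;
	}

	static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst,
						   const struct sk_buff *src)
	{
		dst->vlan_proto = src->vlan_proto;
		dst->vlan_tci = src->vlan_tci;
	}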
@@ -4623,19 +4621,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
napi_enable(&hw->napi);
return 0;
}
-
-static int sky2_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sky2_debug_show, inode->i_private);
-}
-
-static const struct file_operations sky2_debug_fops = {
- .owner = THIS_MODULE,
- .open = sky2_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sky2_debug);
/*
* Use network device events to create/remove/rename
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7dbfdac4067a..399f565dd85a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -243,7 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (dev->phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
@@ -353,8 +353,9 @@ static int mtk_phy_connect(struct net_device *dev)
phy_set_max_speed(dev->phydev, SPEED_1000);
phy_support_asym_pause(dev->phydev);
- dev->phydev->advertising = dev->phydev->supported |
- ADVERTISED_Autoneg;
+ linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ dev->phydev->advertising);
phy_start_aneg(dev->phydev);
of_node_put(np);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index d8e9a323122e..db909b6069b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -144,9 +144,9 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
- int cq_num)
+ int cq_num, u8 opmod)
{
- return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
+ return mlx4_cmd(dev, mailbox->dma, cq_num, opmod,
MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
@@ -287,11 +287,61 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
__mlx4_cq_free_icm(dev, cqn);
}
+static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
+{
+ int entries_per_copy = PAGE_SIZE / cqe_size;
+ void *init_ents;
+ int err = 0;
+ int i;
+
+ init_ents = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!init_ents)
+ return -ENOMEM;
+
+ /* Populate a list of CQ entries to reduce the number of
+ * copy_to_user calls. 0xcc is the initialization value
+ * required by the FW.
+ */
+ memset(init_ents, 0xcc, PAGE_SIZE);
+
+ if (entries_per_copy < entries) {
+ for (i = 0; i < entries / entries_per_copy; i++) {
+ err = copy_to_user(buf, init_ents, PAGE_SIZE);
+ if (err)
+ goto out;
+
+ buf += PAGE_SIZE;
+ }
+ } else {
+ err = copy_to_user(buf, init_ents, entries * cqe_size);
+ }
+
+out:
+ kfree(init_ents);
+
+ return err;
+}
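
As a rough sense of the batching above (figures assumed for illustration): with 4 KiB pages and a 64-byte CQE, entries_per_copy = 4096 / 64 = 64, so initializing a 1024-entry user CQ takes 1024 / 64 = 16 copy_to_user() calls of one page each, rather than one call per CQE.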
+
+static void mlx4_init_kernel_cqes(struct mlx4_buf *buf,
+ int entries,
+ int cqe_size)
+{
+ int i;
+
+ if (buf->nbufs == 1)
+ memset(buf->direct.buf, 0xcc, entries * cqe_size);
+ else
+ for (i = 0; i < buf->npages; i++)
+ memset(buf->page_list[i].buf, 0xcc,
+ 1UL << buf->page_shift);
+}
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
struct mlx4_cq *cq, unsigned vector, int collapsed,
- int timestamp_en)
+ int timestamp_en, void *buf_addr, bool user_cq)
{
+ bool sw_cq_init = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
struct mlx4_cmd_mailbox *mailbox;
@@ -336,7 +386,20 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
cq_context->db_rec_addr = cpu_to_be64(db_rec);
- err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
+ if (sw_cq_init) {
+ if (user_cq) {
+ err = mlx4_init_user_cqes(buf_addr, nent,
+ dev->caps.cqe_size);
+ if (err)
+ sw_cq_init = false;
+ } else {
+ mlx4_init_kernel_cqes(buf_addr, nent,
+ dev->caps.cqe_size);
+ }
+ }
+
+ err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init);
+
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
goto err_radix;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 1e487acb4667..74d466796b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -54,11 +54,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
if (!cq) {
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- en_err(priv, "Failed to allocate CQ structure\n");
- return -ENOMEM;
- }
+ en_err(priv, "Failed to allocate CQ structure\n");
+ return -ENOMEM;
}
cq->size = entries;
@@ -143,7 +140,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
- cq->vector, 0, timestamp_en);
+ cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
if (err)
goto free_eq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index db00bf1c23f5..9a0881cb7f51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -271,11 +271,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
if (!ring) {
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring) {
- en_err(priv, "Failed to allocate RX ring structure\n");
- return -ENOMEM;
- }
+ en_err(priv, "Failed to allocate RX ring structure\n");
+ return -ENOMEM;
}
ring->prod = 0;
@@ -875,7 +872,7 @@ csum_none:
skb->data_len = length;
napi_gro_frags(&cq->napi);
} else {
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb_clear_hash(skb);
}
next:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6f5153afcab4..2cbd2bd7c67c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -57,11 +57,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
if (!ring) {
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring) {
- en_err(priv, "Failed allocating TX ring\n");
- return -ENOMEM;
- }
+ en_err(priv, "Failed allocating TX ring\n");
+ return -ENOMEM;
}
ring->size = size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index babcfd9c0571..7df728f1e5b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -166,6 +166,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[37] = "sl to vl mapping table change event support",
[38] = "user MAC support",
[39] = "Report driver version to FW support",
+ [40] = "SW CQ initialization support",
};
int i;
@@ -1098,6 +1099,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
if (field32 & (1 << 21))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
+ if (field32 & (1 << 23))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
for (i = 1; i <= dev_cap->num_ports; i++) {
err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 6a046030e873..bdb8dd161923 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -63,7 +63,7 @@ struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG
-int mlx4_debug_level = 0;
+int mlx4_debug_level; /* 0 by default */
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
@@ -83,7 +83,7 @@ MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number o
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
-module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
+module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
"num_vfs=port1,port2,port1+2");
@@ -313,7 +313,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports - 1; i++) {
if (port_type[i] != port_type[i + 1]) {
mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
}
@@ -322,7 +322,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
if (!(port_type[i] & dev->caps.supported_type[i+1])) {
mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
i + 1);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
return 0;
@@ -1188,8 +1188,7 @@ static int __set_port_type(struct mlx4_port_info *info,
mlx4_err(mdev,
"Requested port type for port %d is not supported on this HCA\n",
info->port);
- err = -EINVAL;
- goto err_sup;
+ return -EOPNOTSUPP;
}
mlx4_stop_sense(mdev);
@@ -1211,7 +1210,7 @@ static int __set_port_type(struct mlx4_port_info *info,
for (i = 1; i <= mdev->caps.num_ports; i++) {
if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev->caps.possible_type[i] = mdev->caps.port_type[i];
- err = -EINVAL;
+ err = -EOPNOTSUPP;
}
}
}
@@ -1237,7 +1236,7 @@ static int __set_port_type(struct mlx4_port_info *info,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
-err_sup:
+
return err;
}
@@ -3252,7 +3251,7 @@ disable_sriov:
free_mem:
dev->persist->num_vfs = 0;
kfree(dev->dev_vfs);
- dev->dev_vfs = NULL;
+ dev->dev_vfs = NULL;
return dev_flags & ~MLX4_FLAG_MASTER;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 31bd56727022..eb13d3618162 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4729,7 +4729,6 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
struct res_srq *tmp;
int state;
u64 in_param;
- LIST_HEAD(tlist);
int srqn;
int err;
@@ -4795,7 +4794,6 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
struct res_cq *tmp;
int state;
u64 in_param;
- LIST_HEAD(tlist);
int cqn;
int err;
@@ -4858,7 +4856,6 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
struct res_mpt *tmp;
int state;
u64 in_param;
- LIST_HEAD(tlist);
int mptn;
int err;
@@ -4926,7 +4923,6 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
struct res_mtt *mtt;
struct res_mtt *tmp;
int state;
- LIST_HEAD(tlist);
int base;
int err;
@@ -5115,7 +5111,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
struct res_eq *tmp;
int err;
int state;
- LIST_HEAD(tlist);
int eqn;
err = move_all_busy(dev, slave, RES_EQ);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index d324a3884462..9de9abacf7f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -12,17 +12,17 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
# mlx5 core basic
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
- health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
+ health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
- diag/fs_tracepoint.o diag/fw_tracer.o
+ fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
+ lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o
#
# Netdev basic
#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
- en_selftest.o en/port.o
+ en_selftest.o en/port.o en/monitor_stats.o
#
# Netdev extra
@@ -30,7 +30,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o
#
# Core extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a5a0823e5ada..d3125cdf69db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -40,9 +40,11 @@
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
enum {
CMD_IF_REV = 5,
@@ -313,6 +315,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_FPGA_DESTROY_QP:
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_DEALLOC_MEMIC:
+ case MLX5_CMD_OP_PAGE_FAULT_RESUME:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -326,7 +329,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_MKEY:
case MLX5_CMD_OP_QUERY_MKEY:
case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
- case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_CREATE_EQ:
case MLX5_CMD_OP_QUERY_EQ:
case MLX5_CMD_OP_GEN_EQE:
@@ -371,6 +373,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_SET_MONITOR_COUNTER:
+ case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
case MLX5_CMD_OP_QUERY_RATE_LIMIT:
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
@@ -520,6 +524,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
+ MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
@@ -805,6 +811,8 @@ static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
return MLX5_GET(mbox_in, in->first.data, opcode);
}
+static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
+
static void cb_timeout_handler(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work,
@@ -1412,14 +1420,32 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
up(&cmd->sem);
}
+static int cmd_comp_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_core_dev *dev;
+ struct mlx5_cmd *cmd;
+ struct mlx5_eqe *eqe;
+
+ cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
+ dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ eqe = data;
+
+ mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
+
+ return NOTIFY_OK;
+}
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
+ MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
+ mlx5_eq_notifier_register(dev, &dev->cmd.nb);
mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
+ mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1435,7 +1461,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
}
}
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
+static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -1533,7 +1559,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
}
}
}
-EXPORT_SYMBOL(mlx5_cmd_comp_handler);
+
+void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+{
+ unsigned long flags;
+ u64 vector;
+
+ /* wait for pending handlers to complete */
+ mlx5_eq_synchronize_cmd_irq(dev);
+ spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+ vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+ if (!vector)
+ goto no_trig;
+
+ vector |= MLX5_TRIGGERED_CMD_COMP;
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+ mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+ mlx5_cmd_comp_handler(dev, vector, true);
+ return;
+
+no_trig:
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
static int status_to_err(u8 status)
{
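mlx5_cmd_trigger_completions() above derives the set of in-flight command slots from the free-slot bitmask; a standalone sketch of that bit arithmetic with made-up values (cmd.bitmask keeps a set bit per free slot, the interface exposes 2^log_sz slots, and log_sz is assumed small enough for the shift to stay within 64 bits):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int log_sz = 5;		/* 2^5 = 32 command slots */
	uint64_t free_mask = 0xfffffff5;	/* slots 1 and 3 are busy */
	uint64_t all_slots = (1ull << (1u << log_sz)) - 1;
	uint64_t in_flight = ~free_mask & all_slots;

	printf("in-flight vector: 0x%llx\n", (unsigned long long)in_flight);
	return 0;
}

With the values above the program prints 0xa, i.e. bits 1 and 3 set, which is the vector that would be handed to mlx5_cmd_comp_handler().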
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 4b85abb5c9f7..713a17ee3751 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -38,6 +38,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
@@ -92,10 +93,10 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
- struct mlx5_eq *eq;
+ struct mlx5_eq_comp *eq;
int err;
- eq = mlx5_eqn2eq(dev, eqn);
+ eq = mlx5_eqn2comp_eq(dev, eqn);
if (IS_ERR(eq))
return PTR_ERR(eq);
@@ -119,12 +120,12 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
/* Add to comp EQ CQ tree to recv comp events */
- err = mlx5_eq_add_cq(eq, cq);
+ err = mlx5_eq_add_cq(&eq->core, cq);
if (err)
goto err_cmd;
/* Add to async EQ CQ tree to recv async events */
- err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq);
+ err = mlx5_eq_add_cq(mlx5_get_async_eq(dev), cq);
if (err)
goto err_cq_add;
@@ -139,7 +140,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0;
err_cq_add:
- mlx5_eq_del_cq(eq, cq);
+ mlx5_eq_del_cq(&eq->core, cq);
err_cmd:
memset(din, 0, sizeof(din));
memset(dout, 0, sizeof(dout));
@@ -157,11 +158,11 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
int err;
- err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq);
+ err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
if (err)
return err;
- err = mlx5_eq_del_cq(cq->eq, cq);
+ err = mlx5_eq_del_cq(&cq->eq->core, cq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 90fabd612b6c..a11e22d0b0cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
enum {
QP_PID,
@@ -349,6 +350,16 @@ out:
return param;
}
+static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
+
+ MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+ MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 37ba7c78859d..ebc046fa97d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -45,75 +45,11 @@ struct mlx5_device_context {
unsigned long state;
};
-struct mlx5_delayed_event {
- struct list_head list;
- struct mlx5_core_dev *dev;
- enum mlx5_dev_event event;
- unsigned long param;
-};
-
enum {
MLX5_INTERFACE_ADDED,
MLX5_INTERFACE_ATTACHED,
};
-static void add_delayed_event(struct mlx5_priv *priv,
- struct mlx5_core_dev *dev,
- enum mlx5_dev_event event,
- unsigned long param)
-{
- struct mlx5_delayed_event *delayed_event;
-
- delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
- if (!delayed_event) {
- mlx5_core_err(dev, "event %d is missed\n", event);
- return;
- }
-
- mlx5_core_dbg(dev, "Accumulating event %d\n", event);
- delayed_event->dev = dev;
- delayed_event->event = event;
- delayed_event->param = param;
- list_add_tail(&delayed_event->list, &priv->waiting_events_list);
-}
-
-static void delayed_event_release(struct mlx5_device_context *dev_ctx,
- struct mlx5_priv *priv)
-{
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
- struct mlx5_delayed_event *de;
- struct mlx5_delayed_event *n;
- struct list_head temp;
-
- INIT_LIST_HEAD(&temp);
-
- spin_lock_irq(&priv->ctx_lock);
-
- priv->is_accum_events = false;
- list_splice_init(&priv->waiting_events_list, &temp);
- if (!dev_ctx->context)
- goto out;
- list_for_each_entry_safe(de, n, &temp, list)
- dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
-
-out:
- spin_unlock_irq(&priv->ctx_lock);
-
- list_for_each_entry_safe(de, n, &temp, list) {
- list_del(&de->list);
- kfree(de);
- }
-}
-
-/* accumulating events that can come after mlx5_ib calls to
- * ib_register_device, till adding that interface to the events list.
- */
-static void delayed_event_start(struct mlx5_priv *priv)
-{
- spin_lock_irq(&priv->ctx_lock);
- priv->is_accum_events = true;
- spin_unlock_irq(&priv->ctx_lock);
-}
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
@@ -129,8 +65,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
dev_ctx->intf = intf;
- delayed_event_start(priv);
-
dev_ctx->context = intf->add(dev);
if (dev_ctx->context) {
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -139,22 +73,9 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (dev_ctx->intf->pfault) {
- if (priv->pfault) {
- mlx5_core_err(dev, "multiple page fault handlers not supported");
- } else {
- priv->pfault_ctx = dev_ctx->context;
- priv->pfault = dev_ctx->intf->pfault;
- }
- }
-#endif
spin_unlock_irq(&priv->ctx_lock);
}
- delayed_event_release(dev_ctx, priv);
-
if (!dev_ctx->context)
kfree(dev_ctx);
}
@@ -179,15 +100,6 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
if (!dev_ctx)
return;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- spin_lock_irq(&priv->ctx_lock);
- if (priv->pfault == dev_ctx->intf->pfault)
- priv->pfault = NULL;
- spin_unlock_irq(&priv->ctx_lock);
-
- synchronize_srcu(&priv->pfault_srcu);
-#endif
-
spin_lock_irq(&priv->ctx_lock);
list_del(&dev_ctx->list);
spin_unlock_irq(&priv->ctx_lock);
@@ -207,26 +119,20 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
if (!dev_ctx)
return;
- delayed_event_start(priv);
if (intf->attach) {
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
- goto out;
+ return;
if (intf->attach(dev, dev_ctx->context))
- goto out;
-
+ return;
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
} else {
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
- goto out;
+ return;
dev_ctx->context = intf->add(dev);
if (!dev_ctx->context)
- goto out;
-
+ return;
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
}
-
-out:
- delayed_event_release(dev_ctx, priv);
}
void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -350,28 +256,6 @@ void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
mutex_unlock(&mlx5_intf_mutex);
}
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
- struct mlx5_priv *priv = &mdev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
- void *result = NULL;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
- if ((dev_ctx->intf->protocol == protocol) &&
- dev_ctx->intf->get_dev) {
- result = dev_ctx->intf->get_dev(dev_ctx->context);
- break;
- }
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-
/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
@@ -422,44 +306,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
return res;
}
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- if (priv->is_accum_events)
- add_delayed_event(priv, dev, event, param);
-
- /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
- * still in priv->ctx_list. In this case, only notify the dev_ctx if its
- * ADDED or ATTACHED bit are set.
- */
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf->event &&
- (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
- test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
- dev_ctx->intf->event(dev, dev_ctx->context, event, param);
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-}
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_core_page_fault(struct mlx5_core_dev *dev,
- struct mlx5_pagefault *pfault)
-{
- struct mlx5_priv *priv = &dev->priv;
- int srcu_idx;
-
- srcu_idx = srcu_read_lock(&priv->pfault_srcu);
- if (priv->pfault)
- priv->pfault(dev, priv->pfault_ctx, pfault);
- srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
-}
-#endif
void mlx5_dev_list_lock(void)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 0f11fff32a9b..424457ff9759 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -161,10 +161,10 @@ static void print_misc_parameters_hdrs(struct trace_seq *p,
PRINT_MASKED_VAL(name, p, format); \
}
DECLARE_MASK_VAL(u64, gre_key) = {
- .m = MLX5_GET(fte_match_set_misc, mask, gre_key_h) << 8 |
- MLX5_GET(fte_match_set_misc, mask, gre_key_l),
- .v = MLX5_GET(fte_match_set_misc, value, gre_key_h) << 8 |
- MLX5_GET(fte_match_set_misc, value, gre_key_l)};
+ .m = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi) << 8 |
+ MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo),
+ .v = MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.hi) << 8 |
+ MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.lo)};
PRINT_MASKED_VAL(gre_key, p, "%llu");
PRINT_MASKED_VAL_MISC(u32, source_sqn, source_sqn, p, "%u");
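The tracepoint hunk above rebuilds the GRE key from the split gre_key.nvgre.hi/lo match fields; a tiny standalone sketch of that 24-bit/8-bit recombination (the sample values are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hi = 0x00abcdef;	/* 24-bit high part (NVGRE VSID) */
	uint32_t lo = 0x42;		/* 8-bit low part (NVGRE FlowID) */
	uint32_t gre_key = hi << 8 | lo;

	printf("gre key: 0x%08x\n", gre_key);	/* prints 0xabcdef42 */
	return 0;
}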
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index d4ec93bde4de..6999f4486e9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
#define CREATE_TRACE_POINTS
+#include "lib/eq.h"
#include "fw_tracer.h"
#include "fw_tracer_tracepoint.h"
@@ -846,9 +847,9 @@ free_tracer:
return ERR_PTR(err);
}
-/* Create HW resources + start tracer
- * must be called before Async EQ is created
- */
+static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data);
+
+/* Create HW resources + start tracer */
int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev;
@@ -874,6 +875,9 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
goto err_dealloc_pd;
}
+ MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
+ mlx5_eq_notifier_register(dev, &tracer->nb);
+
mlx5_fw_tracer_start(tracer);
return 0;
@@ -883,9 +887,7 @@ err_dealloc_pd:
return err;
}
-/* Stop tracer + Cleanup HW resources
- * must be called after Async EQ is destroyed
- */
+/* Stop tracer + Cleanup HW resources */
void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
{
if (IS_ERR_OR_NULL(tracer))
@@ -893,7 +895,7 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
tracer->owner);
-
+ mlx5_eq_notifier_unregister(tracer->dev, &tracer->nb);
cancel_work_sync(&tracer->ownership_change_work);
cancel_work_sync(&tracer->handle_traces_work);
@@ -922,12 +924,11 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
kfree(tracer);
}
-void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
{
- struct mlx5_fw_tracer *tracer = dev->tracer;
-
- if (!tracer)
- return;
+ struct mlx5_fw_tracer *tracer = mlx5_nb_cof(nb, struct mlx5_fw_tracer, nb);
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct mlx5_eqe *eqe = data;
switch (eqe->sub_type) {
case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
@@ -942,6 +943,8 @@ void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
eqe->sub_type);
}
+
+ return NOTIFY_OK;
}
EXPORT_TRACEPOINT_SYMBOL(mlx5_fw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 0347f2dd5cee..a8b8747f2b61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -55,6 +55,7 @@
struct mlx5_fw_tracer {
struct mlx5_core_dev *dev;
+ struct mlx5_nb nb;
bool owner;
u8 trc_ver;
struct workqueue_struct *work_queue;
@@ -170,6 +171,5 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev);
int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
-void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
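The tracer, like the command interface earlier in this patch, now receives its EQ events through an embedded mlx5_nb instead of a direct call; a simplified user-space sketch of the container_of() pattern behind mlx5_nb_cof(), using stand-in types rather than the kernel ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long action, void *data);
};

struct fw_tracer {
	const char *name;
	struct notifier_block nb;	/* embedded, registered with the EQ */
};

static int tracer_event(struct notifier_block *nb, unsigned long action,
			void *data)
{
	/* recover the owning structure from the embedded member */
	struct fw_tracer *tracer = container_of(nb, struct fw_tracer, nb);

	(void)data;
	printf("event %lu for tracer %s\n", action, tracer->name);
	return 0;
}

int main(void)
{
	struct fw_tracer tracer = { .name = "fwtracer" };

	tracer.nb.notifier_call = tracer_event;
	/* the EQ core would invoke this when a tracer EQE arrives */
	tracer.nb.notifier_call(&tracer.nb, 1, NULL);
	return 0;
}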
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 118324802926..8fa8fdd30b85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -49,6 +49,7 @@
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/net_dim.h>
+#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -147,9 +148,6 @@ struct page_pool;
MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
-
-#define MLX5E_NUM_MAIN_GROUPS 9
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -178,8 +176,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
MLX5E_MIN_NUM_CHANNELS :
- min_t(int, mdev->priv.eq_table.num_comp_vectors,
- MLX5E_MAX_NUM_CHANNELS);
+ min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
/* Use this function to get max num channels after netdev was created */
@@ -214,22 +211,24 @@ struct mlx5e_umr_wqe {
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
enum mlx5e_priv_flag {
- MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
- MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
- MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
- MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
- MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
+ MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ MLX5E_PFLAG_RX_CQE_COMPRESS,
+ MLX5E_PFLAG_RX_STRIDING_RQ,
+ MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+ MLX5E_PFLAG_XDP_TX_MPWQE,
+ MLX5E_NUM_PFLAGS, /* Keep last */
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
do { \
if (enable) \
- (params)->pflags |= (pflag); \
+ (params)->pflags |= BIT(pflag); \
else \
- (params)->pflags &= ~(pflag); \
+ (params)->pflags &= ~(BIT(pflag)); \
} while (0)
-#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
+#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
@@ -247,9 +246,6 @@ struct mlx5e_params {
bool lro_en;
u32 lro_wqe_sz;
u8 tx_min_inline_mode;
- u8 rss_hfunc;
- u8 toeplitz_hash_key[40];
- u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
bool scatter_fcs_en;
bool rx_dim_enabled;
@@ -349,7 +345,6 @@ enum {
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
- MLX5E_SQ_STATE_REDIRECT,
};
struct mlx5e_sq_wqe_info {
@@ -410,24 +405,51 @@ struct mlx5e_xdp_info {
struct mlx5e_dma_info di;
};
+struct mlx5e_xdp_info_fifo {
+ struct mlx5e_xdp_info *xi;
+ u32 *cc;
+ u32 *pc;
+ u32 mask;
+};
+
+struct mlx5e_xdp_wqe_info {
+ u8 num_wqebbs;
+ u8 num_ds;
+};
+
+struct mlx5e_xdp_mpwqe {
+ /* Current MPWQE session */
+ struct mlx5e_tx_wqe *wqe;
+ u8 ds_count;
+ u8 max_ds_count;
+};
+
+struct mlx5e_xdpsq;
+typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq*,
+ struct mlx5e_xdp_info*);
struct mlx5e_xdpsq {
/* data path */
/* dirtied @completion */
+ u32 xdpi_fifo_cc;
u16 cc;
bool redirect_flush;
/* dirtied @xmit */
- u16 pc ____cacheline_aligned_in_smp;
- bool doorbell;
+ u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
+ u16 pc;
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+ struct mlx5e_xdp_mpwqe mpwqe;
struct mlx5e_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
struct mlx5e_xdpsq_stats *stats;
+ mlx5e_fp_xmit_xdp_frame xmit_xdp_frame;
struct {
- struct mlx5e_xdp_info *xdpi;
+ struct mlx5e_xdp_wqe_info *wqe_info;
+ struct mlx5e_xdp_info_fifo xdpi_fifo;
} db;
void __iomem *uar_map;
u32 sqn;
@@ -633,7 +655,6 @@ struct mlx5e_channel_stats {
} ____cacheline_aligned_in_smp;
enum {
- MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
};
@@ -654,6 +675,13 @@ enum {
MLX5E_NIC_PRIO
};
+struct mlx5e_rss_params {
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+ u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
+ u8 toeplitz_hash_key[40];
+ u8 hfunc;
+};
+
struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
@@ -674,6 +702,7 @@ struct mlx5e_priv {
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_rss_params rss_params;
u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
@@ -683,6 +712,8 @@ struct mlx5e_priv {
struct work_struct set_rx_mode_work;
struct work_struct tx_timeout_work;
struct work_struct update_stats_work;
+ struct work_struct monitor_counters_work;
+ struct mlx5_nb monitor_counters_nb;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
@@ -692,6 +723,8 @@ struct mlx5e_priv {
struct hwtstamp_config tstamp;
u16 q_counter;
u16 drop_rq_q_counter;
+ struct notifier_block events_nb;
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
#endif
@@ -769,6 +802,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
void mlx5e_update_stats(struct mlx5e_priv *priv);
+void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
@@ -799,9 +833,11 @@ struct mlx5e_redirect_rqt_param {
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
struct mlx5e_redirect_rqt_param rrp);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
- enum mlx5e_traffic_types tt,
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
+ const struct mlx5e_tirc_config *ttconfig,
void *tirc, bool inner);
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen);
+struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -931,14 +967,16 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
int mlx5e_create_tises(struct mlx5e_priv *priv);
-void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
+void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
+void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
+int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
change_hw_mtu_cb set_mtu_cb);
@@ -962,12 +1000,20 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal);
+int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ struct ethtool_link_ksettings *link_ksettings);
+int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ const struct ethtool_link_ksettings *link_ksettings);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
+void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam);
+int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
@@ -983,12 +1029,26 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
u16 max_channels, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
-void mlx5e_build_rss_params(struct mlx5e_params *params);
+void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
+ u16 num_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
+
+void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
+void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
+netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features);
+#ifdef CONFIG_MLX5_ESWITCH
+int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
+int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
+int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
+#endif
#endif /* __MLX5_EN_H__ */
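The en.h hunk above switches the private-flag enum from pre-shifted values to plain bit indices consumed through BIT(), so MLX5E_NUM_PFLAGS can size the ethtool arrays; a small standalone sketch of the resulting SET/GET macros, with BIT() and shortened flag names defined locally for illustration:

#include <stdio.h>

#define BIT(n) (1u << (n))

enum priv_flag {
	PFLAG_RX_CQE_BASED_MODER,
	PFLAG_TX_CQE_BASED_MODER,
	PFLAG_RX_CQE_COMPRESS,
	NUM_PFLAGS,			/* keep last */
};

struct params { unsigned int pflags; };

#define SET_PFLAG(p, flag, enable)			\
	do {						\
		if (enable)				\
			(p)->pflags |= BIT(flag);	\
		else					\
			(p)->pflags &= ~BIT(flag);	\
	} while (0)

#define GET_PFLAG(p, flag) (!!((p)->pflags & BIT(flag)))

int main(void)
{
	struct params p = { 0 };

	SET_PFLAG(&p, PFLAG_RX_CQE_COMPRESS, 1);
	printf("compress=%d moder=%d (%d flags total)\n",
	       GET_PFLAG(&p, PFLAG_RX_CQE_COMPRESS),
	       GET_PFLAG(&p, PFLAG_RX_CQE_BASED_MODER),
	       NUM_PFLAGS);
	return 0;
}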
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1431232c9a09..be5961ff24cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -73,6 +73,22 @@ enum mlx5e_traffic_types {
MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};
+struct mlx5e_tirc_config {
+ u8 l3_prot_type;
+ u8 l4_prot_type;
+ u32 rx_hash_fields;
+};
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
enum mlx5e_tunnel_types {
MLX5E_TT_IPV4_GRE,
MLX5E_TT_IPV6_GRE,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
new file mode 100644
index 000000000000..2ce420851e77
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include "en.h"
+#include "monitor_stats.h"
+#include "lib/eq.h"
+
+/* Driver will set the following watch counters list:
+ * Ppcnt.802_3:
+ * a_in_range_length_errors Type: 0x0, Counter: 0x0, group_id = N/A
+ * a_out_of_range_length_field Type: 0x0, Counter: 0x1, group_id = N/A
+ * a_frame_too_long_errors Type: 0x0, Counter: 0x2, group_id = N/A
+ * a_frame_check_sequence_errors Type: 0x0, Counter: 0x3, group_id = N/A
+ * a_alignment_errors Type: 0x0, Counter: 0x4, group_id = N/A
+ * if_out_discards Type: 0x0, Counter: 0x5, group_id = N/A
+ * Q_Counters:
+ * Q[index].rx_out_of_buffer Type: 0x1, Counter: 0x4, group_id = counter_ix
+ */
+
+#define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1
+#define NUM_REQ_Q_COUNTERS_S1 MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1
+
+int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
+ return false;
+ if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
+ MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) <
+ NUM_REQ_PPCNT_COUNTER_S1)
+ return false;
+ if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) <
+ NUM_REQ_Q_COUNTERS_S1)
+ return false;
+ return true;
+}
+
+void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {};
+
+ MLX5_SET(arm_monitor_counter_in, in, opcode,
+ MLX5_CMD_OP_ARM_MONITOR_COUNTER);
+ mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5e_monitor_counters_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ monitor_counters_work);
+
+ mutex_lock(&priv->state_lock);
+ mlx5e_update_ndo_stats(priv);
+ mutex_unlock(&priv->state_lock);
+ mlx5e_monitor_counter_arm(priv);
+}
+
+static int mlx5e_monitor_event_handler(struct notifier_block *nb,
+ unsigned long event, void *eqe)
+{
+ struct mlx5e_priv *priv = mlx5_nb_cof(nb, struct mlx5e_priv,
+ monitor_counters_nb);
+ queue_work(priv->wq, &priv->monitor_counters_work);
+ return NOTIFY_OK;
+}
+
+void mlx5e_monitor_counter_start(struct mlx5e_priv *priv)
+{
+ MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
+ MONITOR_COUNTER);
+ mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
+}
+
+static void mlx5e_monitor_counter_stop(struct mlx5e_priv *priv)
+{
+ mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+ cancel_work_sync(&priv->monitor_counters_work);
+}
+
+static int fill_monitor_counter_ppcnt_set1(int cnt, u32 *in)
+{
+ enum mlx5_monitor_counter_ppcnt ppcnt_cnt;
+
+ for (ppcnt_cnt = 0;
+ ppcnt_cnt < NUM_REQ_PPCNT_COUNTER_S1;
+ ppcnt_cnt++, cnt++) {
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].type,
+ MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT);
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].counter,
+ ppcnt_cnt);
+ }
+ return ppcnt_cnt;
+}
+
+static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in)
+{
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].type,
+ MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER);
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].counter,
+ MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER);
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].counter_group_id,
+ q_counter);
+ return 1;
+}
+
+/* check if mlx5e_monitor_counter_supported before calling this function */
+static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
+ int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
+ int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
+ MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+ int q_counter = priv->q_counter;
+ int cnt = 0;
+
+ if (num_ppcnt_counters >= NUM_REQ_PPCNT_COUNTER_S1 &&
+ max_num_of_counters >= (NUM_REQ_PPCNT_COUNTER_S1 + cnt))
+ cnt += fill_monitor_counter_ppcnt_set1(cnt, in);
+
+ if (num_q_counters >= NUM_REQ_Q_COUNTERS_S1 &&
+ max_num_of_counters >= (NUM_REQ_Q_COUNTERS_S1 + cnt) &&
+ q_counter)
+ cnt += fill_monitor_counter_q_counter_set1(cnt, q_counter, in);
+
+ MLX5_SET(set_monitor_counter_in, in, num_of_counters, cnt);
+ MLX5_SET(set_monitor_counter_in, in, opcode,
+ MLX5_CMD_OP_SET_MONITOR_COUNTER);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+/* check if mlx5e_monitor_counter_supported before calling this function */
+void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
+{
+ INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
+ mlx5e_monitor_counter_start(priv);
+ mlx5e_set_monitor_counter(priv);
+ mlx5e_monitor_counter_arm(priv);
+ queue_work(priv->wq, &priv->update_stats_work);
+}
+
+static void mlx5e_monitor_counter_disable(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+
+ MLX5_SET(set_monitor_counter_in, in, num_of_counters, 0);
+ MLX5_SET(set_monitor_counter_in, in, opcode,
+ MLX5_CMD_OP_SET_MONITOR_COUNTER);
+
+ mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+/* check if mlx5e_monitor_counter_supported before calling this function */
+void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
+{
+ mlx5e_monitor_counter_disable(priv);
+ mlx5e_monitor_counter_stop(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
new file mode 100644
index 000000000000..e1ac4b3d22fb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5_MONITOR_H__
+#define __MLX5_MONITOR_H__
+
+int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv);
+void mlx5e_monitor_counter_init(struct mlx5e_priv *priv);
+void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv);
+void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv);
+
+#endif /* __MLX5_MONITOR_H__ */
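A flow sketch of the monitor-counter path added in monitor_stats.c above: the EQ notifier only queues work, the work item refreshes the stats (under the state lock in the driver) and then re-arms the monitor so the next threshold crossing raises a new event. Plain function calls stand in for the workqueue and the firmware commands:

#include <stdio.h>

static void update_ndo_stats(void)	{ printf("stats refreshed\n"); }
static void arm_monitor_counter(void)	{ printf("monitor re-armed\n"); }

/* runs in workqueue context in the driver */
static void monitor_counters_work(void)
{
	update_ndo_stats();
	arm_monitor_counter();
}

/* runs from the EQ notifier: defer the heavy lifting, then return */
static void monitor_event(void)
{
	monitor_counters_work();	/* queue_work() in the driver */
}

int main(void)
{
	monitor_event();
	return 0;
}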
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
new file mode 100644
index 000000000000..046948ead152
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/vxlan.h>
+#include <net/gre.h>
+#include "lib/vxlan.h"
+#include "en/tc_tun.h"
+
+static int get_route_and_out_devs(struct mlx5e_priv *priv,
+ struct net_device *dev,
+ struct net_device **route_dev,
+ struct net_device **out_dev)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct net_device *uplink_dev, *uplink_upper;
+ bool dst_is_lag_dev;
+
+ uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+ dst_is_lag_dev = (uplink_upper &&
+ netif_is_lag_master(uplink_upper) &&
+ dev == uplink_upper &&
+ mlx5_lag_is_sriov(priv->mdev));
+
+ /* if the egress device isn't on the same HW e-switch or
+ * it's a LAG device, use the uplink
+ */
+ if (!switchdev_port_same_parent_id(priv->netdev, dev) ||
+ dst_is_lag_dev) {
+ *route_dev = uplink_dev;
+ *out_dev = *route_dev;
+ } else {
+ *route_dev = dev;
+ if (is_vlan_dev(*route_dev))
+ *out_dev = uplink_dev;
+ else if (mlx5e_eswitch_rep(dev))
+ *out_dev = *route_dev;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi4 *fl4,
+ struct neighbour **out_n,
+ u8 *out_ttl)
+{
+ struct rtable *rt;
+ struct neighbour *n = NULL;
+
+#if IS_ENABLED(CONFIG_INET)
+ int ret;
+
+ rt = ip_route_output_key(dev_net(mirred_dev), fl4);
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
+#else
+ return -EOPNOTSUPP;
+#endif
+
+ ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
+ if (ret < 0)
+ return ret;
+
+ if (!(*out_ttl))
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
+ n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
+ ip_rt_put(rt);
+ if (!n)
+ return -ENOMEM;
+
+ *out_n = n;
+ return 0;
+}
+
+static const char *mlx5e_netdev_kind(struct net_device *dev)
+{
+ if (dev->rtnl_link_ops)
+ return dev->rtnl_link_ops->kind;
+ else
+ return "";
+}
+
+static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ u8 *out_ttl)
+{
+ struct neighbour *n = NULL;
+ struct dst_entry *dst;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ int ret;
+
+ ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+ fl6);
+ if (ret < 0)
+ return ret;
+
+ if (!(*out_ttl))
+ *out_ttl = ip6_dst_hoplimit(dst);
+
+ ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
+ if (ret < 0)
+ return ret;
+#else
+ return -EOPNOTSUPP;
+#endif
+
+ n = dst_neigh_lookup(dst, &fl6->daddr);
+ dst_release(dst);
+ if (!n)
+ return -ENOMEM;
+
+ *out_n = n;
+ return 0;
+}
+
+static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
+{
+ __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ struct udphdr *udp = (struct udphdr *)(buf);
+ struct vxlanhdr *vxh = (struct vxlanhdr *)
+ ((char *)udp + sizeof(struct udphdr));
+
+ udp->dest = tun_key->tp_dst;
+ vxh->vx_flags = VXLAN_HF_VNI;
+ vxh->vx_vni = vxlan_vni_field(tun_id);
+
+ return 0;
+}
+
+static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
+{
+ __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ int hdr_len;
+ struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
+
+ /* the HW does not calculate GRE csum or sequences */
+ if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
+ return -EOPNOTSUPP;
+
+ greh->protocol = htons(ETH_P_TEB);
+
+ /* GRE key */
+ hdr_len = gre_calc_hlen(tun_key->tun_flags);
+ greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
+ if (tun_key->tun_flags & TUNNEL_KEY) {
+ __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+ *ptr = tun_id;
+ }
+
+ return 0;
+}
+
+static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
+ struct mlx5e_encap_entry *e)
+{
+ int err = 0;
+ struct ip_tunnel_key *key = &e->tun_info.key;
+
+ if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ *ip_proto = IPPROTO_UDP;
+ err = mlx5e_gen_vxlan_header(buf, key);
+ } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
+ *ip_proto = IPPROTO_GRE;
+ err = mlx5e_gen_gre_header(buf, key);
+ } else {
+ pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n"
+ , e->tunnel_type);
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
+ struct mlx5e_encap_entry *e,
+ u16 proto)
+{
+ struct ethhdr *eth = (struct ethhdr *)buf;
+ char *ip;
+
+ ether_addr_copy(eth->h_dest, e->h_dest);
+ ether_addr_copy(eth->h_source, dev->dev_addr);
+ if (is_vlan_dev(dev)) {
+ struct vlan_hdr *vlan = (struct vlan_hdr *)
+ ((char *)eth + ETH_HLEN);
+ ip = (char *)vlan + VLAN_HLEN;
+ eth->h_proto = vlan_dev_vlan_proto(dev);
+ vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
+ vlan->h_vlan_encapsulated_proto = htons(proto);
+ } else {
+ eth->h_proto = htons(proto);
+ ip = (char *)eth + ETH_HLEN;
+ }
+
+ return ip;
+}
+
+int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ struct net_device *out_dev, *route_dev;
+ struct neighbour *n = NULL;
+ struct flowi4 fl4 = {};
+ int ipv4_encap_size;
+ char *encap_header;
+ u8 nud_state, ttl;
+ struct iphdr *ip;
+ int err;
+
+ /* add the IP fields */
+ fl4.flowi4_tos = tun_key->tos;
+ fl4.daddr = tun_key->u.ipv4.dst;
+ fl4.saddr = tun_key->u.ipv4.src;
+ ttl = tun_key->ttl;
+
+ err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
+ &fl4, &n, &ttl);
+ if (err)
+ return err;
+
+ ipv4_encap_size =
+ (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ sizeof(struct iphdr) +
+ e->tunnel_hlen;
+
+ if (max_encap_size < ipv4_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv4_encap_size, max_encap_size);
+ return -EOPNOTSUPP;
+ }
+
+ encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
+ if (!encap_header)
+ return -ENOMEM;
+
+ /* used by mlx5e_detach_encap to look up an entry in the neigh
+ * hash table when a user deletes a rule
+ */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+ /* It's important to add the neigh to the hash table before checking
+ * the neigh validity state. So if we'll get a notification, in case the
+ * neigh changes its validity state, we would find the relevant neigh
+ * in the hash.
+ */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
+ goto free_encap;
+
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
+
+ /* add ethernet header */
+ ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
+ ETH_P_IP);
+
+ /* add ip header */
+ ip->tos = tun_key->tos;
+ ip->version = 0x4;
+ ip->ihl = 0x5;
+ ip->ttl = ttl;
+ ip->daddr = fl4.daddr;
+ ip->saddr = fl4.saddr;
+
+ /* add tunneling protocol header */
+ err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
+ &ip->protocol, e);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->encap_size = ipv4_encap_size;
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ err = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv4_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
+free_encap:
+ kfree(encap_header);
+out:
+ if (n)
+ neigh_release(n);
+ return err;
+}
+
+int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ struct net_device *out_dev, *route_dev;
+ struct neighbour *n = NULL;
+ struct flowi6 fl6 = {};
+ struct ipv6hdr *ip6h;
+ int ipv6_encap_size;
+ char *encap_header;
+ u8 nud_state, ttl;
+ int err;
+
+ ttl = tun_key->ttl;
+
+ fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ fl6.daddr = tun_key->u.ipv6.dst;
+ fl6.saddr = tun_key->u.ipv6.src;
+
+ err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
+ &fl6, &n, &ttl);
+ if (err)
+ return err;
+
+ ipv6_encap_size =
+ (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ sizeof(struct ipv6hdr) +
+ e->tunnel_hlen;
+
+ if (max_encap_size < ipv6_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv6_encap_size, max_encap_size);
+ return -EOPNOTSUPP;
+ }
+
+ encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
+ if (!encap_header)
+ return -ENOMEM;
+
+ /* used by mlx5e_detach_encap to look up an entry in the neigh
+ * hash table when a user deletes a rule
+ */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+ /* It's important to add the neigh to the hash table before checking
+ * the neigh validity state. So if we'll get a notification, in case the
+ * neigh changes its validity state, we would find the relevant neigh
+ * in the hash.
+ */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
+ goto free_encap;
+
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
+
+ /* add ethernet header */
+ ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
+ ETH_P_IPV6);
+
+ /* add ip header */
+ ip6_flow_hdr(ip6h, tun_key->tos, 0);
+ /* the HW fills up ipv6 payload len */
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = fl6.daddr;
+ ip6h->saddr = fl6.saddr;
+
+ /* add tunneling protocol header */
+ err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
+ &ip6h->nexthdr, e);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->encap_size = ipv6_encap_size;
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ err = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv6_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
+free_encap:
+ kfree(encap_header);
+out:
+ if (n)
+ neigh_release(n);
+ return err;
+}
+
+int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
+{
+ if (netif_is_vxlan(tunnel_dev))
+ return MLX5E_TC_TUNNEL_TYPE_VXLAN;
+ else if (netif_is_gretap(tunnel_dev) ||
+ netif_is_ip6gretap(tunnel_dev))
+ return MLX5E_TC_TUNNEL_TYPE_GRETAP;
+ else
+ return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
+}
+
+bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
+ struct net_device *netdev)
+{
+ int tunnel_type = mlx5e_tc_tun_get_type(netdev);
+
+ if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
+ MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
+ return true;
+ else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
+ MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
+ return true;
+ else
+ return false;
+}
+
+int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);
+
+ if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);
+
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vxlan udp dport was not registered with the HW");
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n",
+ dst_port);
+ return -EOPNOTSUPP;
+ }
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
+ e->tunnel_hlen = VXLAN_HLEN;
+ } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
+ e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
+ } else {
+ e->reformat_type = -1;
+ e->tunnel_hlen = -1;
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_dissector_key_ports *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ f->key);
+ struct flow_dissector_key_ports *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ f->mask);
+ void *misc_c = MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ misc_parameters);
+
+ /* Full udp dst port must be given */
+ if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
+ memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VXLAN decap filter must include enc_dst_port condition");
+ netdev_warn(priv->netdev,
+ "VXLAN decap filter must include enc_dst_port condition\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* udp dst port must be known as a VXLAN port */
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matched UDP port is not registered as a VXLAN port");
+ netdev_warn(priv->netdev,
+ "UDP port %d is not registered as a VXLAN port\n",
+ be16_to_cpu(key->dst));
+ return -EOPNOTSUPP;
+ }
+
+ /* dst UDP port is valid here */
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));
+
+ /* match on VNI */
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->key);
+ struct flow_dissector_key_keyid *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->mask);
+ MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
+ be32_to_cpu(mask->keyid));
+ MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
+ be32_to_cpu(key->keyid));
+ }
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *outer_headers_c,
+ void *outer_headers_v)
+{
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "GRE HW offloading is not supported");
+ netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ ip_protocol, IPPROTO_GRE);
+
+ /* gre protocol */
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
+
+ /* gre key */
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *mask = NULL;
+ struct flow_dissector_key_keyid *key = NULL;
+
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->mask);
+ MLX5_SET(fte_match_set_misc, misc_c,
+ gre_key.key, be32_to_cpu(mask->keyid));
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->key);
+ MLX5_SET(fte_match_set_misc, misc_v,
+ gre_key.key, be32_to_cpu(key->keyid));
+ }
+
+ return 0;
+}
+
+int mlx5e_tc_tun_parse(struct net_device *filter_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ int tunnel_type;
+ int err = 0;
+
+ tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
+ if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
+ headers_c, headers_v);
+ } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
+ err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
+ headers_c, headers_v);
+ } else {
+ netdev_warn(priv->netdev,
+ "decapsulation offload is not supported for %s net device (%d)\n",
+ mlx5e_netdev_kind(filter_dev), tunnel_type);
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
new file mode 100644
index 000000000000..706ce7bf15e7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_TUNNEL_H__
+#define __MLX5_EN_TC_TUNNEL_H__
+
+#include <linux/netdevice.h>
+#include <linux/mlx5/fs.h>
+#include <net/pkt_cls.h>
+#include <linux/netlink.h>
+#include "en.h"
+#include "en_rep.h"
+
+enum {
+ MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
+ MLX5E_TC_TUNNEL_TYPE_VXLAN,
+ MLX5E_TC_TUNNEL_TYPE_GRETAP
+};
+
+int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack);
+
+int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e);
+
+int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e);
+
+int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev);
+bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
+ struct net_device *netdev);
+
+int mlx5e_tc_tun_parse(struct net_device *filter_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v);
+
+#endif //__MLX5_EN_TC_TUNNEL_H__
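A minimal sketch of how the tc_tun helpers declared above are meant to compose; the caller below is hypothetical and purely illustrative (the real call sites live elsewhere in the driver):

	/* Hypothetical caller, for illustration only. */
	static int example_parse_tunnel_match(struct net_device *filter_dev,
					      struct mlx5e_priv *priv,
					      struct mlx5_flow_spec *spec,
					      struct tc_cls_flower_offload *f,
					      void *headers_c, void *headers_v)
	{
		/* Reject devices that are neither VXLAN nor HW-offloadable GRE. */
		if (!mlx5e_tc_tun_device_to_offload(priv, filter_dev))
			return -EOPNOTSUPP;

		/* Dispatches to the VXLAN or gretap parser. */
		return mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					  headers_c, headers_v);
	}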
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index ad6d471d00dd..3740177eed09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -47,7 +47,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
xdpi.xdpf->len, PCI_DMA_TODEVICE);
xdpi.di = *di;
- return mlx5e_xmit_xdp_frame(sq, &xdpi);
+ return sq->xmit_xdp_frame(sq, &xdpi);
}
/* returns true if packet was consumed by xdp */
@@ -102,7 +102,98 @@ xdp_abort:
}
}
-bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
+static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u8 wqebbs;
+ u16 pi;
+
+ mlx5e_xdpsq_fetch_wqe(sq, &session->wqe);
+
+ prefetchw(session->wqe->data);
+ session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+/* The product MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
+ * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
+ * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
+ * full-session WQE be cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
+#else
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
+#endif
+
+ wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi),
+ MLX5E_XDP_MPW_MAX_WQEBBS);
+
+ session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
+}
+
+static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
+ u16 ds_count = session->ds_count;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
+
+ cseg->opmod_idx_opcode =
+ cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
+
+ wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
+ wi->num_ds = ds_count - MLX5E_XDP_TX_EMPTY_DS_COUNT;
+
+ sq->pc += wi->num_wqebbs;
+
+ sq->doorbell_cseg = cseg;
+
+ session->wqe = NULL; /* Close session */
+}
+
+static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
+ struct mlx5e_xdp_info *xdpi)
+{
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
+
+ dma_addr_t dma_addr = xdpi->dma_addr;
+ struct xdp_frame *xdpf = xdpi->xdpf;
+ unsigned int dma_len = xdpf->len;
+
+ if (unlikely(sq->hw_mtu < dma_len)) {
+ stats->err++;
+ return false;
+ }
+
+ if (unlikely(!session->wqe)) {
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
+ MLX5_SEND_WQE_MAX_WQEBBS))) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ stats->full++;
+ return false;
+ }
+
+ mlx5e_xdp_mpwqe_session_start(sq);
+ }
+
+ mlx5e_xdp_mpwqe_add_dseg(sq, dma_addr, dma_len);
+
+ if (unlikely(session->ds_count == session->max_ds_count))
+ mlx5e_xdp_mpwqe_complete(sq);
+
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
+ stats->xmit++;
+ return true;
+}
+
+static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -126,11 +217,8 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
}
if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
- if (sq->doorbell) {
- /* SQ is full, ring doorbell */
- mlx5e_xmit_xdp_doorbell(sq);
- sq->doorbell = false;
- }
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
stats->full++;
return false;
}
@@ -152,23 +240,20 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- /* move page to reference to sq responsibility,
- * and mark so it's not put back in page-cache.
- */
- sq->db.xdpi[pi] = *xdpi;
sq->pc++;
- sq->doorbell = true;
+ sq->doorbell_cseg = cseg;
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
stats->xmit++;
return true;
}
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
+ struct mlx5e_xdp_info_fifo *xdpi_fifo;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
bool is_redirect;
u16 sqcc;
int i;
@@ -182,8 +267,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
if (!cqe)
return false;
- is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
- rq = container_of(sq, struct mlx5e_rq, xdpsq);
+ is_redirect = !rq;
+ xdpi_fifo = &sq->db.xdpi_fifo;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
@@ -199,20 +284,33 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
+ netdev_WARN_ONCE(sq->channel->netdev,
+ "Bad OP in XDPSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+
do {
- u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+ struct mlx5e_xdp_wqe_info *wi;
+ u16 ci, j;
last_wqe = (sqcc == wqe_counter);
- sqcc++;
-
- if (is_redirect) {
- xdp_return_frame(xdpi->xdpf);
- dma_unmap_single(sq->pdev, xdpi->dma_addr,
- xdpi->xdpf->len, DMA_TO_DEVICE);
- } else {
- /* Recycle RX page */
- mlx5e_page_release(rq, &xdpi->di, true);
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ wi = &sq->db.wqe_info[ci];
+
+ sqcc += wi->num_wqebbs;
+
+ for (j = 0; j < wi->num_ds; j++) {
+ struct mlx5e_xdp_info xdpi =
+ mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi.xdpf);
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi.di, true);
+ }
}
} while (!last_wqe);
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
@@ -228,27 +326,32 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
- struct mlx5e_rq *rq;
- bool is_redirect;
-
- is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
- rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);
+ struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+ bool is_redirect = !rq;
while (sq->cc != sq->pc) {
- u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
- struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
-
- sq->cc++;
-
- if (is_redirect) {
- xdp_return_frame(xdpi->xdpf);
- dma_unmap_single(sq->pdev, xdpi->dma_addr,
- xdpi->xdpf->len, DMA_TO_DEVICE);
- } else {
- /* Recycle RX page */
- mlx5e_page_release(rq, &xdpi->di, false);
+ struct mlx5e_xdp_wqe_info *wi;
+ u16 ci, i;
+
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+ wi = &sq->db.wqe_info[ci];
+
+ sq->cc += wi->num_wqebbs;
+
+ for (i = 0; i < wi->num_ds; i++) {
+ struct mlx5e_xdp_info xdpi =
+ mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi.xdpf);
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi.di, false);
+ }
}
}
}
@@ -292,7 +395,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdpi.xdpf = xdpf;
- if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
+ if (unlikely(!sq->xmit_xdp_frame(sq, &xdpi))) {
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpf->len, DMA_TO_DEVICE);
xdp_return_frame_rx_napi(xdpf);
@@ -300,8 +403,33 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
}
- if (flags & XDP_XMIT_FLUSH)
+ if (flags & XDP_XMIT_FLUSH) {
+ if (sq->mpwqe.wqe)
+ mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xmit_xdp_doorbell(sq);
+ }
return n - drops;
}
+
+void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
+{
+ struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
+
+ if (xdpsq->mpwqe.wqe)
+ mlx5e_xdp_mpwqe_complete(xdpsq);
+
+ mlx5e_xmit_xdp_doorbell(xdpsq);
+
+ if (xdpsq->redirect_flush) {
+ xdp_do_flush_map();
+ xdpsq->redirect_flush = false;
+ }
+}
+
+void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
+{
+ sq->xmit_xdp_frame = is_mpw ?
+ mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
+}
+
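One detail in the MPWQE path that is easy to miss: the per-session descriptor budget is deliberately one or two WQEBBs below the hardware maximum. A short worked example, assuming the usual 64-byte WQEBB (so MLX5_SEND_WQE_MAX_WQEBBS == 16 and MLX5_SEND_WQEBB_NUM_DS == 4):

	/* Illustrative arithmetic only, not driver code.
	 *
	 * 16 WQEBBs * 4 DS = 64 data segments, which cannot be encoded in the
	 * 6-bit DS count of the Ctrl Segment (maximum 63), so at least one
	 * WQEBB must be shaved off.
	 *
	 *   L1_CACHE_BYTES == 64:  15 WQEBBs * 64 B = 960 B -> 15 cache lines
	 *   L1_CACHE_BYTES == 128: 14 WQEBBs * 64 B = 896 B ->  7 cache lines
	 *
	 * Either way a full session stays cache-line aligned, which is what
	 * MLX5E_XDP_MPW_MAX_WQEBBS encodes above.
	 */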
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 6dfab045925f..3a67cb3cd179 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -37,27 +37,62 @@
#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT \
- ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
+ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
+#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
void *va, u16 *rx_headroom, u32 *len);
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
-
-bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq);
+void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
+void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
+ if (sq->doorbell_cseg) {
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
+ sq->doorbell_cseg = NULL;
+ }
+}
+
+static inline void
+mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, dma_addr_t dma_addr, u16 dma_len)
+{
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5_wqe_data_seg *dseg =
+ (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count++;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+ dseg->lkey = sq->mkey_be;
+}
+
+static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq,
+ struct mlx5e_tx_wqe **wqe)
+{
struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *wqe;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ memset(*wqe, 0, sizeof(**wqe));
+}
- wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+static inline void
+mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
+ struct mlx5e_xdp_info *xi)
+{
+ u32 i = (*fifo->pc)++ & fifo->mask;
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
+ fifo->xi[i] = *xi;
+}
+
+static inline struct mlx5e_xdp_info
+mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
+{
+ return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
#endif
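A note on the xdpi FIFO helpers above: the producer and consumer counters are free-running and the FIFO length is a power of two (it is sized to wq_sz * MLX5_SEND_WQEBB_NUM_DS when allocated), so masking with size - 1 wraps the index without any modulo. A standalone sketch of the same ring discipline, with hypothetical sizes and names:

	/* Illustration only: the index math behind mlx5e_xdpi_fifo_push()
	 * and mlx5e_xdpi_fifo_pop().
	 */
	#define EXAMPLE_FIFO_SIZE 8			/* must be a power of two */

	static unsigned int example_pc, example_cc;	/* free-running counters */
	static int example_slots[EXAMPLE_FIFO_SIZE];

	static void example_push(int v)
	{
		example_slots[example_pc++ & (EXAMPLE_FIFO_SIZE - 1)] = v;
	}

	static int example_pop(void)
	{
		return example_slots[example_cc++ & (EXAMPLE_FIFO_SIZE - 1)];
	}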
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 128a82b1dbfc..53608afd39b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -254,11 +254,13 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_ipsec_metadata *mdata;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct xfrm_state *x;
+ struct sec_path *sp;
if (!xo)
return skb;
- if (unlikely(skb->sp->len != 1)) {
+ sp = skb_sec_path(skb);
+ if (unlikely(sp->len != 1)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
goto drop;
}
@@ -305,10 +307,11 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo;
struct xfrm_state *xs;
+ struct sec_path *sp;
u32 sa_handle;
- skb->sp = secpath_dup(skb->sp);
- if (unlikely(!skb->sp)) {
+ sp = secpath_set(skb);
+ if (unlikely(!sp)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
return NULL;
}
@@ -320,8 +323,9 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
return NULL;
}
- skb->sp->xvec[skb->sp->len++] = xs;
- skb->sp->olen++;
+ sp = skb_sec_path(skb);
+ sp->xvec[sp->len++] = xs;
+ sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
@@ -372,10 +376,11 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
netdev_features_t features)
{
+ struct sec_path *sp = skb_sec_path(skb);
struct xfrm_state *x;
- if (skb->sp && skb->sp->len) {
- x = skb->sp->xvec[0];
+ if (sp && sp->len) {
+ x = sp->xvec[0];
if (x && x->xso.offload_handle)
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index f480763dcd0d..c9df08133718 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -135,14 +135,15 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
}
-static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
- "rx_cqe_moder",
- "tx_cqe_moder",
- "rx_cqe_compress",
- "rx_striding_rq",
- "rx_no_csum_complete",
+typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
+
+struct pflag_desc {
+ char name[ETH_GSTRING_LEN];
+ mlx5e_pflag_handler handler;
};
+static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS];
+
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
int i, num_stats = 0;
@@ -153,7 +154,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
return num_stats;
case ETH_SS_PRIV_FLAGS:
- return ARRAY_SIZE(mlx5e_priv_flags);
+ return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
return mlx5e_self_test_num(priv);
/* fallthrough */
@@ -183,8 +184,9 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
+ for (i = 0; i < MLX5E_NUM_PFLAGS; i++)
+ strcpy(data + i * ETH_GSTRING_LEN,
+ mlx5e_priv_flags[i].name);
break;
case ETH_SS_TEST:
@@ -353,7 +355,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;
if (!netif_is_rxfh_configured(priv->netdev))
- mlx5e_build_default_indir_rqt(new_channels.params.indirection_rqt,
+ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, count);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
@@ -785,10 +787,9 @@ static void get_lp_advertising(u32 eth_proto_lp,
ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
}
-static int mlx5e_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *link_ksettings)
+int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ struct ethtool_link_ksettings *link_ksettings)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
u32 rx_pause = 0;
@@ -804,7 +805,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
if (err) {
- netdev_err(netdev, "%s: query port ptys failed: %d\n",
+ netdev_err(priv->netdev, "%s: query port ptys failed: %d\n",
__func__, err);
goto err_query_regs;
}
@@ -824,7 +825,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
get_supported(eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
- get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
+ get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -844,7 +845,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
Autoneg);
if (get_fec_supported_advertised(mdev, link_ksettings))
- netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
+ netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n",
__func__, err);
if (!an_disable_admin)
@@ -855,6 +856,14 @@ err_query_regs:
return err;
}
+static int mlx5e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
+}
+
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
@@ -869,10 +878,9 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
return ptys_modes;
}
-static int mlx5e_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *link_ksettings)
+int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ const struct ethtool_link_ksettings *link_ksettings)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 eth_proto_cap, eth_proto_admin;
bool an_changes = false;
@@ -892,14 +900,14 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
if (err) {
- netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+ netdev_err(priv->netdev, "%s: query port eth proto cap failed: %d\n",
__func__, err);
goto out;
}
link_modes = link_modes & eth_proto_cap;
if (!link_modes) {
- netdev_err(netdev, "%s: Not supported link mode(s) requested",
+ netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
__func__);
err = -EINVAL;
goto out;
@@ -907,7 +915,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
if (err) {
- netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+ netdev_err(priv->netdev, "%s: query port eth proto admin failed: %d\n",
__func__, err);
goto out;
}
@@ -929,9 +937,17 @@ out:
return err;
}
+static int mlx5e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
+}
+
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
{
- return sizeof(priv->channels.params.toeplitz_hash_key);
+ return sizeof(priv->rss_params.toeplitz_hash_key);
}
static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
@@ -957,50 +973,27 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rss_params *rss = &priv->rss_params;
if (indir)
- memcpy(indir, priv->channels.params.indirection_rqt,
- sizeof(priv->channels.params.indirection_rqt));
+ memcpy(indir, rss->indirection_rqt,
+ sizeof(rss->indirection_rqt));
if (key)
- memcpy(key, priv->channels.params.toeplitz_hash_key,
- sizeof(priv->channels.params.toeplitz_hash_key));
+ memcpy(key, rss->toeplitz_hash_key,
+ sizeof(rss->toeplitz_hash_key));
if (hfunc)
- *hfunc = priv->channels.params.rss_hfunc;
+ *hfunc = rss->hfunc;
return 0;
}
-static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
-{
- void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- struct mlx5_core_dev *mdev = priv->mdev;
- int ctxlen = MLX5_ST_SZ_BYTES(tirc);
- int tt;
-
- MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(tirc, 0, ctxlen);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
- mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
- }
-
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
- return;
-
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(tirc, 0, ctxlen);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
- mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in, inlen);
- }
-}
-
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_rss_params *rss = &priv->rss_params;
int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
bool hash_changed = false;
void *in;
@@ -1016,15 +1009,14 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mutex_lock(&priv->state_lock);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
- hfunc != priv->channels.params.rss_hfunc) {
- priv->channels.params.rss_hfunc = hfunc;
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) {
+ rss->hfunc = hfunc;
hash_changed = true;
}
if (indir) {
- memcpy(priv->channels.params.indirection_rqt, indir,
- sizeof(priv->channels.params.indirection_rqt));
+ memcpy(rss->indirection_rqt, indir,
+ sizeof(rss->indirection_rqt));
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
u32 rqtn = priv->indir_rqt.rqtn;
@@ -1032,7 +1024,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
.is_rss = true,
{
.rss = {
- .hfunc = priv->channels.params.rss_hfunc,
+ .hfunc = rss->hfunc,
.channels = &priv->channels,
},
},
@@ -1043,10 +1035,9 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
}
if (key) {
- memcpy(priv->channels.params.toeplitz_hash_key, key,
- sizeof(priv->channels.params.toeplitz_hash_key));
- hash_changed = hash_changed ||
- priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP;
+ memcpy(rss->toeplitz_hash_key, key,
+ sizeof(rss->toeplitz_hash_key));
+ hash_changed = hash_changed || rss->hfunc == ETH_RSS_HASH_TOP;
}
if (hash_changed)
@@ -1150,25 +1141,31 @@ static int mlx5e_set_tunable(struct net_device *dev,
return err;
}
-static void mlx5e_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
+void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
int err;
err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
&pauseparam->tx_pause);
if (err) {
- netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
+ netdev_err(priv->netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
__func__, err);
}
}
-static int mlx5e_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_ethtool_get_pauseparam(priv, pauseparam);
+}
+
+int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -1179,13 +1176,21 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
pauseparam->rx_pause ? 1 : 0,
pauseparam->tx_pause ? 1 : 0);
if (err) {
- netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+ netdev_err(priv->netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
__func__, err);
}
return err;
}
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
+}
+
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info)
{
@@ -1505,8 +1510,6 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
return 0;
}
-typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
-
static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
bool is_rx_cq)
{
@@ -1669,23 +1672,58 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
return 0;
}
+static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channels new_channels = {};
+ int err;
+
+ if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+ return -EOPNOTSUPP;
+
+ new_channels.params = priv->channels.params;
+
+ MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_XDP_TX_MPWQE, enable);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ return 0;
+ }
+
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ return err;
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ return 0;
+}
+
+static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
+ { "rx_cqe_moder", set_pflag_rx_cqe_based_moder },
+ { "tx_cqe_moder", set_pflag_tx_cqe_based_moder },
+ { "rx_cqe_compress", set_pflag_rx_cqe_compress },
+ { "rx_striding_rq", set_pflag_rx_striding_rq },
+ { "rx_no_csum_complete", set_pflag_rx_no_csum_complete },
+ { "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe },
+};
+
static int mlx5e_handle_pflag(struct net_device *netdev,
u32 wanted_flags,
- enum mlx5e_priv_flag flag,
- mlx5e_pflag_handler pflag_handler)
+ enum mlx5e_priv_flag flag)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- bool enable = !!(wanted_flags & flag);
+ bool enable = !!(wanted_flags & BIT(flag));
u32 changes = wanted_flags ^ priv->channels.params.pflags;
int err;
- if (!(changes & flag))
+ if (!(changes & BIT(flag)))
return 0;
- err = pflag_handler(netdev, enable);
+ err = mlx5e_priv_flags[flag].handler(netdev, enable);
if (err) {
- netdev_err(netdev, "%s private flag 0x%x failed err %d\n",
- enable ? "Enable" : "Disable", flag, err);
+ netdev_err(netdev, "%s private flag '%s' failed err %d\n",
+ enable ? "Enable" : "Disable", mlx5e_priv_flags[flag].name, err);
return err;
}
@@ -1696,38 +1734,17 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ enum mlx5e_priv_flag pflag;
int err;
mutex_lock(&priv->state_lock);
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_RX_CQE_BASED_MODER,
- set_pflag_rx_cqe_based_moder);
- if (err)
- goto out;
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_TX_CQE_BASED_MODER,
- set_pflag_tx_cqe_based_moder);
- if (err)
- goto out;
-
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_RX_CQE_COMPRESS,
- set_pflag_rx_cqe_compress);
- if (err)
- goto out;
-
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_RX_STRIDING_RQ,
- set_pflag_rx_striding_rq);
- if (err)
- goto out;
-
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
- set_pflag_rx_no_csum_complete);
+ for (pflag = 0; pflag < MLX5E_NUM_PFLAGS; pflag++) {
+ err = mlx5e_handle_pflag(netdev, pflags, pflag);
+ if (err)
+ break;
+ }
-out:
mutex_unlock(&priv->state_lock);
/* Need to fix some features.. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index c18dcebe1462..4421c10f58ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -771,6 +771,112 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}
+static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ return MLX5E_TT_IPV4_TCP;
+ case TCP_V6_FLOW:
+ return MLX5E_TT_IPV6_TCP;
+ case UDP_V4_FLOW:
+ return MLX5E_TT_IPV4_UDP;
+ case UDP_V6_FLOW:
+ return MLX5E_TT_IPV6_UDP;
+ case AH_V4_FLOW:
+ return MLX5E_TT_IPV4_IPSEC_AH;
+ case AH_V6_FLOW:
+ return MLX5E_TT_IPV6_IPSEC_AH;
+ case ESP_V4_FLOW:
+ return MLX5E_TT_IPV4_IPSEC_ESP;
+ case ESP_V6_FLOW:
+ return MLX5E_TT_IPV6_IPSEC_ESP;
+ case IPV4_FLOW:
+ return MLX5E_TT_IPV4;
+ case IPV6_FLOW:
+ return MLX5E_TT_IPV6;
+ default:
+ return MLX5E_NUM_INDIR_TIRS;
+ }
+}
+
+static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ enum mlx5e_traffic_types tt;
+ u8 rx_hash_field = 0;
+ void *in;
+
+ tt = flow_type_to_traffic_type(nfc->flow_type);
+ if (tt == MLX5E_NUM_INDIR_TIRS)
+ return -EINVAL;
+
+ /* RSS does not support anything other than hashing to queues
+ * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
+ * port.
+ */
+ if (nfc->flow_type != TCP_V4_FLOW &&
+ nfc->flow_type != TCP_V6_FLOW &&
+ nfc->flow_type != UDP_V4_FLOW &&
+ nfc->flow_type != UDP_V6_FLOW)
+ return -EOPNOTSUPP;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EOPNOTSUPP;
+
+ if (nfc->data & RXH_IP_SRC)
+ rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
+ if (nfc->data & RXH_IP_DST)
+ rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
+ if (nfc->data & RXH_L4_B_0_1)
+ rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mutex_lock(&priv->state_lock);
+
+ if (rx_hash_field == priv->rss_params.rx_hash_fields[tt])
+ goto out;
+
+ priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
+ mlx5e_modify_tirs_hash(priv, in, inlen);
+
+out:
+ mutex_unlock(&priv->state_lock);
+ kvfree(in);
+ return 0;
+}
+
+static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ enum mlx5e_traffic_types tt;
+ u32 hash_field = 0;
+
+ tt = flow_type_to_traffic_type(nfc->flow_type);
+ if (tt == MLX5E_NUM_INDIR_TIRS)
+ return -EINVAL;
+
+ hash_field = priv->rss_params.rx_hash_fields[tt];
+ nfc->data = 0;
+
+ if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
+ nfc->data |= RXH_IP_SRC;
+ if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
+ nfc->data |= RXH_IP_DST;
+ if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
+ nfc->data |= RXH_L4_B_0_1;
+ if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
+ nfc->data |= RXH_L4_B_2_3;
+
+ return 0;
+}
+
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
int err = 0;
@@ -783,6 +889,9 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
break;
+ case ETHTOOL_SRXFH:
+ err = mlx5e_set_rss_hash_opt(priv, cmd);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -810,6 +919,9 @@ int mlx5e_get_rxnfc(struct net_device *dev,
case ETHTOOL_GRXCLSRLALL:
err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
break;
+ case ETHTOOL_GRXFH:
+ err = mlx5e_get_rss_hash_opt(priv, info);
+ break;
default:
err = -EOPNOTSUPP;
break;
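With the ETHTOOL_SRXFH and ETHTOOL_GRXFH cases wired up above, the per-traffic-type RX hash fields become tunable through the standard ethtool flow-hash interface, e.g. "ethtool -N <ifname> rx-flow-hash udp4 sdfn" to hash UDP/IPv4 flows on source/destination IP plus both L4 ports, and "ethtool -n <ifname> rx-flow-hash udp4" to read the setting back (interface name hypothetical). Note that mlx5e_set_rss_hash_opt() only accepts changes for the TCP and UDP flow types.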
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b70cb6fd164c..8cfd2ec7c0a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -49,6 +49,8 @@
#include "lib/clock.h"
#include "en/port.h"
#include "en/xdp.h"
+#include "lib/eq.h"
+#include "en/monitor_stats.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -59,6 +61,7 @@ struct mlx5e_rq_param {
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
+ bool is_mpw;
};
struct mlx5e_cq_param {
@@ -228,7 +231,7 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
MLX5_WQ_TYPE_CYCLIC;
}
-static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 port_state;
@@ -267,7 +270,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
mlx5e_stats_grps[i].update_stats(priv);
}
-static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
+void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
int i;
@@ -298,33 +301,35 @@ void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->update_stats_work);
}
-static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
- enum mlx5_dev_event event, unsigned long param)
+static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5e_priv *priv = vpriv;
+ struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
+ struct mlx5_eqe *eqe = data;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
- return;
+ if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
+ return NOTIFY_DONE;
- switch (event) {
- case MLX5_DEV_EVENT_PORT_UP:
- case MLX5_DEV_EVENT_PORT_DOWN:
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
queue_work(priv->wq, &priv->update_carrier_work);
break;
default:
- break;
+ return NOTIFY_DONE;
}
+
+ return NOTIFY_OK;
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
- set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
+ priv->events_nb.notifier_call = async_event;
+ mlx5_notifier_register(priv->mdev, &priv->events_nb);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
- synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
+ mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
@@ -988,18 +993,42 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
- kvfree(sq->db.xdpi);
+ kvfree(sq->db.xdpi_fifo.xi);
+ kvfree(sq->db.wqe_info);
+}
+
+static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
+{
+ struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
+ GFP_KERNEL, numa);
+ if (!xdpi_fifo->xi)
+ return -ENOMEM;
+
+ xdpi_fifo->pc = &sq->xdpi_fifo_pc;
+ xdpi_fifo->cc = &sq->xdpi_fifo_cc;
+ xdpi_fifo->mask = dsegs_per_wq - 1;
+
+ return 0;
}
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int err;
- sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
- GFP_KERNEL, numa);
- if (!sq->db.xdpi) {
- mlx5e_free_xdpsq_db(sq);
+ sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.wqe_info)
return -ENOMEM;
+
+ err = mlx5e_alloc_xdpsq_fifo(sq, numa);
+ if (err) {
+ mlx5e_free_xdpsq_db(sq);
+ return err;
}
return 0;
@@ -1558,11 +1587,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
struct mlx5e_xdpsq *sq,
bool is_redirect)
{
- unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
struct mlx5e_create_sq_param csp = {};
- unsigned int inline_hdr_sz = 0;
int err;
- int i;
err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
if (err)
@@ -1573,30 +1599,40 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
- if (is_redirect)
- set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
goto err_free_xdpsq;
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
- ds_cnt++;
- }
+ mlx5e_set_xmit_fp(sq, param->is_mpw);
- /* Pre initialize fixed WQE fields */
- for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg;
+ if (!param->is_mpw) {
+ unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+ unsigned int inline_hdr_sz = 0;
+ int i;
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+ ds_cnt++;
+ }
+
+ /* Pre-initialize fixed WQE fields */
+ for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
+ struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i];
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
- dseg->lkey = sq->mkey_be;
+ dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
+ dseg->lkey = sq->mkey_be;
+
+ wi->num_wqebbs = 1;
+ wi->num_ds = 1;
+ }
}
return 0;
@@ -1608,7 +1644,7 @@ err_free_xdpsq:
return err;
}
-static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
+static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
struct mlx5e_channel *c = sq->channel;
@@ -1616,7 +1652,7 @@ static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
napi_synchronize(&c->napi);
mlx5e_destroy_sq(c->mdev, sq->sqn);
- mlx5e_free_xdpsq_descs(sq);
+ mlx5e_free_xdpsq_descs(sq, rq);
mlx5e_free_xdpsq(sq);
}
@@ -1769,11 +1805,6 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
mlx5e_free_cq(cq);
}
-static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
-{
- return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
-}
-
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@ -1924,9 +1955,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
struct net_dim_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
- int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
unsigned int irq;
int err;
@@ -2009,7 +2040,7 @@ err_close_rq:
err_close_xdp_sq:
if (c->xdp)
- mlx5e_close_xdpsq(&c->rq.xdpsq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);
err_close_sqs:
mlx5e_close_sqs(c);
@@ -2062,10 +2093,10 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
- mlx5e_close_xdpsq(&c->xdpsq);
+ mlx5e_close_xdpsq(&c->xdpsq, NULL);
mlx5e_close_rq(&c->rq);
if (c->xdp)
- mlx5e_close_xdpsq(&c->rq.xdpsq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
napi_disable(&c->napi);
@@ -2232,6 +2263,8 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
void *cqc = param->cqc;
MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -2308,6 +2341,7 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
@@ -2510,7 +2544,7 @@ static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, ilog2(sz));
- ix = priv->channels.params.indirection_rqt[ix];
+ ix = priv->rss_params.indirection_rqt[ix];
rqn = rrp.rss.channels->c[ix]->rq.rqn;
} else {
rqn = rrp.rqn;
@@ -2593,7 +2627,7 @@ static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
{
.rss = {
.channels = chs,
- .hfunc = chs->params.rss_hfunc,
+ .hfunc = priv->rss_params.hfunc,
}
},
};
@@ -2613,6 +2647,54 @@ static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
mlx5e_redirect_rqts(priv, drop_rrp);
}
+static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
+ [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+ [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+};
+
+struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
+{
+ return tirc_default_config[tt];
+}
+
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
if (!params->lro_en)
@@ -2628,116 +2710,68 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
- enum mlx5e_traffic_types tt,
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
+ const struct mlx5e_tirc_config *ttconfig,
void *tirc, bool inner)
{
void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
- MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
- if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
+ MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
+ if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
void *rss_key = MLX5_ADDR_OF(tirc, tirc,
rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc,
rx_hash_toeplitz_key);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- memcpy(rss_key, params->toeplitz_hash_key, len);
+ memcpy(rss_key, rss_params->toeplitz_hash_key, len);
}
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ ttconfig->l3_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ ttconfig->l4_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ ttconfig->rx_hash_fields);
+}
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
+static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
+ enum mlx5e_traffic_types tt,
+ u32 rx_hash_fields)
+{
+ *ttconfig = tirc_default_config[tt];
+ ttconfig->rx_hash_fields = rx_hash_fields;
+}
- case MLX5E_TT_IPV6_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+{
+ void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+ struct mlx5e_rss_params *rss = &priv->rss_params;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+ struct mlx5e_tirc_config ttconfig;
+ int tt;
- case MLX5E_TT_IPV4_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
+ MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
- case MLX5E_TT_IPV6_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(tirc, 0, ctxlen);
+ mlx5e_update_rx_hash_fields(&ttconfig, tt,
+ rss->rx_hash_fields[tt]);
+ mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+ }
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
+ if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ return;
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- default:
- WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(tirc, 0, ctxlen);
+ mlx5e_update_rx_hash_fields(&ttconfig, tt,
+ rss->rx_hash_fields[tt]);
+ mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
+ mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
+ inlen);
}
}
@@ -2794,7 +2828,8 @@ static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
+ mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
+ &tirc_default_config[tt], tirc, true);
}
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
@@ -2825,7 +2860,7 @@ static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}
-static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
+int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
@@ -2905,7 +2940,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_activate_channels(&priv->channels);
netif_tx_start_all_queues(priv->netdev);
- if (MLX5_ESWITCH_MANAGER(priv->mdev))
+ if (mlx5e_is_vport_rep(priv))
mlx5e_add_sqs_fwd_rules(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2916,7 +2951,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_redirect_rqts_to_drop(priv);
- if (MLX5_ESWITCH_MANAGER(priv->mdev))
+ if (mlx5e_is_vport_rep(priv))
mlx5e_remove_sqs_fwd_rules(priv);
/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -3168,7 +3203,7 @@ err_close_tises:
return err;
}
-void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
+static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
int tc;
@@ -3186,7 +3221,9 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+
+ mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
+ &tirc_default_config[tt], tirc, false);
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
@@ -3391,11 +3428,14 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- return mlx5e_configure_flower(priv, cls_flower, flags);
+ return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+ flags);
case TC_CLSFLOWER_DESTROY:
- return mlx5e_delete_flower(priv, cls_flower, flags);
+ return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+ flags);
case TC_CLSFLOWER_STATS:
- return mlx5e_stats_flower(priv, cls_flower, flags);
+ return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+ flags);
default:
return -EOPNOTSUPP;
}
@@ -3408,7 +3448,8 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
+ return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
+ MLX5E_TC_NIC_OFFLOAD);
default:
return -EOPNOTSUPP;
}
@@ -3451,7 +3492,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-static void
+void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -3459,8 +3500,10 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- /* update HW stats in background for next time */
- mlx5e_queue_update_stats(priv);
+ if (!mlx5e_monitor_counter_supported(priv)) {
+ /* update HW stats in background for next time */
+ mlx5e_queue_update_stats(priv);
+ }
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
@@ -3593,7 +3636,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- if (!enable && mlx5e_tc_num_filters(priv)) {
+ if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -3895,7 +3938,7 @@ static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
#ifdef CONFIG_MLX5_ESWITCH
-static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -3932,8 +3975,8 @@ static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}
-static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
- int max_tx_rate)
+int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -3974,8 +4017,8 @@ static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
mlx5_ifla_link2vport(link_state));
}
-static int mlx5e_get_vf_config(struct net_device *dev,
- int vf, struct ifla_vf_info *ivi)
+int mlx5e_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -3988,8 +4031,8 @@ static int mlx5e_get_vf_config(struct net_device *dev,
return 0;
}
-static int mlx5e_get_vf_stats(struct net_device *dev,
- int vf, struct ifla_vf_stats *vf_stats)
+int mlx5e_get_vf_stats(struct net_device *dev,
+ int vf, struct ifla_vf_stats *vf_stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -4050,8 +4093,7 @@ static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
queue_work(priv->wq, &vxlan_work->work);
}
-static void mlx5e_add_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4064,8 +4106,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
}
-static void mlx5e_del_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4115,9 +4156,9 @@ out:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
-static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
- struct net_device *netdev,
- netdev_features_t features)
+netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4140,17 +4181,17 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
struct mlx5e_txqsq *sq)
{
- struct mlx5_eq *eq = sq->cq.mcq.eq;
+ struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
u32 eqe_count;
netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
- eq->eqn, eq->cons_index, eq->irqn);
+ eq->core.eqn, eq->core.cons_index, eq->core.irqn);
eqe_count = mlx5_eq_poll_irq_disabled(eq);
if (!eqe_count)
return false;
- netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
+ netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn);
sq->channel->stats->eq_rearm++;
return true;
}
@@ -4377,8 +4418,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
- .ndo_has_offload_stats = mlx5e_has_offload_stats,
- .ndo_get_offload_stats = mlx5e_get_offload_stats,
#endif
};
@@ -4524,15 +4563,23 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
mlx5e_init_rq_type_params(mdev, params);
}
-void mlx5e_build_rss_params(struct mlx5e_params *params)
+void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
+ u16 num_channels)
{
- params->rss_hfunc = ETH_RSS_HASH_XOR;
- netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
- mlx5e_build_default_indir_rqt(params->indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, params->num_channels);
+ enum mlx5e_traffic_types tt;
+
+ rss_params->hfunc = ETH_RSS_HASH_XOR;
+ netdev_rss_key_fill(rss_params->toeplitz_hash_key,
+ sizeof(rss_params->toeplitz_hash_key));
+ mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, num_channels);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ rss_params->rx_hash_fields[tt] =
+ tirc_default_config[tt].rx_hash_fields;
}
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
u16 max_channels, u16 mtu)
{
@@ -4548,6 +4595,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ /* XDP SQ */
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
+ MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
+
/* set CQE compression */
params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
@@ -4581,7 +4632,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
- mlx5e_build_rss_params(params);
+ mlx5e_build_rss_params(rss_params, params->num_channels);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4596,12 +4647,6 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
}
}
-#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-static const struct switchdev_ops mlx5e_switchdev_ops = {
- .switchdev_port_attr_get = mlx5e_attr_get,
-};
-#endif
-
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4711,12 +4756,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev);
-
-#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
- if (MLX5_ESWITCH_MANAGER(mdev))
- netdev->switchdev_ops = &mlx5e_switchdev_ops;
-#endif
-
mlx5e_ipsec_build_netdev(priv);
mlx5e_tls_build_netdev(priv);
}
@@ -4754,14 +4793,16 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rss_params *rss = &priv->rss_params;
int err;
err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
if (err)
return err;
- mlx5e_build_nic_params(mdev, &priv->channels.params,
- mlx5e_get_netdev_max_channels(netdev), netdev->mtu);
+ mlx5e_build_nic_params(mdev, rss, &priv->channels.params,
+ mlx5e_get_netdev_max_channels(netdev),
+ netdev->mtu);
mlx5e_timestamp_init(priv);
@@ -4891,9 +4932,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_lag_add(mdev, netdev);
mlx5e_enable_async_events(priv);
-
- if (MLX5_ESWITCH_MANAGER(priv->mdev))
- mlx5e_register_vport_reps(priv);
+ if (mlx5e_monitor_counter_supported(priv))
+ mlx5e_monitor_counter_init(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
@@ -4927,8 +4967,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
- if (MLX5_ESWITCH_MANAGER(priv->mdev))
- mlx5e_unregister_vport_reps(priv);
+ if (mlx5e_monitor_counter_supported(priv))
+ mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
@@ -4981,7 +5021,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
netif_carrier_off(netdev);
#ifdef CONFIG_MLX5_EN_ARFS
- netdev->rx_cpu_rmap = mdev->rmap;
+ netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
#endif
return 0;
@@ -5036,7 +5076,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
if (priv->channels.params.num_channels > max_nch) {
mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
priv->channels.params.num_channels = max_nch;
- mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
+ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, max_nch);
}
@@ -5125,7 +5165,6 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
- void *rpriv = NULL;
void *priv;
int err;
int nch;
@@ -5135,20 +5174,18 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
return NULL;
#ifdef CONFIG_MLX5_ESWITCH
- if (MLX5_ESWITCH_MANAGER(mdev)) {
- rpriv = mlx5e_alloc_nic_rep_priv(mdev);
- if (!rpriv) {
- mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
- return NULL;
- }
+ if (MLX5_ESWITCH_MANAGER(mdev) &&
+ mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+ mlx5e_rep_register_vport_reps(mdev);
+ return mdev;
}
#endif
nch = mlx5e_get_max_num_channels(mdev);
- netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, rpriv);
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
- goto err_free_rpriv;
+ return NULL;
}
priv = netdev_priv(netdev);
@@ -5174,30 +5211,26 @@ err_detach:
mlx5e_detach(mdev, priv);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
-err_free_rpriv:
- kfree(rpriv);
return NULL;
}
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
- struct mlx5e_priv *priv = vpriv;
- void *ppriv = priv->ppriv;
+ struct mlx5e_priv *priv;
+#ifdef CONFIG_MLX5_ESWITCH
+ if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
+ mlx5e_rep_unregister_vport_reps(mdev);
+ return;
+ }
+#endif
+ priv = vpriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
#endif
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
- kfree(ppriv);
-}
-
-static void *mlx5e_get_netdev(void *vpriv)
-{
- struct mlx5e_priv *priv = vpriv;
-
- return priv->netdev;
}
static struct mlx5_interface mlx5e_interface = {
@@ -5205,9 +5238,7 @@ static struct mlx5_interface mlx5e_interface = {
.remove = mlx5e_remove,
.attach = mlx5e_attach,
.detach = mlx5e_detach,
- .event = mlx5e_async_event,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
- .get_dev = mlx5e_get_netdev,
};
void mlx5e_init(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 820fe85100b0..96cc0c6a4014 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -42,14 +42,24 @@
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
+#include "en/tc_tun.h"
#include "fs_core.h"
-#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
- max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
+ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
+struct mlx5e_rep_indr_block_priv {
+ struct net_device *netdev;
+ struct mlx5e_rep_priv *rpriv;
+
+ struct list_head list;
+};
+
+static void mlx5e_rep_indr_unregister_block(struct net_device *netdev);
+
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
@@ -99,7 +109,7 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
}
}
-static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -122,6 +132,32 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
vport_stats->tx_bytes = vf_stats.rx_bytes;
}
+static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct rtnl_link_stats64 *vport_stats;
+
+ mlx5e_grp_802_3_update_stats(priv);
+
+ vport_stats = &priv->stats.vf_vport;
+
+ vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
+ vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
+ vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
+ vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+}
+
+static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ if (rep->vport == FDB_UPLINK_VPORT)
+ mlx5e_uplink_rep_update_hw_counters(priv);
+ else
+ mlx5e_vf_rep_update_hw_counters(priv);
+}
+
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -257,6 +293,22 @@ static int mlx5e_rep_set_channels(struct net_device *dev,
return 0;
}
+static int mlx5e_rep_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_coalesce(priv, coal);
+}
+
+static int mlx5e_rep_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_coalesce(priv, coal);
+}
+
static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -271,7 +323,55 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_ethtool_get_pauseparam(priv, pauseparam);
+}
+
+static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
+}
+
+static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
+}
+
+static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
+}
+
+static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
+ .get_drvinfo = mlx5e_rep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_rep_get_strings,
+ .get_sset_count = mlx5e_rep_get_sset_count,
+ .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
+ .get_ringparam = mlx5e_rep_get_ringparam,
+ .set_ringparam = mlx5e_rep_set_ringparam,
+ .get_channels = mlx5e_rep_get_channels,
+ .set_channels = mlx5e_rep_set_channels,
+ .get_coalesce = mlx5e_rep_get_coalesce,
+ .set_coalesce = mlx5e_rep_set_coalesce,
+ .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
+ .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
+};
+
+static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.get_drvinfo = mlx5e_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -281,24 +381,44 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.set_ringparam = mlx5e_rep_set_ringparam,
.get_channels = mlx5e_rep_get_channels,
.set_channels = mlx5e_rep_set_channels,
+ .get_coalesce = mlx5e_rep_get_coalesce,
+ .set_coalesce = mlx5e_rep_set_coalesce,
+ .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
+ .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
+ .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
+ .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
-int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct net_device *uplink_upper = NULL;
+ struct mlx5e_priv *uplink_priv = NULL;
+ struct net_device *uplink_dev;
if (esw->mode == SRIOV_NONE)
return -EOPNOTSUPP;
+ uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ if (uplink_dev) {
+ uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+ uplink_priv = netdev_priv(uplink_dev);
+ }
+
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id, rep->hw_id);
+ if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
+ ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr);
+ } else {
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ ether_addr_copy(attr->u.ppid.id, rep->hw_id);
+ }
break;
default:
return -EOPNOTSUPP;
@@ -519,6 +639,184 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
neigh_release(n);
}
+static struct mlx5e_rep_indr_block_priv *
+mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev)
+{
+ struct mlx5e_rep_indr_block_priv *cb_priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv,
+ &rpriv->uplink_priv.tc_indr_block_priv_list,
+ list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
+ struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
+
+ list_for_each_entry_safe(cb_priv, temp, head, list) {
+ mlx5e_rep_indr_unregister_block(cb_priv->netdev);
+ kfree(cb_priv);
+ }
+}
+
+static int
+mlx5e_rep_indr_offload(struct net_device *netdev,
+ struct tc_cls_flower_offload *flower,
+ struct mlx5e_rep_indr_block_priv *indr_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
+ int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
+ int err = 0;
+
+ switch (flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ err = mlx5e_configure_flower(netdev, priv, flower, flags);
+ break;
+ case TC_CLSFLOWER_DESTROY:
+ err = mlx5e_delete_flower(netdev, priv, flower, flags);
+ break;
+ case TC_CLSFLOWER_STATS:
+ err = mlx5e_stats_flower(netdev, priv, flower, flags);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
+{
+ struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
+ struct mlx5e_rep_priv *rpriv,
+ struct tc_block_offload *f)
+{
+ struct mlx5e_rep_indr_block_priv *indr_priv;
+ int err = 0;
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (indr_priv)
+ return -EEXIST;
+
+ indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
+ if (!indr_priv)
+ return -ENOMEM;
+
+ indr_priv->netdev = netdev;
+ indr_priv->rpriv = rpriv;
+ list_add(&indr_priv->list,
+ &rpriv->uplink_priv.tc_indr_block_priv_list);
+
+ err = tcf_block_cb_register(f->block,
+ mlx5e_rep_indr_setup_block_cb,
+ netdev, indr_priv, f->extack);
+ if (err) {
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+ }
+
+ return err;
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ mlx5e_rep_indr_setup_block_cb,
+ netdev);
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (indr_priv) {
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+ }
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static
+int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev)
+{
+ int err;
+
+ err = __tc_indr_block_cb_register(netdev, rpriv,
+ mlx5e_rep_indr_setup_tc_cb,
+ netdev);
+ if (err) {
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
+ mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
+ netdev_name(netdev), err);
+ }
+ return err;
+}
+
+static void mlx5e_rep_indr_unregister_block(struct net_device *netdev)
+{
+ __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+ netdev);
+}
+
+static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
+ uplink_priv.netdevice_nb);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (!mlx5e_tc_tun_device_to_offload(priv, netdev))
+ return NOTIFY_OK;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ mlx5e_rep_indr_register_block(rpriv, netdev);
+ break;
+ case NETDEV_UNREGISTER:
+ mlx5e_rep_indr_unregister_block(netdev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
struct mlx5e_neigh *m_neigh);
@@ -780,7 +1078,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
mlx5e_rep_neigh_entry_destroy(priv, nhe);
}
-static int mlx5e_rep_open(struct net_device *dev)
+static int mlx5e_vf_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -802,7 +1100,7 @@ unlock:
return err;
}
-static int mlx5e_rep_close(struct net_device *dev)
+static int mlx5e_vf_rep_close(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -839,24 +1137,14 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- return mlx5e_configure_flower(priv, cls_flower, flags);
+ return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+ flags);
case TC_CLSFLOWER_DESTROY:
- return mlx5e_delete_flower(priv, cls_flower, flags);
+ return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+ flags);
case TC_CLSFLOWER_STATS:
- return mlx5e_stats_flower(priv, cls_flower, flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- struct mlx5e_priv *priv = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
+ return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+ flags);
default:
return -EOPNOTSUPP;
}
@@ -869,7 +1157,8 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
+ MLX5E_TC_ESW_OFFLOAD);
default:
return -EOPNOTSUPP;
}
@@ -908,43 +1197,23 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
if (!MLX5_ESWITCH_MANAGER(priv->mdev))
return false;
- rep = rpriv->rep;
- if (esw->mode == SRIOV_OFFLOADS &&
- rep && rep->vport == FDB_UPLINK_VPORT)
- return true;
-
- return false;
-}
-
-static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep;
-
- if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+ if (!rpriv) /* non vport rep mlx5e instances don't use this field */
return false;
rep = rpriv->rep;
- if (rep && rep->vport != FDB_UPLINK_VPORT)
- return true;
-
- return false;
+ return (rep->vport == FDB_UPLINK_VPORT);
}
-bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
+static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
- if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
return true;
}
@@ -970,8 +1239,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev,
return 0;
}
-int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
- void *sp)
+static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
@@ -982,7 +1251,7 @@ int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
}
static void
-mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -991,37 +1260,93 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
+static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ return mlx5e_change_mtu(netdev, new_mtu, NULL);
+}
+
+static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
+}
+
+static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ return 0;
+}
+
static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
.switchdev_port_attr_get = mlx5e_attr_get,
};
-static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu)
-{
- return mlx5e_change_mtu(netdev, new_mtu, NULL);
-}
+static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
+ .ndo_open = mlx5e_vf_rep_open,
+ .ndo_stop = mlx5e_vf_rep_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
+ .ndo_setup_tc = mlx5e_rep_setup_tc,
+ .ndo_get_stats64 = mlx5e_vf_rep_get_stats,
+ .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
+ .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
+ .ndo_change_mtu = mlx5e_vf_rep_change_mtu,
+};
-static const struct net_device_ops mlx5e_netdev_ops_rep = {
- .ndo_open = mlx5e_rep_open,
- .ndo_stop = mlx5e_rep_close,
+static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
+ .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_stats64 = mlx5e_rep_get_stats,
- .ndo_has_offload_stats = mlx5e_has_offload_stats,
- .ndo_get_offload_stats = mlx5e_get_offload_stats,
- .ndo_change_mtu = mlx5e_change_rep_mtu,
+ .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
+ .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
+ .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
+ .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
+ .ndo_features_check = mlx5e_features_check,
+ .ndo_set_vf_mac = mlx5e_set_vf_mac,
+ .ndo_set_vf_rate = mlx5e_set_vf_rate,
+ .ndo_get_vf_config = mlx5e_get_vf_config,
+ .ndo_get_vf_stats = mlx5e_get_vf_stats,
};
-static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u16 mtu)
+bool mlx5e_eswitch_rep(struct net_device *netdev)
+{
+ if (netdev->netdev_ops == &mlx5e_netdev_ops_vf_rep ||
+ netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
+ return true;
+
+ return false;
+}
+
+static void mlx5e_build_rep_params(struct net_device *netdev)
{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_params *params;
+
u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ params = &priv->channels.params;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
- params->sw_mtu = mtu;
- params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
+ params->sw_mtu = netdev->mtu;
+
+ /* SQ */
+ if (rep->vport == FDB_UPLINK_VPORT)
+ params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ else
+ params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
/* RQ */
mlx5e_build_rq_params(mdev, params);
@@ -1035,24 +1360,38 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
/* RSS */
- mlx5e_build_rss_params(params);
+ mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}
static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_mtu;
- netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+ if (rep->vport == FDB_UPLINK_VPORT) {
+ SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
+ netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
+ /* we want a persistent mac for the uplink rep */
+ mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
+ netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (MLX5_CAP_GEN(mdev, qos))
+ netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+#endif
+ } else {
+ netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep;
+ eth_hw_addr_random(netdev);
+ netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops;
+ }
netdev->watchdog_timeo = 15 * HZ;
- netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
- netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
+ netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
netdev->hw_features |= NETIF_F_SG;
@@ -1063,13 +1402,10 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_TSO6;
netdev->hw_features |= NETIF_F_RXCSUM;
- netdev->features |= netdev->hw_features;
-
- eth_hw_addr_random(netdev);
+ if (rep->vport != FDB_UPLINK_VPORT)
+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
- netdev->min_mtu = ETH_MIN_MTU;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+ netdev->features |= netdev->hw_features;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
@@ -1086,7 +1422,7 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
- mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
+ mlx5e_build_rep_params(netdev);
mlx5e_build_rep_netdev(netdev);
mlx5e_timestamp_init(priv);
@@ -1209,94 +1545,173 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
- int err;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ int tc, err;
err = mlx5e_create_tises(priv);
if (err) {
mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
return err;
}
- return 0;
-}
-static const struct mlx5e_profile mlx5e_rep_profile = {
- .init = mlx5e_init_rep,
- .cleanup = mlx5e_cleanup_rep,
- .init_rx = mlx5e_init_rep_rx,
- .cleanup_rx = mlx5e_cleanup_rep_rx,
- .init_tx = mlx5e_init_rep_tx,
- .cleanup_tx = mlx5e_cleanup_nic_tx,
- .update_stats = mlx5e_rep_update_hw_counters,
- .update_carrier = NULL,
- .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
- .max_tc = 1,
-};
+ if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
+ uplink_priv = &rpriv->uplink_priv;
-/* e-Switch vport representors */
+ /* init shared tc flow table */
+ err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ if (err)
+ goto destroy_tises;
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
+ uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
+ err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
+ if (err) {
+ mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
+ goto tc_esw_cleanup;
+ }
+ }
-static int
-mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+ return 0;
+
+tc_esw_cleanup:
+ mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
+destroy_tises:
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ return err;
+}
+
+static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
- struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ int tc;
- int err;
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_add_sqs_fwd_rules(priv);
- if (err)
- return err;
+ if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
+ /* clean indirect TC block notifications */
+ unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
+ mlx5e_rep_indr_clean_block_privs(rpriv);
+
+ /* delete shared tc flow table */
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
}
+}
- err = mlx5e_rep_neigh_init(rpriv);
- if (err)
- goto err_remove_sqs;
+static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_mtu;
- /* init shared tc flow table */
- err = mlx5e_tc_esw_init(&rpriv->tc_ht);
- if (err)
- goto err_neigh_cleanup;
+ netdev->min_mtu = ETH_MIN_MTU;
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+}
- return 0;
+static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
+ struct mlx5_eqe *eqe = data;
-err_neigh_cleanup:
- mlx5e_rep_neigh_cleanup(rpriv);
-err_remove_sqs:
- mlx5e_remove_sqs_fwd_rules(priv);
- return err;
+ if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
+ return NOTIFY_DONE;
+
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ queue_work(priv->wq, &priv->update_carrier_work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
}
-static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
+static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
- struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_mtu;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_remove_sqs_fwd_rules(priv);
+ netdev->min_mtu = ETH_MIN_MTU;
+ mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+ mlx5e_set_dev_port_mtu(priv);
+
+ mlx5_lag_add(mdev, netdev);
+ priv->events_nb.notifier_call = uplink_rep_async_event;
+ mlx5_notifier_register(mdev, &priv->events_nb);
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_initialize(priv);
+ mlx5e_dcbnl_init_app(priv);
+#endif
+}
- /* clean uplink offloaded TC rules, delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->tc_ht);
+static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
- mlx5e_rep_neigh_cleanup(rpriv);
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_delete_app(priv);
+#endif
+ mlx5_notifier_unregister(mdev, &priv->events_nb);
+ mlx5_lag_remove(mdev);
}
+static const struct mlx5e_profile mlx5e_vf_rep_profile = {
+ .init = mlx5e_init_rep,
+ .cleanup = mlx5e_cleanup_rep,
+ .init_rx = mlx5e_init_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_tx = mlx5e_init_rep_tx,
+ .cleanup_tx = mlx5e_cleanup_rep_tx,
+ .enable = mlx5e_vf_rep_enable,
+ .update_stats = mlx5e_vf_rep_update_hw_counters,
+ .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .max_tc = 1,
+};
+
+static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
+ .init = mlx5e_init_rep,
+ .cleanup = mlx5e_cleanup_rep,
+ .init_rx = mlx5e_init_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_tx = mlx5e_init_rep_tx,
+ .cleanup_tx = mlx5e_cleanup_rep_tx,
+ .enable = mlx5e_uplink_rep_enable,
+ .disable = mlx5e_uplink_rep_disable,
+ .update_stats = mlx5e_uplink_rep_update_hw_counters,
+ .update_carrier = mlx5e_update_carrier,
+ .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .max_tc = MLX5E_MAX_NUM_TC,
+};
+
+/* e-Switch vport representors */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_rep_priv *uplink_rpriv;
+ const struct mlx5e_profile *profile;
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
- struct mlx5e_priv *upriv;
int nch, err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
if (!rpriv)
return -ENOMEM;
+ /* rpriv->rep to be looked up when profile->init() is called */
+ rpriv->rep = rep;
+
nch = mlx5e_get_max_num_channels(dev);
- netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, nch, rpriv);
+ profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
+ netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
@@ -1305,15 +1720,20 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
}
rpriv->netdev = netdev;
- rpriv->rep = rep;
rep->rep_if[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+ if (rep->vport == FDB_UPLINK_VPORT) {
+ err = mlx5e_create_mdev_resources(dev);
+ if (err)
+ goto err_destroy_netdev;
+ }
+
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
pr_warn("Failed to attach representor netdev for vport %d\n",
rep->vport);
- goto err_destroy_netdev;
+ goto err_destroy_mdev_resources;
}
err = mlx5e_rep_neigh_init(rpriv);
@@ -1323,32 +1743,25 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
- upriv = netdev_priv(uplink_rpriv->netdev);
- err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
- upriv);
- if (err)
- goto err_neigh_cleanup;
-
err = register_netdev(netdev);
if (err) {
pr_warn("Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_egdev_cleanup;
+ goto err_neigh_cleanup;
}
return 0;
-err_egdev_cleanup:
- tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
- upriv);
-
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
err_detach_netdev:
mlx5e_detach_netdev(netdev_priv(netdev));
+err_destroy_mdev_resources:
+ if (rep->vport == FDB_UPLINK_VPORT)
+ mlx5e_destroy_mdev_resources(dev);
+
err_destroy_netdev:
mlx5e_destroy_netdev(netdev_priv(netdev));
kfree(rpriv);
@@ -1361,18 +1774,13 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *uplink_rpriv;
void *ppriv = priv->ppriv;
- struct mlx5e_priv *upriv;
unregister_netdev(netdev);
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
- REP_ETH);
- upriv = netdev_priv(uplink_rpriv->netdev);
- tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
- upriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
+ if (rep->vport == FDB_UPLINK_VPORT)
+ mlx5e_destroy_mdev_resources(priv->mdev);
mlx5e_destroy_netdev(priv);
kfree(ppriv); /* mlx5e_rep_priv */
}
@@ -1386,14 +1794,13 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
return rpriv->netdev;
}
-static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
+void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
- for (vport = 1; vport < total_vfs; vport++) {
+ for (vport = 0; vport < total_vfs; vport++) {
struct mlx5_eswitch_rep_if rep_if = {};
rep_if.load = mlx5e_vport_rep_load;
@@ -1403,55 +1810,12 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
}
}
-static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
+void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
- for (vport = 1; vport < total_vfs; vport++)
+ for (vport = total_vfs - 1; vport >= 0; vport--)
mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}
-
-void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep_if rep_if;
- struct mlx5e_rep_priv *rpriv;
-
- rpriv = priv->ppriv;
- rpriv->netdev = priv->netdev;
-
- rep_if.load = mlx5e_nic_rep_load;
- rep_if.unload = mlx5e_nic_rep_unload;
- rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
- rep_if.priv = rpriv;
- INIT_LIST_HEAD(&rpriv->vport_sqs_list);
- mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/
-
- mlx5e_rep_register_vf_vports(priv); /* VFs vports */
-}
-
-void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
- mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
- mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/
-}
-
-void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
-{
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5e_rep_priv *rpriv;
-
- rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
- if (!rpriv)
- return NULL;
-
- rpriv->rep = &esw->offloads.vport_reps[0];
- return rpriv;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 844d32d5c29f..edd722824697 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -53,13 +53,33 @@ struct mlx5e_neigh_update_table {
unsigned long min_interval; /* jiffies */
};
+struct mlx5_rep_uplink_priv {
+ /* Filters DB - instantiated by the uplink representor and shared by
+ * the uplink's VFs
+ */
+ struct rhashtable tc_ht;
+
+ /* indirect block callbacks are invoked on bind/unbind events
+ * on registered higher level devices (e.g. tunnel devices)
+ *
+ * tc_indr_block_priv_list is used to look up indirect callback
+ * private data
+ *
+ * netdevice_nb is the netdev events notifier - used to register
+ * tunnel devices for block events
+ *
+ */
+ struct list_head tc_indr_block_priv_list;
+ struct notifier_block netdevice_nb;
+};
+
struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
struct net_device *netdev;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
- struct rhashtable tc_ht; /* valid for uplink rep */
+ struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
};
static inline
@@ -129,6 +149,8 @@ struct mlx5e_encap_entry {
struct net_device *out_dev;
int tunnel_type;
+ int tunnel_hlen;
+ int reformat_type;
u8 flags;
char *encap_header;
int encap_size;
@@ -140,16 +162,12 @@ struct mlx5e_rep_sq {
};
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
-void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
-void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
+void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev);
+void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev);
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
-int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, void *sp);
-bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
-
-int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
@@ -158,12 +176,17 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
+
+bool mlx5e_eswitch_rep(struct net_device *netdev);
+
#else /* CONFIG_MLX5_ESWITCH */
-static inline void mlx5e_register_vport_reps(struct mlx5e_priv *priv) {}
-static inline void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) {}
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
#endif
+static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv)
+{
+ return (MLX5_ESWITCH_MANAGER(priv->mdev) && priv->ppriv);
+}
#endif /* __MLX5E_REP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0b5ef6d4e815..1d0bb5ff8c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -554,9 +554,9 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
mlx5_cqwq_pop(&cq->wq);
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
+ "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
return;
}
@@ -898,7 +898,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
prefetchw(va); /* xdp_frame data area */
prefetch(data);
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
return NULL;
}
@@ -930,7 +930,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
u16 byte_cnt = cqe_bcnt - headlen;
struct sk_buff *skb;
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
return NULL;
}
@@ -1154,7 +1154,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi->consumed_strides += cstrides;
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
goto mpwrq_cqe_out;
}
@@ -1190,7 +1190,6 @@ mpwrq_cqe_out:
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
struct mlx5_cqe64 *cqe;
int work_done = 0;
@@ -1221,15 +1220,8 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
out:
- if (xdpsq->doorbell) {
- mlx5e_xmit_xdp_doorbell(xdpsq);
- xdpsq->doorbell = false;
- }
-
- if (xdpsq->redirect_flush) {
- xdp_do_flush_map();
- xdpsq->redirect_flush = false;
- }
+ if (rq->xdp_prog)
+ mlx5e_xdp_rx_poll_complete(rq);
mlx5_cqwq_update_db_record(&cq->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 4337afd610d7..d3fe48ff9da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
@@ -480,7 +481,10 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
+ (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
+
+void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -488,6 +492,9 @@ static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
void *out;
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+ return;
+
MLX5_SET(ppcnt_reg, in, local_port, 1);
out = pstats->IEEE_802_3_counters;
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
@@ -600,6 +607,9 @@ static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
void *out;
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+ return;
+
MLX5_SET(ppcnt_reg, in, local_port, 1);
out = pstats->RFC_2819_counters;
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
@@ -934,7 +944,7 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
};
static const struct counter_desc pport_pfc_stall_stats_desc[] = {
- { "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
+ { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};
@@ -1075,6 +1085,9 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
int prio;
void *out;
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+ return;
+
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
@@ -1086,13 +1099,13 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
}
static const struct counter_desc mlx5e_pme_status_desc[] = {
- { "module_unplug", 8 },
+ { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};
static const struct counter_desc mlx5e_pme_error_desc[] = {
- { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
- { "module_high_temp", 48 }, /* high temperature */
- { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
+ { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
+ { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
+ { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};
#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
@@ -1120,15 +1133,17 @@ static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
{
- struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
+ struct mlx5_pme_stats pme_stats;
int i;
+ mlx5_get_pme_stats(priv->mdev, &pme_stats);
+
for (i = 0; i < NUM_PME_STATUS_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
+ data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
mlx5e_pme_status_desc, i);
for (i = 0; i < NUM_PME_ERR_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
+ data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
mlx5e_pme_error_desc, i);
return idx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 3ff69ddae2d3..fe91ec06e3c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -278,5 +278,6 @@ extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;
void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);
+void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9dabe9d4b279..cae6c6d48984 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -44,15 +44,15 @@
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
-#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "lib/vxlan.h"
#include "fs_core.h"
#include "en/port.h"
+#include "en/tc_tun.h"
+#include "lib/devcom.h"
struct mlx5_nic_flow_attr {
u32 action;
@@ -69,25 +69,54 @@ struct mlx5_nic_flow_attr {
enum {
MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
- MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE),
- MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1),
- MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
- MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
- MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
- MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 5),
+ MLX5E_TC_FLOW_ESWITCH = MLX5E_TC_ESW_OFFLOAD,
+ MLX5E_TC_FLOW_NIC = MLX5E_TC_NIC_OFFLOAD,
+ MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 1),
+ MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
+ MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3),
+ MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4),
};
#define MLX5E_TC_MAX_SPLITS 1
+/* Helper struct for accessing a struct containing a list_head array.
+ * Containing struct
+ * |- Helper array
+ * [0] Helper item 0
+ * |- list_head item 0
+ * |- index (0)
+ * [1] Helper item 1
+ * |- list_head item 1
+ * |- index (1)
+ * To access the containing struct from one of the list_head items:
+ * 1. Get the helper item from the list_head item using
+ * helper item =
+ * container_of(list_head item, helper struct type, list_head field)
+ * 2. Get the containing struct from the helper item and its index in the array:
+ * containing struct =
+ * container_of(helper item, containing struct type, helper field[index])
+ */
+struct encap_flow_item {
+ struct list_head list;
+ int index;
+};
+
struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
u64 cookie;
u16 flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
- struct list_head encap; /* flows sharing the same encap ID */
+ /* Flow can be associated with multiple encap IDs.
+ * The number of encaps is bounded by the number of supported
+ * destinations.
+ */
+ struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5e_tc_flow *peer_flow;
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
struct list_head hairpin; /* flows sharing the same hairpin */
+ struct list_head peer; /* flows with peer flow */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -95,11 +124,12 @@ struct mlx5e_tc_flow {
};
struct mlx5e_tc_flow_parse_attr {
- struct ip_tunnel_info tun_info;
+ struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct net_device *filter_dev;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
void *mod_hdr_actions;
- int mirred_ifindex;
+ int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
@@ -316,7 +346,7 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
for (i = 0; i < sz; i++) {
ix = i;
- if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
+ if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, ilog2(sz));
ix = indirection_rqt[ix];
rqn = hp->pair->rqn[ix];
@@ -360,13 +390,15 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
void *tirc;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
+
memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+ mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
err = mlx5_core_create_tir(hp->func_mdev, in,
MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
@@ -569,7 +601,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
- int peer_ifindex = parse_attr->mirred_ifindex;
+ int peer_ifindex = parse_attr->mirred_ifindex[0];
struct mlx5_hairpin_params params;
struct mlx5_core_dev *peer_mdev;
struct mlx5e_hairpin_entry *hpe;
@@ -802,7 +834,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
mlx5_del_flow_rules(flow->rule[0]);
mlx5_fc_destroy(priv->mdev, counter);
- if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
+ if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
@@ -815,14 +847,15 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow);
+ struct mlx5e_tc_flow *flow, int out_index);
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack);
+ struct netlink_ext_ack *extack,
+ int out_index);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
@@ -836,7 +869,7 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
return rule;
- if (attr->mirror_count) {
+ if (attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
if (IS_ERR(flow->rule[1])) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
@@ -855,7 +888,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
{
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- if (attr->mirror_count)
+ if (attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
@@ -871,7 +904,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr->mirror_count = 0;
+ slow_attr->split_count = 0;
slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
@@ -888,7 +921,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
{
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr->mirror_count = 0;
+ slow_attr->split_count = 0;
slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow->flags &= ~MLX5E_TC_FLOW_SLOW;
@@ -909,6 +942,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
int err = 0, encap_err = 0;
+ int out_index;
if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
@@ -927,20 +961,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
goto err_max_prio_chain;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ int mirred_ifindex;
+
+ if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index];
out_dev = __dev_get_by_index(dev_net(priv->netdev),
- attr->parse_attr->mirred_ifindex);
- encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
- out_dev, &encap_dev, flow,
- extack);
- if (encap_err && encap_err != -EAGAIN) {
- err = encap_err;
+ mirred_ifindex);
+ err = mlx5e_attach_encap(priv,
+ &parse_attr->tun_info[out_index],
+ out_dev, &encap_dev, flow,
+ extack, out_index);
+ if (err && err != -EAGAIN)
goto err_attach_encap;
- }
+ if (err == -EAGAIN)
+ encap_err = err;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
- attr->out_rep[attr->out_count] = rpriv->rep;
- attr->out_mdev[attr->out_count++] = out_priv->mdev;
+ attr->dests[out_index].rep = rpriv->rep;
+ attr->dests[out_index].mdev = out_priv->mdev;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -955,7 +996,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(esw->dev, true);
+ counter = mlx5_fc_create(attr->counter_dev, true);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
goto err_create_counter;
@@ -984,15 +1025,16 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return 0;
err_add_rule:
- mlx5_fc_destroy(esw->dev, counter);
+ mlx5_fc_destroy(attr->counter_dev, counter);
err_create_counter:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
- mlx5e_detach_encap(priv, flow);
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+ if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+ mlx5e_detach_encap(priv, flow, out_index);
err_attach_encap:
err_max_prio_chain:
return err;
@@ -1004,6 +1046,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5_esw_flow_attr slow_attr;
+ int out_index;
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
if (flow->flags & MLX5E_TC_FLOW_SLOW)
@@ -1014,16 +1057,16 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_eswitch_del_vlan_action(esw, attr);
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
- mlx5e_detach_encap(priv, flow);
- kvfree(attr->parse_attr);
- }
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+ if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+ mlx5e_detach_encap(priv, flow, out_index);
+ kvfree(attr->parse_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(esw->dev, attr->counter);
+ mlx5_fc_destroy(attr->counter_dev, attr->counter);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -1033,10 +1076,12 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr slow_attr, *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
+ struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
- err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ err = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
e->encap_size, e->encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
@@ -1048,11 +1093,31 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(priv);
- list_for_each_entry(flow, &e->flows, encap) {
+ list_for_each_entry(efi, &e->flows, list) {
+ bool all_flow_encaps_valid = true;
+ int i;
+
+ flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
esw_attr = flow->esw_attr;
- esw_attr->encap_id = e->encap_id;
spec = &esw_attr->parse_attr->spec;
+ esw_attr->dests[efi->index].encap_id = e->encap_id;
+ esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ /* A flow can be associated with multiple encap entries.
+ * Before offloading the flow, verify that all of them have
+ * a valid neighbour.
+ */
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+ if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
+ all_flow_encaps_valid = false;
+ break;
+ }
+ }
+ /* Do not offload flows with unresolved neighbors */
+ if (!all_flow_encaps_valid)
+ continue;
/* update from slow path rule to encap rule */
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
if (IS_ERR(rule)) {
@@ -1075,14 +1140,18 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
+ struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
- list_for_each_entry(flow, &e->flows, encap) {
+ list_for_each_entry(efi, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
+ /* mark the flow's encap dest as non-valid */
+ flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1130,9 +1199,12 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
return;
list_for_each_entry(e, &nhe->encap_list, encap_list) {
+ struct encap_flow_item *efi;
if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
continue;
- list_for_each_entry(flow, &e->flows, encap) {
+ list_for_each_entry(efi, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow,
+ encaps[efi->index]);
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
counter = mlx5e_tc_get_counter(flow);
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
@@ -1162,11 +1234,11 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow, int out_index)
{
- struct list_head *next = flow->encap.next;
+ struct list_head *next = flow->encaps[out_index].list.next;
- list_del(&flow->encap);
+ list_del(&flow->encaps[out_index].list);
if (list_empty(next)) {
struct mlx5e_encap_entry *e;
@@ -1182,49 +1254,55 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
}
}
-static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
+static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
- mlx5e_tc_del_fdb_flow(priv, flow);
- else
- mlx5e_tc_del_nic_flow(priv, flow);
+ struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
+
+ if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
+ !(flow->flags & MLX5E_TC_FLOW_DUP))
+ return;
+
+ mutex_lock(&esw->offloads.peer_mutex);
+ list_del(&flow->peer);
+ mutex_unlock(&esw->offloads.peer_mutex);
+
+ flow->flags &= ~MLX5E_TC_FLOW_DUP;
+
+ mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
+ kvfree(flow->peer_flow);
+ flow->peer_flow = NULL;
}
-static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f)
+static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
- void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers);
- void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers);
- void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters);
- void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters);
+ struct mlx5_core_dev *dev = flow->priv->mdev;
+ struct mlx5_devcom *devcom = dev->priv.devcom;
+ struct mlx5_eswitch *peer_esw;
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+ peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (!peer_esw)
+ return;
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->key);
- struct flow_dissector_key_keyid *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
- MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
- be32_to_cpu(mask->keyid));
- MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
- be32_to_cpu(key->keyid));
+ __mlx5e_tc_del_fdb_peer_flow(flow);
+ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+}
+
+static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ mlx5e_tc_del_fdb_peer_flow(flow);
+ mlx5e_tc_del_fdb_flow(priv, flow);
+ } else {
+ mlx5e_tc_del_nic_flow(priv, flow);
}
}
+
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f,
+ struct net_device *filter_dev)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1236,48 +1314,14 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
f->key);
+ int err = 0;
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- f->mask);
-
- /* Full udp dst port must be given */
- if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
- goto vxlan_match_offload_err;
-
- if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) &&
- MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
- parse_vxlan_attr(spec, f);
- else {
- NL_SET_ERR_MSG_MOD(extack,
- "port isn't an offloaded vxlan udp dport");
- netdev_warn(priv->netdev,
- "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
- return -EOPNOTSUPP;
- }
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- udp_dport, ntohs(mask->dst));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- udp_dport, ntohs(key->dst));
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- udp_sport, ntohs(mask->src));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- udp_sport, ntohs(key->src));
- } else { /* udp dst port must be given */
-vxlan_match_offload_err:
+ err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
+ headers_c, headers_v);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "IP tunnel decap offload supported only for vxlan, must set UDP dport");
- netdev_warn(priv->netdev,
- "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
- return -EOPNOTSUPP;
+ "failed to parse tunnel attributes");
+ return err;
}
if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -1381,6 +1425,7 @@ vxlan_match_offload_err:
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
+ struct net_device *filter_dev,
u8 *match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
@@ -1432,7 +1477,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
switch (key->addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- if (parse_tunnel_attr(priv, spec, f))
+ if (parse_tunnel_attr(priv, spec, f, filter_dev))
return -EOPNOTSUPP;
break;
default:
@@ -1774,7 +1819,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f,
+ struct net_device *filter_dev)
{
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
@@ -1784,7 +1830,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
u8 match_level;
int err;
- err = __parse_cls_flower(priv, spec, f, &match_level);
+ err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
@@ -2137,7 +2183,6 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
{
const struct tc_action *a;
bool modify_ip_header;
- LIST_HEAD(actions);
u8 htype, ip_proto;
void *headers_v;
u16 ethertype;
@@ -2226,7 +2271,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
- LIST_HEAD(actions);
u32 action = 0;
int err, i;
@@ -2269,7 +2313,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
same_hw_devs(priv, netdev_priv(peer_dev))) {
- parse_attr->mirred_ifindex = peer_dev->ifindex;
+ parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -2318,45 +2362,6 @@ static inline int hash_encap_info(struct ip_tunnel_key *key)
return jhash(key, sizeof(*key), 0);
}
-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct flowi4 *fl4,
- struct neighbour **out_n,
- u8 *out_ttl)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct rtable *rt;
- struct neighbour *n = NULL;
-
-#if IS_ENABLED(CONFIG_INET)
- int ret;
-
- rt = ip_route_output_key(dev_net(mirred_dev), fl4);
- ret = PTR_ERR_OR_ZERO(rt);
- if (ret)
- return ret;
-#else
- return -EOPNOTSUPP;
-#endif
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- /* if the egress device isn't on the same HW e-switch, we use the uplink */
- if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
- *out_dev = uplink_rpriv->netdev;
- else
- *out_dev = rt->dst.dev;
-
- if (!(*out_ttl))
- *out_ttl = ip4_dst_hoplimit(&rt->dst);
- n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
- ip_rt_put(rt);
- if (!n)
- return -ENOMEM;
-
- *out_n = n;
- return 0;
-}
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
@@ -2372,377 +2377,24 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
(peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
}
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
-{
- struct neighbour *n = NULL;
- struct dst_entry *dst;
-
-#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
- struct mlx5e_rep_priv *uplink_rpriv;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- int ret;
-
- ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
- fl6);
- if (ret < 0)
- return ret;
-
- if (!(*out_ttl))
- *out_ttl = ip6_dst_hoplimit(dst);
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- /* if the egress device isn't on the same HW e-switch, we use the uplink */
- if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
- *out_dev = uplink_rpriv->netdev;
- else
- *out_dev = dst->dev;
-#else
- return -EOPNOTSUPP;
-#endif
-
- n = dst_neigh_lookup(dst, &fl6->daddr);
- dst_release(dst);
- if (!n)
- return -ENOMEM;
-
- *out_n = n;
- return 0;
-}
-
-static void gen_vxlan_header_ipv4(struct net_device *out_dev,
- char buf[], int encap_size,
- unsigned char h_dest[ETH_ALEN],
- u8 tos, u8 ttl,
- __be32 daddr,
- __be32 saddr,
- __be16 udp_dst_port,
- __be32 vx_vni)
-{
- struct ethhdr *eth = (struct ethhdr *)buf;
- struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
- struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
- struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
-
- memset(buf, 0, encap_size);
-
- ether_addr_copy(eth->h_dest, h_dest);
- ether_addr_copy(eth->h_source, out_dev->dev_addr);
- eth->h_proto = htons(ETH_P_IP);
-
- ip->daddr = daddr;
- ip->saddr = saddr;
-
- ip->tos = tos;
- ip->ttl = ttl;
- ip->protocol = IPPROTO_UDP;
- ip->version = 0x4;
- ip->ihl = 0x5;
-
- udp->dest = udp_dst_port;
- vxh->vx_flags = VXLAN_HF_VNI;
- vxh->vx_vni = vxlan_vni_field(vx_vni);
-}
-
-static void gen_vxlan_header_ipv6(struct net_device *out_dev,
- char buf[], int encap_size,
- unsigned char h_dest[ETH_ALEN],
- u8 tos, u8 ttl,
- struct in6_addr *daddr,
- struct in6_addr *saddr,
- __be16 udp_dst_port,
- __be32 vx_vni)
-{
- struct ethhdr *eth = (struct ethhdr *)buf;
- struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
- struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
- struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
-
- memset(buf, 0, encap_size);
-
- ether_addr_copy(eth->h_dest, h_dest);
- ether_addr_copy(eth->h_source, out_dev->dev_addr);
- eth->h_proto = htons(ETH_P_IPV6);
-
- ip6_flow_hdr(ip6h, tos, 0);
- /* the HW fills up ipv6 payload len */
- ip6h->nexthdr = IPPROTO_UDP;
- ip6h->hop_limit = ttl;
- ip6h->daddr = *daddr;
- ip6h->saddr = *saddr;
-
- udp->dest = udp_dst_port;
- vxh->vx_flags = VXLAN_HF_VNI;
- vxh->vx_vni = vxlan_vni_field(vx_vni);
-}
-
-static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct mlx5e_encap_entry *e)
-{
- int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
- int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
- struct ip_tunnel_key *tun_key = &e->tun_info.key;
- struct net_device *out_dev;
- struct neighbour *n = NULL;
- struct flowi4 fl4 = {};
- u8 nud_state, tos, ttl;
- char *encap_header;
- int err;
- if (max_encap_size < ipv4_encap_size) {
- mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
- ipv4_encap_size, max_encap_size);
- return -EOPNOTSUPP;
- }
-
- encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
-
- switch (e->tunnel_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- fl4.flowi4_proto = IPPROTO_UDP;
- fl4.fl4_dport = tun_key->tp_dst;
- break;
- default:
- err = -EOPNOTSUPP;
- goto free_encap;
- }
-
- tos = tun_key->tos;
- ttl = tun_key->ttl;
-
- fl4.flowi4_tos = tun_key->tos;
- fl4.daddr = tun_key->u.ipv4.dst;
- fl4.saddr = tun_key->u.ipv4.src;
-
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
- &fl4, &n, &ttl);
- if (err)
- goto free_encap;
-
- /* used by mlx5e_detach_encap to lookup a neigh hash table
- * entry in the neigh hash table when a user deletes a rule
- */
- e->m_neigh.dev = n->dev;
- e->m_neigh.family = n->ops->family;
- memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- e->out_dev = out_dev;
-
- /* It's importent to add the neigh to the hash table before checking
- * the neigh validity state. So if we'll get a notification, in case the
- * neigh changes it's validity state, we would find the relevant neigh
- * in the hash.
- */
- err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
- if (err)
- goto free_encap;
-
- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(e->h_dest, n->ha);
- read_unlock_bh(&n->lock);
-
- switch (e->tunnel_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- gen_vxlan_header_ipv4(out_dev, encap_header,
- ipv4_encap_size, e->h_dest, tos, ttl,
- fl4.daddr,
- fl4.saddr, tun_key->tp_dst,
- tunnel_id_to_key32(tun_key->tun_id));
- break;
- default:
- err = -EOPNOTSUPP;
- goto destroy_neigh_entry;
- }
- e->encap_size = ipv4_encap_size;
- e->encap_header = encap_header;
-
- if (!(nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
- err = -EAGAIN;
- goto out;
- }
-
- err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
- ipv4_encap_size, encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err)
- goto destroy_neigh_entry;
-
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
- return err;
-
-destroy_neigh_entry:
- mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-free_encap:
- kfree(encap_header);
-out:
- if (n)
- neigh_release(n);
- return err;
-}
-
-static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct mlx5e_encap_entry *e)
-{
- int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
- int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
- struct ip_tunnel_key *tun_key = &e->tun_info.key;
- struct net_device *out_dev;
- struct neighbour *n = NULL;
- struct flowi6 fl6 = {};
- u8 nud_state, tos, ttl;
- char *encap_header;
- int err;
-
- if (max_encap_size < ipv6_encap_size) {
- mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
- ipv6_encap_size, max_encap_size);
- return -EOPNOTSUPP;
- }
-
- encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
-
- switch (e->tunnel_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- fl6.flowi6_proto = IPPROTO_UDP;
- fl6.fl6_dport = tun_key->tp_dst;
- break;
- default:
- err = -EOPNOTSUPP;
- goto free_encap;
- }
-
- tos = tun_key->tos;
- ttl = tun_key->ttl;
-
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
- fl6.daddr = tun_key->u.ipv6.dst;
- fl6.saddr = tun_key->u.ipv6.src;
-
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
- &fl6, &n, &ttl);
- if (err)
- goto free_encap;
-
- /* used by mlx5e_detach_encap to lookup a neigh hash table
- * entry in the neigh hash table when a user deletes a rule
- */
- e->m_neigh.dev = n->dev;
- e->m_neigh.family = n->ops->family;
- memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- e->out_dev = out_dev;
-
- /* It's importent to add the neigh to the hash table before checking
- * the neigh validity state. So if we'll get a notification, in case the
- * neigh changes it's validity state, we would find the relevant neigh
- * in the hash.
- */
- err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
- if (err)
- goto free_encap;
-
- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(e->h_dest, n->ha);
- read_unlock_bh(&n->lock);
-
- switch (e->tunnel_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- gen_vxlan_header_ipv6(out_dev, encap_header,
- ipv6_encap_size, e->h_dest, tos, ttl,
- &fl6.daddr,
- &fl6.saddr, tun_key->tp_dst,
- tunnel_id_to_key32(tun_key->tun_id));
- break;
- default:
- err = -EOPNOTSUPP;
- goto destroy_neigh_entry;
- }
-
- e->encap_size = ipv6_encap_size;
- e->encap_header = encap_header;
-
- if (!(nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
- err = -EAGAIN;
- goto out;
- }
-
- err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
- ipv6_encap_size, encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err)
- goto destroy_neigh_entry;
-
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
- return err;
-
-destroy_neigh_entry:
- mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-free_encap:
- kfree(encap_header);
-out:
- if (n)
- neigh_release(n);
- return err;
-}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ int out_index)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned short family = ip_tunnel_info_af(tun_info);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
struct mlx5e_encap_entry *e;
- int tunnel_type, err = 0;
uintptr_t hash_key;
bool found = false;
-
- /* udp dst port must be set */
- if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
- goto vxlan_encap_offload_err;
-
- /* setting udp src port isn't supported */
- if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
-vxlan_encap_offload_err:
- NL_SET_ERR_MSG_MOD(extack,
- "must set udp dst port and not set udp src port");
- netdev_warn(priv->netdev,
- "must set udp dst port and not set udp src port\n");
- return -EOPNOTSUPP;
- }
-
- if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
- MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
- tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "port isn't an offloaded vxlan udp dport");
- netdev_warn(priv->netdev,
- "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
- return -EOPNOTSUPP;
- }
+ int err = 0;
hash_key = hash_encap_info(key);
@@ -2763,13 +2415,16 @@ vxlan_encap_offload_err:
return -ENOMEM;
e->tun_info = *tun_info;
- e->tunnel_type = tunnel_type;
+ err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
+ if (err)
+ goto out_err;
+
INIT_LIST_HEAD(&e->flows);
if (family == AF_INET)
- err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
+ err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
- err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
+ err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
if (err && err != -EAGAIN)
goto out_err;
@@ -2777,12 +2432,15 @@ vxlan_encap_offload_err:
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
attach_flow:
- list_add(&flow->encap, &e->flows);
+ list_add(&flow->encaps[out_index].list, &e->flows);
+ flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
- if (e->flags & MLX5_ENCAP_ENTRY_VALID)
- attr->encap_id = e->encap_id;
- else
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ attr->dests[out_index].encap_id = e->encap_id;
+ attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ } else {
err = -EAGAIN;
+ }
return err;
@@ -2851,7 +2509,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct ip_tunnel_info *info = NULL;
const struct tc_action *a;
- LIST_HEAD(actions);
bool encap = false;
u32 action = 0;
int err, i;
@@ -2876,7 +2533,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return err;
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- attr->mirror_count = attr->out_count;
+ attr->split_count = attr->out_count;
continue;
}
@@ -2894,6 +2551,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct net_device *out_dev;
out_dev = tcf_mirred_dev(a);
+ if (!out_dev) {
+ /* out_dev is NULL when filters with a
+ * nonexistent mirred device are replayed to
+ * the driver.
+ */
+ return -EINVAL;
+ }
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
@@ -2903,23 +2567,47 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EOPNOTSUPP;
}
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
if (switchdev_port_same_parent_id(priv->netdev,
out_dev) ||
is_merged_eswitch_dev(priv, out_dev)) {
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+
+ if (uplink_upper &&
+ netif_is_lag_master(uplink_upper) &&
+ uplink_upper == out_dev)
+ out_dev = uplink_dev;
+
+ if (!mlx5e_eswitch_rep(out_dev))
+ return -EOPNOTSUPP;
+
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
- attr->out_rep[attr->out_count] = rpriv->rep;
- attr->out_mdev[attr->out_count++] = out_priv->mdev;
+ attr->dests[attr->out_count].rep = rpriv->rep;
+ attr->dests[attr->out_count].mdev = out_priv->mdev;
+ attr->out_count++;
} else if (encap) {
- parse_attr->mirred_ifindex = out_dev->ifindex;
- parse_attr->tun_info = *info;
+ parse_attr->mirred_ifindex[attr->out_count] =
+ out_dev->ifindex;
+ parse_attr->tun_info[attr->out_count] = *info;
+ encap = false;
attr->parse_attr = parse_attr;
- action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- /* attr->out_rep is resolved when we handle encap */
+ attr->dests[attr->out_count].flags |=
+ MLX5_ESW_DEST_ENCAP;
+ attr->out_count++;
+ /* attr->dests[].rep is resolved when we
+ * handle encap
+ */
+ } else if (parse_attr->filter_dev != priv->netdev) {
+ /* All mlx5 devices are called to configure
+ * high level device filters. Therefore, the
+ * *attempt* to install a filter on an invalid
+ * eswitch should not trigger an explicit error.
+ */
+ return -EINVAL;
} else {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
@@ -2936,7 +2624,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
encap = true;
else
return -EOPNOTSUPP;
- attr->mirror_count = attr->out_count;
continue;
}
@@ -2946,7 +2633,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (err)
return err;
- attr->mirror_count = attr->out_count;
+ attr->split_count = attr->out_count;
continue;
}
@@ -2988,7 +2675,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
- if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
@@ -3007,6 +2694,11 @@ static void get_flags(int flags, u16 *flow_flags)
if (flags & MLX5E_TC_EGRESS)
__flow_flags |= MLX5E_TC_FLOW_EGRESS;
+ if (flags & MLX5E_TC_ESW_OFFLOAD)
+ __flow_flags |= MLX5E_TC_FLOW_ESWITCH;
+ if (flags & MLX5E_TC_NIC_OFFLOAD)
+ __flow_flags |= MLX5E_TC_FLOW_NIC;
+
*flow_flags = __flow_flags;
}
@@ -3017,18 +2709,32 @@ static const struct rhashtable_params tc_ht_params = {
.automatic_shrinking = true,
};
-static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
+static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
- if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
+ if (flags & MLX5E_TC_ESW_OFFLOAD) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- return &uplink_rpriv->tc_ht;
- } else
+ return &uplink_rpriv->uplink_priv.tc_ht;
+ } else /* NIC offload */
return &priv->fs.tc.ht;
}
+static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT &&
+ flow->flags & MLX5E_TC_FLOW_INGRESS;
+ bool act_is_encap = !!(attr->action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
+ bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS);
+
+ return esw_paired && mlx5_lag_is_sriov(attr->in_mdev) &&
+ (is_rep_ingress || act_is_encap);
+}
+
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct tc_cls_flower_offload *f, u16 flow_flags,
@@ -3050,10 +2756,6 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
flow->flags = flow_flags;
flow->priv = priv;
- err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
- if (err)
- goto err_free;
-
*__flow = flow;
*__parse_attr = parse_attr;
@@ -3066,12 +2768,16 @@ err_free:
}
static int
-mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f,
- u16 flow_flags,
- struct mlx5e_tc_flow **__flow)
+__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ u16 flow_flags,
+ struct net_device *filter_dev,
+ struct mlx5_eswitch_rep *in_rep,
+ struct mlx5_core_dev *in_mdev,
+ struct mlx5e_tc_flow **__flow)
{
struct netlink_ext_ack *extack = f->common.extack;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
int attr_size, err;
@@ -3082,6 +2788,12 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
&parse_attr, &flow);
if (err)
goto out;
+ parse_attr->filter_dev = filter_dev;
+ flow->esw_attr->parse_attr = parse_attr;
+ err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
+ f, filter_dev);
+ if (err)
+ goto err_free;
flow->esw_attr->chain = f->common.chain_index;
flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
@@ -3089,14 +2801,19 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ flow->esw_attr->in_rep = in_rep;
+ flow->esw_attr->in_mdev = in_mdev;
+
+ if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
+ MLX5_COUNTER_SOURCE_ESWITCH)
+ flow->esw_attr->counter_dev = in_mdev;
+ else
+ flow->esw_attr->counter_dev = priv->mdev;
+
err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
if (err)
goto err_free;
- if (!(flow->esw_attr->action &
- MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
- kvfree(parse_attr);
-
*__flow = flow;
return 0;
@@ -3108,10 +2825,92 @@ out:
return err;
}
+static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_priv *priv = flow->priv, *peer_priv;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
+ struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_rep_priv *peer_urpriv;
+ struct mlx5e_tc_flow *peer_flow;
+ struct mlx5_core_dev *in_mdev;
+ int err = 0;
+
+ peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (!peer_esw)
+ return -ENODEV;
+
+ peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
+ peer_priv = netdev_priv(peer_urpriv->netdev);
+
+ /* in_mdev is the mdev on which the packet originated.
+ * Packets redirected to the uplink use the same mdev as the
+ * original flow, while packets redirected from the uplink use
+ * the peer mdev.
+ */
+ if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT)
+ in_mdev = peer_priv->mdev;
+ else
+ in_mdev = priv->mdev;
+
+ parse_attr = flow->esw_attr->parse_attr;
+ err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
+ parse_attr->filter_dev,
+ flow->esw_attr->in_rep, in_mdev, &peer_flow);
+ if (err)
+ goto out;
+
+ flow->peer_flow = peer_flow;
+ flow->flags |= MLX5E_TC_FLOW_DUP;
+ mutex_lock(&esw->offloads.peer_mutex);
+ list_add_tail(&flow->peer, &esw->offloads.peer_flows);
+ mutex_unlock(&esw->offloads.peer_mutex);
+
+out:
+ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ return err;
+}
+
+static int
+mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ u16 flow_flags,
+ struct net_device *filter_dev,
+ struct mlx5e_tc_flow **__flow)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *in_rep = rpriv->rep;
+ struct mlx5_core_dev *in_mdev = priv->mdev;
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
+ in_mdev, &flow);
+ if (err)
+ goto out;
+
+ if (is_peer_flow_needed(flow)) {
+ err = mlx5e_tc_add_fdb_peer_flow(f, flow);
+ if (err) {
+ mlx5e_tc_del_fdb_flow(priv, flow);
+ goto out;
+ }
+ }
+
+ *__flow = flow;
+
+ return 0;
+
+out:
+ return err;
+}
+
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
u16 flow_flags,
+ struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
struct netlink_ext_ack *extack = f->common.extack;
@@ -3130,6 +2929,12 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (err)
goto out;
+ parse_attr->filter_dev = filter_dev;
+ err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
+ f, filter_dev);
+ if (err)
+ goto err_free;
+
err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
if (err)
goto err_free;
@@ -3155,6 +2960,7 @@ static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
int flags,
+ struct net_device *filter_dev,
struct mlx5e_tc_flow **flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -3167,18 +2973,20 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
if (esw && esw->mode == SRIOV_OFFLOADS)
- err = mlx5e_add_fdb_flow(priv, f, flow_flags, flow);
+ err = mlx5e_add_fdb_flow(priv, f, flow_flags,
+ filter_dev, flow);
else
- err = mlx5e_add_nic_flow(priv, f, flow_flags, flow);
+ err = mlx5e_add_nic_flow(priv, f, flow_flags,
+ filter_dev, flow);
return err;
}
-int mlx5e_configure_flower(struct mlx5e_priv *priv,
+int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags)
{
struct netlink_ext_ack *extack = f->common.extack;
- struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
int err = 0;
@@ -3192,7 +3000,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5e_tc_add_flow(priv, f, flags, &flow);
+ err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
if (err)
goto out;
@@ -3220,10 +3028,10 @@ static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
return false;
}
-int mlx5e_delete_flower(struct mlx5e_priv *priv,
+int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags)
{
- struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
@@ -3239,10 +3047,12 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
return 0;
}
-int mlx5e_stats_flower(struct mlx5e_priv *priv,
+int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags)
{
- struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+ struct rhashtable *tc_ht = get_tc_ht(priv, flags);
+ struct mlx5_eswitch *peer_esw;
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
u64 bytes;
@@ -3262,6 +3072,27 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (!peer_esw)
+ goto out;
+
+ if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
+ (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
+ u64 bytes2;
+ u64 packets2;
+ u64 lastuse2;
+
+ counter = mlx5e_tc_get_counter(flow->peer_flow);
+ mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
+
+ bytes += bytes2;
+ packets += packets2;
+ lastuse = max_t(u64, lastuse, lastuse2);
+ }
+
+ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
+out:
tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
return 0;
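
Note for readers following the stats hunk above: when a flow has a duplicate on the peer eswitch, the two hardware counters are merged before being reported to TC: bytes and packets are summed and lastuse keeps the most recent timestamp. A minimal standalone sketch of that merge follows; the struct and function names are invented for illustration and are not the driver's mlx5_fc API.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the cached values the driver reads with
 * mlx5_fc_query_cached(); illustration only.
 */
struct flow_stats {
	uint64_t bytes;
	uint64_t packets;
	uint64_t lastuse;
};

/* Fold the peer flow's counters into the primary flow's totals:
 * sum bytes/packets, keep the most recent lastuse timestamp.
 */
static void merge_peer_stats(struct flow_stats *total,
			     const struct flow_stats *peer)
{
	total->bytes   += peer->bytes;
	total->packets += peer->packets;
	if (peer->lastuse > total->lastuse)
		total->lastuse = peer->lastuse;
}

int main(void)
{
	struct flow_stats primary = { 1500, 10, 100 };
	struct flow_stats peer    = { 3000, 20, 250 };

	merge_peer_stats(&primary, &peer);
	printf("bytes=%llu packets=%llu lastuse=%llu\n",
	       (unsigned long long)primary.bytes,
	       (unsigned long long)primary.packets,
	       (unsigned long long)primary.lastuse);
	return 0;
}
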
@@ -3350,7 +3181,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier(&tc->netdevice_nb);
- rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
+ rhashtable_destroy(&tc->ht);
if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_destroy_flow_table(tc->t);
@@ -3368,9 +3199,17 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags)
{
- struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct rhashtable *tc_ht = get_tc_ht(priv, flags);
return atomic_read(&tc_ht->nelems);
}
+
+void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
+{
+ struct mlx5e_tc_flow *flow, *tmp;
+
+ list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
+ __mlx5e_tc_del_fdb_peer_flow(flow);
+}
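
One idiom worth calling out from the en_tc.c changes above: a flow is now linked into an encap entry's flow list through one of its per-destination encaps[out_index] nodes, and the owning flow is recovered from such a node with container_of(). The self-contained sketch below shows the pattern with invented types; it is not driver code.

#include <stddef.h>
#include <stdio.h>

/* Illustrative container_of(): recover a pointer to the enclosing struct
 * from a pointer to one of its members (the kernel provides this macro).
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define MAX_DESTS 2

struct list_node {
	struct list_node *next, *prev;
};

/* Per-destination list node; 'index' records which slot of the owning
 * flow's array the node lives in, mirroring encap_flow_item.
 */
struct encap_item {
	struct list_node list;
	int index;
};

struct tc_flow {
	int cookie;
	struct encap_item encaps[MAX_DESTS];
};

int main(void)
{
	struct tc_flow flow = { .cookie = 42 };
	struct encap_item *efi = &flow.encaps[1];

	efi->index = 1;
	/* Recover the owning flow from its second encap slot. The driver
	 * names the slot with a runtime index (encaps[efi->index]), which
	 * GCC's offsetof() accepts as an extension.
	 */
	struct tc_flow *owner = container_of(efi, struct tc_flow, encaps[1]);

	printf("cookie=%d, same flow: %d\n", owner->cookie, owner == &flow);
	return 0;
}
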
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 49436bf3b80a..d2d87f978c06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -42,7 +42,9 @@
enum {
MLX5E_TC_INGRESS = BIT(0),
MLX5E_TC_EGRESS = BIT(1),
- MLX5E_TC_LAST_EXPORTED_BIT = 1,
+ MLX5E_TC_NIC_OFFLOAD = BIT(2),
+ MLX5E_TC_ESW_OFFLOAD = BIT(3),
+ MLX5E_TC_LAST_EXPORTED_BIT = 3,
};
int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
@@ -51,12 +53,12 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
-int mlx5e_configure_flower(struct mlx5e_priv *priv,
+int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags);
-int mlx5e_delete_flower(struct mlx5e_priv *priv,
+int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags);
-int mlx5e_stats_flower(struct mlx5e_priv *priv,
+int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags);
struct mlx5e_encap_entry;
@@ -68,12 +70,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_neigh_hash_entry;
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv);
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags);
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; }
+static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { return 0; }
#endif
#endif /* __MLX5_EN_TC_H__ */
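
For context on the enum change just above: callers now pass MLX5E_TC_NIC_OFFLOAD or MLX5E_TC_ESW_OFFLOAD in flags, and get_tc_ht() selects the flow table from those bits rather than from the current eswitch mode. A toy dispatch sketch with invented names, assuming only that the flags mirror the BIT(2)/BIT(3) values above:

#include <stdio.h>

/* Offload-path selector bits, mirroring the new enum values in en_tc.h */
#define TC_NIC_OFFLOAD (1u << 2)
#define TC_ESW_OFFLOAD (1u << 3)

struct table { const char *name; };

static struct table nic_ht = { "nic flow table" };
static struct table esw_ht = { "eswitch (uplink rep) flow table" };

/* Toy selector: the caller's flags, not the current eswitch mode,
 * decide which table is consulted.
 */
static struct table *pick_tc_table(unsigned int flags)
{
	if (flags & TC_ESW_OFFLOAD)
		return &esw_ht;
	return &nic_ht; /* NIC offload */
}

int main(void)
{
	printf("%s\n", pick_tc_table(TC_ESW_OFFLOAD)->name);
	printf("%s\n", pick_tc_table(TC_NIC_OFFLOAD)->name);
	return 0;
}
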
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6dacaeba2fbf..598ad7e4d5c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -127,7 +127,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
else
#endif
if (skb_vlan_tag_present(skb))
- up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+ up = skb_vlan_tag_get_prio(skb);
/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
@@ -459,9 +459,10 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
netdev_err(sq->channel->netdev,
- "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
- sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome,
- err_cqe->vendor_err_synd);
+ "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+ sq->cq.mcq.cqn, ci, sq->sqn,
+ get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
+ err_cqe->syndrome, err_cqe->vendor_err_synd);
mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}
@@ -507,7 +508,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
- if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) {
+ if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
&sq->state)) {
mlx5e_dump_error_cqe(sq,
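
The first en_tx.c hunk above swaps an open-coded shift of skb->vlan_tci for the skb_vlan_tag_get_prio() helper; both extract the 3-bit PCP field from the 802.1Q TCI. A standalone illustration of that bit layout (the mask and shift mirror the standard TCI format; tci_get_prio() is an invented stand-in, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI layout: PCP (3 bits) | DEI (1 bit) | VID (12 bits) */
#define VLAN_PRIO_MASK  0xe000
#define VLAN_PRIO_SHIFT 13

static unsigned int tci_get_prio(uint16_t vlan_tci)
{
	/* Same computation the helper performs on skb->vlan_tci */
	return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

int main(void)
{
	uint16_t tci = (5u << VLAN_PRIO_SHIFT) | 100; /* PCP 5, VID 100 */

	printf("prio=%u vid=%u\n", tci_get_prio(tci), tci & 0x0fff);
	return 0;
}
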
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 85d517360157..b4af5e19f6ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -76,6 +76,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
+ struct mlx5e_rq *rq = &c->rq;
bool busy = false;
int work_done = 0;
int i;
@@ -85,17 +86,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
- busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq, NULL);
if (c->xdp)
- busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
+ busy |= mlx5e_poll_xdpsq_cq(&rq->xdpsq.cq, rq);
if (likely(budget)) { /* budget=0 means: don't poll rx rings */
- work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+ work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
busy |= work_done == budget;
}
- busy |= c->rq.post_wqes(&c->rq);
+ busy |= c->rq.post_wqes(rq);
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
@@ -115,9 +116,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&c->sq[i].cq);
}
- mlx5e_handle_rx_dim(&c->rq);
+ mlx5e_handle_rx_dim(rq);
- mlx5e_cq_arm(&c->rq.cq);
+ mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
mlx5e_cq_arm(&c->xdpsq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c1e1a16a9b07..ee04aab65a9f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -31,20 +31,22 @@
*/
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
+#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
enum {
- MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
MLX5_EQE_OWNER_INIT_VAL = 0x1,
};
@@ -55,14 +57,32 @@ enum {
};
enum {
- MLX5_NUM_SPARE_EQE = 0x80,
- MLX5_NUM_ASYNC_EQE = 0x1000,
- MLX5_NUM_CMD_EQE = 32,
- MLX5_NUM_PF_DRAIN = 64,
+ MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
-enum {
- MLX5_EQ_DOORBEL_OFFSET = 0x40,
+struct mlx5_irq_info {
+ cpumask_var_t mask;
+ char name[MLX5_MAX_IRQ_NAME];
+ void *context; /* dev_id provided to request_irq */
+};
+
+struct mlx5_eq_table {
+ struct list_head comp_eqs_list;
+ struct mlx5_eq pages_eq;
+ struct mlx5_eq cmd_eq;
+ struct mlx5_eq async_eq;
+
+ struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];
+
+ /* Since CQ DB is stored in async_eq */
+ struct mlx5_nb cq_err_nb;
+
+ struct mutex lock; /* sync async eqs creations */
+ int num_comp_vectors;
+ struct mlx5_irq_info *irq_info;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap;
+#endif
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -78,17 +98,6 @@ enum {
(1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
-struct map_eq_in {
- u64 mask;
- u32 reserved;
- u32 unmap_eqn;
-};
-
-struct cre_des_eq {
- u8 reserved[15];
- u8 eqn;
-};
-
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
@@ -99,213 +108,56 @@ static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
-{
- return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
-}
-
-static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
-{
- struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
-
- return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
-}
-
-static const char *eqe_type_str(u8 type)
-{
- switch (type) {
- case MLX5_EVENT_TYPE_COMP:
- return "MLX5_EVENT_TYPE_COMP";
- case MLX5_EVENT_TYPE_PATH_MIG:
- return "MLX5_EVENT_TYPE_PATH_MIG";
- case MLX5_EVENT_TYPE_COMM_EST:
- return "MLX5_EVENT_TYPE_COMM_EST";
- case MLX5_EVENT_TYPE_SQ_DRAINED:
- return "MLX5_EVENT_TYPE_SQ_DRAINED";
- case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
- return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
- case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
- return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
- case MLX5_EVENT_TYPE_CQ_ERROR:
- return "MLX5_EVENT_TYPE_CQ_ERROR";
- case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
- return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
- case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
- return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
- case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
- return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
- case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
- return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
- case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
- return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
- case MLX5_EVENT_TYPE_INTERNAL_ERROR:
- return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
- case MLX5_EVENT_TYPE_PORT_CHANGE:
- return "MLX5_EVENT_TYPE_PORT_CHANGE";
- case MLX5_EVENT_TYPE_GPIO_EVENT:
- return "MLX5_EVENT_TYPE_GPIO_EVENT";
- case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
- return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
- case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
- return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
- case MLX5_EVENT_TYPE_REMOTE_CONFIG:
- return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
- case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
- return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
- case MLX5_EVENT_TYPE_STALL_EVENT:
- return "MLX5_EVENT_TYPE_STALL_EVENT";
- case MLX5_EVENT_TYPE_CMD:
- return "MLX5_EVENT_TYPE_CMD";
- case MLX5_EVENT_TYPE_PAGE_REQUEST:
- return "MLX5_EVENT_TYPE_PAGE_REQUEST";
- case MLX5_EVENT_TYPE_PAGE_FAULT:
- return "MLX5_EVENT_TYPE_PAGE_FAULT";
- case MLX5_EVENT_TYPE_PPS_EVENT:
- return "MLX5_EVENT_TYPE_PPS_EVENT";
- case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
- return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
- case MLX5_EVENT_TYPE_FPGA_ERROR:
- return "MLX5_EVENT_TYPE_FPGA_ERROR";
- case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
- return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
- case MLX5_EVENT_TYPE_GENERAL_EVENT:
- return "MLX5_EVENT_TYPE_GENERAL_EVENT";
- case MLX5_EVENT_TYPE_DEVICE_TRACER:
- return "MLX5_EVENT_TYPE_DEVICE_TRACER";
- default:
- return "Unrecognized event";
- }
-}
-
-static enum mlx5_dev_event port_subtype_event(u8 subtype)
-{
- switch (subtype) {
- case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
- return MLX5_DEV_EVENT_PORT_DOWN;
- case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
- return MLX5_DEV_EVENT_PORT_UP;
- case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
- return MLX5_DEV_EVENT_PORT_INITIALIZED;
- case MLX5_PORT_CHANGE_SUBTYPE_LID:
- return MLX5_DEV_EVENT_LID_CHANGE;
- case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
- return MLX5_DEV_EVENT_PKEY_CHANGE;
- case MLX5_PORT_CHANGE_SUBTYPE_GUID:
- return MLX5_DEV_EVENT_GUID_CHANGE;
- case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
- return MLX5_DEV_EVENT_CLIENT_REREG;
- }
- return -1;
-}
-
-static void eq_update_ci(struct mlx5_eq *eq, int arm)
+/* caller must eventually call mlx5_cq_put on the returned cq */
+static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
- __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
- u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
-
- __raw_writel((__force u32)cpu_to_be32(val), addr);
- /* We still want ordering, just not swabbing, so add a barrier */
- mb();
-}
+ struct mlx5_cq_table *table = &eq->cq_table;
+ struct mlx5_core_cq *cq = NULL;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-static void eqe_pf_action(struct work_struct *work)
-{
- struct mlx5_pagefault *pfault = container_of(work,
- struct mlx5_pagefault,
- work);
- struct mlx5_eq *eq = pfault->eq;
+ spin_lock(&table->lock);
+ cq = radix_tree_lookup(&table->tree, cqn);
+ if (likely(cq))
+ mlx5_cq_hold(cq);
+ spin_unlock(&table->lock);
- mlx5_core_page_fault(eq->dev, pfault);
- mempool_free(pfault, eq->pf_ctx.pool);
+ return cq;
}
-static void eq_pf_process(struct mlx5_eq *eq)
+static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
{
- struct mlx5_core_dev *dev = eq->dev;
- struct mlx5_eqe_page_fault *pf_eqe;
- struct mlx5_pagefault *pfault;
+ struct mlx5_eq_comp *eq_comp = eq_ptr;
+ struct mlx5_eq *eq = eq_ptr;
struct mlx5_eqe *eqe;
int set_ci = 0;
+ u32 cqn = -1;
while ((eqe = next_eqe_sw(eq))) {
- pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
- if (!pfault) {
- schedule_work(&eq->pf_ctx.work);
- break;
- }
-
+ struct mlx5_core_cq *cq;
+ /* Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
dma_rmb();
- pf_eqe = &eqe->data.page_fault;
- pfault->event_subtype = eqe->sub_type;
- pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
-
- mlx5_core_dbg(dev,
- "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
- eqe->sub_type, pfault->bytes_committed);
-
- switch (eqe->sub_type) {
- case MLX5_PFAULT_SUBTYPE_RDMA:
- /* RDMA based event */
- pfault->type =
- be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
- pfault->token =
- be32_to_cpu(pf_eqe->rdma.pftype_token) &
- MLX5_24BIT_MASK;
- pfault->rdma.r_key =
- be32_to_cpu(pf_eqe->rdma.r_key);
- pfault->rdma.packet_size =
- be16_to_cpu(pf_eqe->rdma.packet_length);
- pfault->rdma.rdma_op_len =
- be32_to_cpu(pf_eqe->rdma.rdma_op_len);
- pfault->rdma.rdma_va =
- be64_to_cpu(pf_eqe->rdma.rdma_va);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
- pfault->type, pfault->token,
- pfault->rdma.r_key);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
- pfault->rdma.rdma_op_len,
- pfault->rdma.rdma_va);
- break;
-
- case MLX5_PFAULT_SUBTYPE_WQE:
- /* WQE based event */
- pfault->type =
- (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
- pfault->token =
- be32_to_cpu(pf_eqe->wqe.token);
- pfault->wqe.wq_num =
- be32_to_cpu(pf_eqe->wqe.pftype_wq) &
- MLX5_24BIT_MASK;
- pfault->wqe.wqe_index =
- be16_to_cpu(pf_eqe->wqe.wqe_index);
- pfault->wqe.packet_size =
- be16_to_cpu(pf_eqe->wqe.packet_length);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
- pfault->type, pfault->token,
- pfault->wqe.wq_num,
- pfault->wqe.wqe_index);
- break;
-
- default:
- mlx5_core_warn(dev,
- "Unsupported page fault event sub-type: 0x%02hhx\n",
- eqe->sub_type);
- /* Unsupported page faults should still be
- * resolved by the page fault handler
- */
+ /* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
+ cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
+
+ cq = mlx5_eq_cq_get(eq, cqn);
+ if (likely(cq)) {
+ ++cq->arm_sn;
+ cq->comp(cq);
+ mlx5_cq_put(cq);
+ } else {
+ mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
}
- pfault->eq = eq;
- INIT_WORK(&pfault->work, eqe_pf_action);
- queue_work(eq->pf_ctx.wq, &pfault->work);
-
++eq->cons_index;
++set_ci;
+ /* The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create our EQs with MLX5_NUM_SPARE_EQE extra
+ * entries, so we must update our consumer index at
+ * least that often.
+ */
if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
eq_update_ci(eq, 0);
set_ci = 0;
@@ -313,165 +165,41 @@ static void eq_pf_process(struct mlx5_eq *eq)
}
eq_update_ci(eq, 1);
-}
-static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
-{
- struct mlx5_eq *eq = eq_ptr;
- unsigned long flags;
-
- if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
- eq_pf_process(eq);
- spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
- } else {
- schedule_work(&eq->pf_ctx.work);
- }
+ if (cqn != -1)
+ tasklet_schedule(&eq_comp->tasklet_ctx.task);
return IRQ_HANDLED;
}
-/* mempool_refill() was proposed but unfortunately wasn't accepted
- * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
- * Chip workaround.
+/* Some architectures don't latch interrupts while they are disabled, so
+ * polling with mlx5_eq_poll_irq_disabled can itself lose interrupts even
+ * though it is meant to avoid that. Do not use it unless it is the last
+ * resort.
*/
-static void mempool_refill(mempool_t *pool)
-{
- while (pool->curr_nr < pool->min_nr)
- mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
-}
-
-static void eq_pf_action(struct work_struct *work)
-{
- struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);
-
- mempool_refill(eq->pf_ctx.pool);
-
- spin_lock_irq(&eq->pf_ctx.lock);
- eq_pf_process(eq);
- spin_unlock_irq(&eq->pf_ctx.lock);
-}
-
-static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
-{
- spin_lock_init(&pf_ctx->lock);
- INIT_WORK(&pf_ctx->work, eq_pf_action);
-
- pf_ctx->wq = alloc_ordered_workqueue(name,
- WQ_MEM_RECLAIM);
- if (!pf_ctx->wq)
- return -ENOMEM;
-
- pf_ctx->pool = mempool_create_kmalloc_pool
- (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
- if (!pf_ctx->pool)
- goto err_wq;
-
- return 0;
-err_wq:
- destroy_workqueue(pf_ctx->wq);
- return -ENOMEM;
-}
-
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
- u32 wq_num, u8 type, int error)
-{
- u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};
-
- MLX5_SET(page_fault_resume_in, in, opcode,
- MLX5_CMD_OP_PAGE_FAULT_RESUME);
- MLX5_SET(page_fault_resume_in, in, error, !!error);
- MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
- MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
- MLX5_SET(page_fault_resume_in, in, token, token);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
-#endif
-
-static void general_event_handler(struct mlx5_core_dev *dev,
- struct mlx5_eqe *eqe)
-{
- switch (eqe->sub_type) {
- case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
- if (dev->event)
- dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
- break;
- default:
- mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
- eqe->sub_type);
- }
-}
-
-static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
- struct mlx5_eqe *eqe)
-{
- u64 value_lsb;
- u64 value_msb;
-
- value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
- value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
-
- mlx5_core_warn(dev,
- "High temperature on sensors with bit set %llx %llx",
- value_msb, value_lsb);
-}
-
-/* caller must eventually call mlx5_cq_put on the returned cq */
-static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
-{
- struct mlx5_cq_table *table = &eq->cq_table;
- struct mlx5_core_cq *cq = NULL;
-
- spin_lock(&table->lock);
- cq = radix_tree_lookup(&table->tree, cqn);
- if (likely(cq))
- mlx5_cq_hold(cq);
- spin_unlock(&table->lock);
-
- return cq;
-}
-
-static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
-{
- struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
-
- if (unlikely(!cq)) {
- mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
- return;
- }
-
- ++cq->arm_sn;
-
- cq->comp(cq);
-
- mlx5_cq_put(cq);
-}
-
-static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
- struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
-
- if (unlikely(!cq)) {
- mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
- return;
- }
+ u32 count_eqe;
- cq->event(cq, event_type);
+ disable_irq(eq->core.irqn);
+ count_eqe = eq->core.cons_index;
+ mlx5_eq_comp_int(eq->core.irqn, eq);
+ count_eqe = eq->core.cons_index - count_eqe;
+ enable_irq(eq->core.irqn);
- mlx5_cq_put(cq);
+ return count_eqe;
}
-static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
+static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
{
struct mlx5_eq *eq = eq_ptr;
- struct mlx5_core_dev *dev = eq->dev;
+ struct mlx5_eq_table *eqt;
+ struct mlx5_core_dev *dev;
struct mlx5_eqe *eqe;
int set_ci = 0;
- u32 cqn = -1;
- u32 rsn;
- u8 port;
+
+ dev = eq->dev;
+ eqt = dev->priv.eq_table;
while ((eqe = next_eqe_sw(eq))) {
/*
@@ -480,116 +208,12 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
*/
dma_rmb();
- mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
- eq->eqn, eqe_type_str(eqe->type));
- switch (eqe->type) {
- case MLX5_EVENT_TYPE_COMP:
- cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
- mlx5_eq_cq_completion(eq, cqn);
- break;
- case MLX5_EVENT_TYPE_DCT_DRAINED:
- rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
- rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
- mlx5_rsc_event(dev, rsn, eqe->type);
- break;
- case MLX5_EVENT_TYPE_PATH_MIG:
- case MLX5_EVENT_TYPE_COMM_EST:
- case MLX5_EVENT_TYPE_SQ_DRAINED:
- case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
- case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
- case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
- case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
- case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
- rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
- rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
- mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
- eqe_type_str(eqe->type), eqe->type, rsn);
- mlx5_rsc_event(dev, rsn, eqe->type);
- break;
-
- case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
- case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
- rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
- mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
- eqe_type_str(eqe->type), eqe->type, rsn);
- mlx5_srq_event(dev, rsn, eqe->type);
- break;
-
- case MLX5_EVENT_TYPE_CMD:
- mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
- break;
+ if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
+ atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
+ else
+ mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
- case MLX5_EVENT_TYPE_PORT_CHANGE:
- port = (eqe->data.port.port >> 4) & 0xf;
- switch (eqe->sub_type) {
- case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
- case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
- case MLX5_PORT_CHANGE_SUBTYPE_LID:
- case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
- case MLX5_PORT_CHANGE_SUBTYPE_GUID:
- case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
- case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
- if (dev->event)
- dev->event(dev, port_subtype_event(eqe->sub_type),
- (unsigned long)port);
- break;
- default:
- mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
- port, eqe->sub_type);
- }
- break;
- case MLX5_EVENT_TYPE_CQ_ERROR:
- cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
- mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
- cqn, eqe->data.cq_err.syndrome);
- mlx5_eq_cq_event(eq, cqn, eqe->type);
- break;
-
- case MLX5_EVENT_TYPE_PAGE_REQUEST:
- {
- u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
- s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
-
- mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
- func_id, npages);
- mlx5_core_req_pages_handler(dev, func_id, npages);
- }
- break;
-
- case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
- mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
- break;
-
- case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
- mlx5_port_module_event(dev, eqe);
- break;
-
- case MLX5_EVENT_TYPE_PPS_EVENT:
- mlx5_pps_event(dev, eqe);
- break;
-
- case MLX5_EVENT_TYPE_FPGA_ERROR:
- case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
- mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
- break;
-
- case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
- mlx5_temp_warning_event(dev, eqe);
- break;
-
- case MLX5_EVENT_TYPE_GENERAL_EVENT:
- general_event_handler(dev, eqe);
- break;
-
- case MLX5_EVENT_TYPE_DEVICE_TRACER:
- mlx5_fw_tracer_event(dev, eqe);
- break;
-
- default:
- mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
- eqe->type, eq->eqn);
- break;
- }
+ atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
++eq->cons_index;
++set_ci;
@@ -608,30 +232,9 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
eq_update_ci(eq, 1);
- if (cqn != -1)
- tasklet_schedule(&eq->tasklet_ctx.task);
-
return IRQ_HANDLED;
}
-/* Some architectures don't latch interrupts when they are disabled, so using
- * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
- * avoid losing them. It is not recommended to use it, unless this is the last
- * resort.
- */
-u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
-{
- u32 count_eqe;
-
- disable_irq(eq->irqn);
- count_eqe = eq->cons_index;
- mlx5_eq_int(eq->irqn, eq);
- count_eqe = eq->cons_index - count_eqe;
- enable_irq(eq->irqn);
-
- return count_eqe;
-}
-
static void init_eq_buf(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe;
@@ -643,39 +246,35 @@ static void init_eq_buf(struct mlx5_eq *eq)
}
}
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name,
- enum mlx5_eq_type type)
+static int
+create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
+ struct mlx5_eq_param *param)
{
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
struct mlx5_cq_table *cq_table = &eq->cq_table;
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
- irq_handler_t handler;
+ u8 vecidx = param->index;
__be64 *pas;
void *eqc;
int inlen;
u32 *in;
int err;
+ if (eq_table->irq_info[vecidx].context)
+ return -EEXIST;
+
/* Init CQ table */
memset(cq_table, 0, sizeof(*cq_table));
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
- eq->type = type;
- eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
+ eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (type == MLX5_EQ_TYPE_PF)
- handler = mlx5_eq_pf_int;
- else
-#endif
- handler = mlx5_eq_int;
-
init_eq_buf(eq);
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
@@ -691,7 +290,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
mlx5_fill_page_array(&eq->buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
- MLX5_SET64(create_eq_in, in, event_bitmask, mask);
+ MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
@@ -704,15 +303,17 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
if (err)
goto err_in;
- snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
+ snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
+ eq_table->irq_info[vecidx].context = param->context;
+ eq->vecidx = vecidx;
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = pci_irq_vector(dev->pdev, vecidx);
eq->dev = dev;
eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(eq->irqn, handler, 0,
- priv->irq_info[vecidx].name, eq);
+ err = request_irq(eq->irqn, param->handler, 0,
+ eq_table->irq_info[vecidx].name, param->context);
if (err)
goto err_eq;
@@ -720,21 +321,6 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
if (err)
goto err_irq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (type == MLX5_EQ_TYPE_PF) {
- err = init_pf_ctx(&eq->pf_ctx, name);
- if (err)
- goto err_irq;
- } else
-#endif
- {
- INIT_LIST_HEAD(&eq->tasklet_ctx.list);
- INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
- spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
- (unsigned long)&eq->tasklet_ctx);
- }
-
/* EQs are created in ARMED state
*/
eq_update_ci(eq, 1);
@@ -756,27 +342,25 @@ err_buf:
return err;
}
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+ struct mlx5_irq_info *irq_info;
int err;
+ irq_info = &eq_table->irq_info[eq->vecidx];
+
mlx5_debug_eq_remove(dev, eq);
- free_irq(eq->irqn, eq);
+
+ free_irq(eq->irqn, irq_info->context);
+ irq_info->context = NULL;
+
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
synchronize_irq(eq->irqn);
- if (eq->type == MLX5_EQ_TYPE_COMP) {
- tasklet_disable(&eq->tasklet_ctx.task);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- } else if (eq->type == MLX5_EQ_TYPE_PF) {
- cancel_work_sync(&eq->pf_ctx.work);
- destroy_workqueue(eq->pf_ctx.wq);
- mempool_destroy(eq->pf_ctx.pool);
-#endif
- }
mlx5_buf_free(dev, &eq->buf);
return err;
@@ -816,28 +400,106 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
return 0;
}
-int mlx5_eq_init(struct mlx5_core_dev *dev)
+int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
- int err;
+ struct mlx5_eq_table *eq_table;
+ int i, err;
- spin_lock_init(&dev->priv.eq_table.lock);
+ eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
+ if (!eq_table)
+ return -ENOMEM;
+
+ dev->priv.eq_table = eq_table;
err = mlx5_eq_debugfs_init(dev);
+ if (err)
+ goto kvfree_eq_table;
+
+ mutex_init(&eq_table->lock);
+ for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
+ ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
+ return 0;
+
+kvfree_eq_table:
+ kvfree(eq_table);
+ dev->priv.eq_table = NULL;
return err;
}
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
+void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
mlx5_eq_debugfs_cleanup(dev);
+ kvfree(dev->priv.eq_table);
}
-int mlx5_start_eqs(struct mlx5_core_dev *dev)
+/* Async EQs */
+
+static int create_async_eq(struct mlx5_core_dev *dev, const char *name,
+ struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+ int err;
+
+ mutex_lock(&eq_table->lock);
+ if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {
+ err = -ENOSPC;
+ goto unlock;
+ }
+
+ err = create_map_eq(dev, eq, name, param);
+unlock:
+ mutex_unlock(&eq_table->lock);
+ return err;
+}
+
+static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
+ mutex_lock(&eq_table->lock);
+ err = destroy_unmap_eq(dev, eq);
+ mutex_unlock(&eq_table->lock);
+ return err;
+}
+
+static int cq_err_event_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_eq_table *eqt;
+ struct mlx5_core_cq *cq;
+ struct mlx5_eqe *eqe;
+ struct mlx5_eq *eq;
+ u32 cqn;
+
+ /* type == MLX5_EVENT_TYPE_CQ_ERROR */
+
+ eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
+ eq = &eqt->async_eq;
+ eqe = data;
+
+ cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
+ mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
+ cqn, eqe->data.cq_err.syndrome);
+
+ cq = mlx5_eq_cq_get(eq, cqn);
+ if (unlikely(!cq)) {
+ mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
+ return NOTIFY_OK;
+ }
+
+ cq->event(cq, type);
+
+ mlx5_cq_put(cq);
+
+ return NOTIFY_OK;
+}
+
+static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
+{
+ u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+
if (MLX5_VPORT_MANAGER(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
@@ -865,127 +527,521 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);
- err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
- MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
- "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
+ if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
+
+ return async_event_mask;
+}
+
+static int create_async_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_param param = {};
+ int err;
+
+ MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
+ mlx5_eq_notifier_register(dev, &table->cq_err_nb);
+
+ param = (struct mlx5_eq_param) {
+ .index = MLX5_EQ_CMD_IDX,
+ .mask = 1ull << MLX5_EVENT_TYPE_CMD,
+ .nent = MLX5_NUM_CMD_EQE,
+ .context = &table->cmd_eq,
+ .handler = mlx5_eq_async_int,
+ };
+ err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, &param);
if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
- return err;
+ goto err0;
}
mlx5_cmd_use_events(dev);
- err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
- MLX5_NUM_ASYNC_EQE, async_event_mask,
- "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
+ param = (struct mlx5_eq_param) {
+ .index = MLX5_EQ_ASYNC_IDX,
+ .mask = gather_async_events_mask(dev),
+ .nent = MLX5_NUM_ASYNC_EQE,
+ .context = &table->async_eq,
+ .handler = mlx5_eq_async_int,
+ };
+ err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, &param);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
goto err1;
}
- err = mlx5_create_map_eq(dev, &table->pages_eq,
- MLX5_EQ_VEC_PAGES,
- /* TODO: sriov max_vf + */ 1,
- 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
- MLX5_EQ_TYPE_ASYNC);
+ param = (struct mlx5_eq_param) {
+ .index = MLX5_EQ_PAGEREQ_IDX,
+ .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
+ .nent = /* TODO: sriov max_vf + */ 1,
+ .context = &table->pages_eq,
+ .handler = mlx5_eq_async_int,
+ };
+ err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, &param);
if (err) {
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
goto err2;
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (MLX5_CAP_GEN(dev, pg)) {
- err = mlx5_create_map_eq(dev, &table->pfault_eq,
- MLX5_EQ_VEC_PFAULT,
- MLX5_NUM_ASYNC_EQE,
- 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
- "mlx5_page_fault_eq",
- MLX5_EQ_TYPE_PF);
- if (err) {
- mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
- err);
- goto err3;
- }
- }
-
- return err;
-err3:
- mlx5_destroy_unmap_eq(dev, &table->pages_eq);
-#else
return err;
-#endif
err2:
- mlx5_destroy_unmap_eq(dev, &table->async_eq);
+ destroy_async_eq(dev, &table->async_eq);
err1:
mlx5_cmd_use_polling(dev);
- mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
+ destroy_async_eq(dev, &table->cmd_eq);
+err0:
+ mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
return err;
}
-void mlx5_stop_eqs(struct mlx5_core_dev *dev)
+static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq_table *table = dev->priv.eq_table;
int err;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (MLX5_CAP_GEN(dev, pg)) {
- err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
- if (err)
- mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
- err);
- }
-#endif
-
- err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
+ err = destroy_async_eq(dev, &table->pages_eq);
if (err)
mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
err);
- err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+ err = destroy_async_eq(dev, &table->async_eq);
if (err)
mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
err);
+
mlx5_cmd_use_polling(dev);
- err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
+ err = destroy_async_eq(dev, &table->cmd_eq);
if (err)
mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
err);
+
+ mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen)
+struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
+ return &dev->priv.eq_table->async_eq;
+}
- MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
- MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
+{
+ synchronize_irq(dev->priv.eq_table->async_eq.irqn);
+}
+
+void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
+{
+ synchronize_irq(dev->priv.eq_table->cmd_eq.irqn);
+}
+
+/* Generic EQ API for mlx5_core consumers
+ * Needed For RDMA ODP EQ for now
+ */
+struct mlx5_eq *
+mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
+ struct mlx5_eq_param *param)
+{
+ struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
+ int err;
+
+ if (!eq)
+ return ERR_PTR(-ENOMEM);
+
+ err = create_async_eq(dev, name, eq, param);
+ if (err) {
+ kvfree(eq);
+ eq = ERR_PTR(err);
+ }
+
+ return eq;
+}
+EXPORT_SYMBOL(mlx5_eq_create_generic);
+
+int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+ int err;
+
+ if (IS_ERR(eq))
+ return -EINVAL;
+
+ err = destroy_async_eq(dev, eq);
+ if (err)
+ goto out;
+
+ kvfree(eq);
+out:
+ return err;
+}
+EXPORT_SYMBOL(mlx5_eq_destroy_generic);
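For reference, a minimal sketch (not part of this patch) of how a consumer might drive the generic EQ API exported above, which the comment says is needed for the RDMA ODP EQ. The handler body, the chosen vector index and the event mask below are illustrative assumptions, not values taken from this series:

static irqreturn_t my_eq_int(int irq, void *ctx)
{
	/* ctx is whatever was passed as param->context below */
	return IRQ_HANDLED;
}

static struct mlx5_eq *my_create_eq(struct mlx5_core_dev *dev, void *ctx)
{
	struct mlx5_eq_param param = {
		.index   = MLX5_EQ_PAGEREQ_IDX + 1,	/* assumed free slot below MLX5_EQ_MAX_ASYNC_EQS */
		.mask    = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
		.nent    = MLX5_NUM_ASYNC_EQE,
		.context = ctx,
		.handler = my_eq_int,
	};

	/* torn down later with mlx5_eq_destroy_generic(dev, eq) */
	return mlx5_eq_create_generic(dev, "my_generic_eq", &param);
}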
+
+struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
+{
+ u32 ci = eq->cons_index + cc;
+ struct mlx5_eqe *eqe;
+
+ eqe = get_eqe(eq, ci & (eq->nent - 1));
+ eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
+ /* Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
+ if (eqe)
+ dma_rmb();
+
+ return eqe;
+}
+EXPORT_SYMBOL(mlx5_eq_get_eqe);
+
+void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
+{
+ __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
+ u32 val;
+
+ eq->cons_index += cc;
+ val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
+
+ __raw_writel((__force u32)cpu_to_be32(val), addr);
+ /* We still want ordering, just not swabbing, so add a barrier */
+ mb();
+}
+EXPORT_SYMBOL(mlx5_eq_update_ci);
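For reference, a minimal sketch (not part of this patch) of the polling pattern these two exports enable: mlx5_eq_get_eqe() performs the owner-bit check and the dma_rmb() internally, so the caller only counts consumed entries and then publishes them; handle_one_eqe() is a hypothetical consumer routine:

static void my_poll_eq(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	u32 cc = 0;	/* entries consumed since the last doorbell */

	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
		handle_one_eqe(eqe);	/* hypothetical consumer handler */
		cc++;
	}

	/* push the consumer index forward and re-arm the EQ; a long-running
	 * consumer would also flush cc periodically, per the
	 * MLX5_NUM_SPARE_EQE comment earlier in this file.
	 */
	mlx5_eq_update_ci(eq, cc, true);
}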
+
+/* Completion EQs */
+
+static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+ int irq = pci_irq_vector(mdev->pdev, vecidx);
+ struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];
+
+ if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+ irq_info->mask);
+
+ if (IS_ENABLED(CONFIG_SMP) &&
+ irq_set_affinity_hint(irq, irq_info->mask))
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+ return 0;
+}
+
+static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+ struct mlx5_priv *priv = &mdev->priv;
+ int irq = pci_irq_vector(mdev->pdev, vecidx);
+ struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];
+
+ irq_set_affinity_hint(irq, NULL);
+ free_cpumask_var(irq_info->mask);
+}
+
+static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) {
+ err = set_comp_irq_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ clear_comp_irq_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++)
+ clear_comp_irq_affinity_hint(mdev, i);
+}
+
+static void destroy_comp_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq, *n;
+
+ clear_comp_irqs_affinity_hints(dev);
+
+#ifdef CONFIG_RFS_ACCEL
+ if (table->rmap) {
+ free_irq_cpu_rmap(table->rmap);
+ table->rmap = NULL;
+ }
+#endif
+ list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ list_del(&eq->list);
+ if (destroy_unmap_eq(dev, &eq->core))
+ mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
+ eq->core.eqn);
+ tasklet_disable(&eq->tasklet_ctx.task);
+ kfree(eq);
+ }
+}
+
+static int create_comp_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ char name[MLX5_MAX_IRQ_NAME];
+ struct mlx5_eq_comp *eq;
+ int ncomp_vec;
+ int nent;
+ int err;
+ int i;
+
+ INIT_LIST_HEAD(&table->comp_eqs_list);
+ ncomp_vec = table->num_comp_vectors;
+ nent = MLX5_COMP_EQ_SIZE;
+#ifdef CONFIG_RFS_ACCEL
+ table->rmap = alloc_irq_cpu_rmap(ncomp_vec);
+ if (!table->rmap)
+ return -ENOMEM;
+#endif
+ for (i = 0; i < ncomp_vec; i++) {
+ int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
+ struct mlx5_eq_param param = {};
+
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ if (!eq) {
+ err = -ENOMEM;
+ goto clean;
+ }
+
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
+ (unsigned long)&eq->tasklet_ctx);
+
+#ifdef CONFIG_RFS_ACCEL
+ irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx));
+#endif
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
+ param = (struct mlx5_eq_param) {
+ .index = vecidx,
+ .mask = 0,
+ .nent = nent,
+ .context = &eq->core,
+ .handler = mlx5_eq_comp_int
+ };
+ err = create_map_eq(dev, &eq->core, name, &param);
+ if (err) {
+ kfree(eq);
+ goto clean;
+ }
+ mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
+ /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
+ list_add_tail(&eq->list, &table->comp_eqs_list);
+ }
+
+ err = set_comp_irq_affinity_hints(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
+ goto clean;
+ }
+
+ return 0;
+
+clean:
+ destroy_comp_eqs(dev);
+ return err;
+}
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+ unsigned int *irqn)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq, *n;
+ int err = -ENOENT;
+ int i = 0;
+
+ list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ if (i++ == vector) {
+ *eqn = eq->core.eqn;
+ *irqn = eq->core.irqn;
+ err = 0;
+ break;
+ }
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_vector2eqn);
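For reference, a minimal sketch (not part of this patch) of how an interface driver resolves a completion vector into the EQN/IRQ pair it needs when setting up a CQ; the bounds check and the way the result is consumed are illustrative assumptions:

static int my_pick_comp_eq(struct mlx5_core_dev *dev, int vector, int *eqn)
{
	unsigned int irqn;
	int err;

	if (vector >= mlx5_comp_vectors_count(dev))
		return -EINVAL;

	err = mlx5_vector2eqn(dev, vector, eqn, &irqn);
	if (err)
		return err;

	/* *eqn would typically be written into the CQ context; irqn can be
	 * used with synchronize_irq() before tearing the CQ down.
	 */
	return 0;
}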
+
+unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
+{
+ return dev->priv.eq_table->num_comp_vectors;
+}
+EXPORT_SYMBOL(mlx5_comp_vectors_count);
+
+struct cpumask *
+mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
+{
+ /* TODO: consider irq_get_affinity_mask(irq) */
+ return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
+}
+EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+
+struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ return dev->priv.eq_table->rmap;
+#else
+ return NULL;
+#endif
+}
+
+struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq;
+
+ list_for_each_entry(eq, &table->comp_eqs_list, list) {
+ if (eq->core.eqn == eqn)
+ return eq;
+ }
+
+ return ERR_PTR(-ENOENT);
}
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- struct mlx5_eq *eq;
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ int i, max_eqs;
+
+ clear_comp_irqs_affinity_hints(dev);
#ifdef CONFIG_RFS_ACCEL
- if (dev->rmap) {
- free_irq_cpu_rmap(dev->rmap);
- dev->rmap = NULL;
+ if (table->rmap) {
+ free_irq_cpu_rmap(table->rmap);
+ table->rmap = NULL;
}
#endif
- list_for_each_entry(eq, &table->comp_eqs_list, list)
- free_irq(eq->irqn, eq);
-
- free_irq(table->pages_eq.irqn, &table->pages_eq);
- free_irq(table->async_eq.irqn, &table->async_eq);
- free_irq(table->cmd_eq.irqn, &table->cmd_eq);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (MLX5_CAP_GEN(dev, pg))
- free_irq(table->pfault_eq.irqn, &table->pfault_eq);
-#endif
+
+ mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
+ max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
+ for (i = max_eqs - 1; i >= 0; i--) {
+ if (!table->irq_info[i].context)
+ continue;
+ free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context);
+ table->irq_info[i].context = NULL;
+ }
+ mutex_unlock(&table->lock);
+ pci_free_irq_vectors(dev->pdev);
+}
+
+static int alloc_irq_vectors(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_eq_table *table = priv->eq_table;
+ int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+ MLX5_CAP_GEN(dev, max_num_eqs) :
+ 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int nvec;
+ int err;
+
+ nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_EQ_VEC_COMP_BASE;
+ nvec = min_t(int, nvec, num_eqs);
+ if (nvec <= MLX5_EQ_VEC_COMP_BASE)
+ return -ENOMEM;
+
+ table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
+ if (!table->irq_info)
+ return -ENOMEM;
+
+ nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
+ nvec, PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ err = nvec;
+ goto err_free_irq_info;
+ }
+
+ table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
+
+ return 0;
+
+err_free_irq_info:
+ kfree(table->irq_info);
+ return err;
+}
+
+static void free_irq_vectors(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+
pci_free_irq_vectors(dev->pdev);
+ kfree(priv->eq_table->irq_info);
+}
+
+int mlx5_eq_table_create(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = alloc_irq_vectors(dev);
+ if (err) {
+ mlx5_core_err(dev, "alloc irq vectors failed\n");
+ return err;
+ }
+
+ err = create_async_eqs(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to create async EQs\n");
+ goto err_async_eqs;
+ }
+
+ err = create_comp_eqs(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to create completion EQs\n");
+ goto err_comp_eqs;
+ }
+
+ return 0;
+err_comp_eqs:
+ destroy_async_eqs(dev);
+err_async_eqs:
+ free_irq_vectors(dev);
+ return err;
+}
+
+void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
+{
+ destroy_comp_eqs(dev);
+ destroy_async_eqs(dev);
+ free_irq_vectors(dev);
+}
+
+int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
+{
+ struct mlx5_eq_table *eqt = dev->priv.eq_table;
+
+ if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
+ return -EINVAL;
+
+ return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
+}
+
+int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
+{
+ struct mlx5_eq_table *eqt = dev->priv.eq_table;
+
+ if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
+ return -EINVAL;
+
+ return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
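For reference, a minimal sketch (not part of this patch) of a consumer subscribing to a single FW event type through mlx5_eq_notifier_register(); it follows the same pattern as cq_err_event_notifier above and the eswitch NIC_VPORT_CHANGE notifier later in this series, while 'struct my_consumer' and its work item are illustrative assumptions:

struct my_consumer {
	struct mlx5_nb nb;		/* embeds notifier_block + event_type */
	struct workqueue_struct *wq;
	struct work_struct update_work;
};

static int my_port_change_event(struct notifier_block *nb,
				unsigned long type, void *data)
{
	struct my_consumer *c = mlx5_nb_cof(nb, struct my_consumer, nb);
	struct mlx5_eqe *eqe = data;

	/* type == MLX5_EVENT_TYPE_PORT_CHANGE; the subtype picks the reason */
	if (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE)
		queue_work(c->wq, &c->update_work);
	return NOTIFY_OK;
}

static int my_consumer_start(struct mlx5_core_dev *dev, struct my_consumer *c)
{
	MLX5_NB_INIT(&c->nb, my_port_change_event, PORT_CHANGE);
	return mlx5_eq_notifier_register(dev, &c->nb);
}

static void my_consumer_stop(struct mlx5_core_dev *dev, struct my_consumer *c)
{
	mlx5_eq_notifier_unregister(dev, &c->nb);
}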
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d004957328f9..a44ea7b85614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
@@ -1567,7 +1568,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
/* Mark this vport as disabled to discard new events */
vport->enabled = false;
- synchronize_irq(pci_irq_vector(esw->dev->pdev, MLX5_EQ_VEC_ASYNC));
/* Wait for current already scheduled events to complete */
flush_workqueue(esw->work_queue);
/* Disable events from this vport */
@@ -1593,10 +1593,25 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
mutex_unlock(&esw->state_lock);
}
+static int eswitch_vport_event(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
+ struct mlx5_eqe *eqe = data;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+
+ vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
+ vport = &esw->vports[vport_num];
+ if (vport->enabled)
+ queue_work(esw->work_queue, &vport->vport_change_handler);
+
+ return NOTIFY_OK;
+}
+
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
int err;
@@ -1615,13 +1630,16 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
+
esw->mode = mode;
+ mlx5_lag_update(esw->dev);
+
if (mode == SRIOV_LEGACY) {
err = esw_create_legacy_fdb_table(esw);
} else {
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-
err = esw_offloads_init(esw, nvfs + 1);
}
@@ -1640,6 +1658,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
for (i = 0; i <= nvfs; i++)
esw_enable_vport(esw, i, enabled_events);
+ if (mode == SRIOV_LEGACY) {
+ MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+ mlx5_eq_notifier_register(esw->dev, &esw->nb);
+ }
+
esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
esw->enabled_vports);
return 0;
@@ -1647,8 +1670,10 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
abort:
esw->mode = SRIOV_NONE;
- if (mode == SRIOV_OFFLOADS)
+ if (mode == SRIOV_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+ }
return err;
}
@@ -1669,6 +1694,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
mc_promisc = &esw->mc_promisc;
nvports = esw->enabled_vports;
+ if (esw->mode == SRIOV_LEGACY)
+ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+
for (i = 0; i < esw->total_vports; i++)
esw_disable_vport(esw, i);
@@ -1685,8 +1713,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
old_mode = esw->mode;
esw->mode = SRIOV_NONE;
- if (old_mode == SRIOV_OFFLOADS)
+ mlx5_lag_update(esw->dev);
+
+ if (old_mode == SRIOV_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+ }
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
@@ -1777,23 +1809,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
kfree(esw);
}
-void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
-{
- struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
- u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
- struct mlx5_vport *vport;
-
- if (!esw) {
- pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
- vport_num);
- return;
- }
-
- vport = &esw->vports[vport_num];
- if (vport->enabled)
- queue_work(esw->work_queue, &vport->vport_change_handler);
-}
-
/* Vport Administration */
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
@@ -2219,3 +2234,14 @@ u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
+
+bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
+{
+ if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
+ dev1->priv.eswitch->mode == SRIOV_NONE) ||
+ (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
+ dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
+ return true;
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index aaafc9f17115..9c89eea9b2c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -143,6 +143,8 @@ struct mlx5_eswitch_fdb {
struct offloads_fdb {
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
+ struct mlx5_flow_group *peer_miss_grp;
+ struct mlx5_flow_handle **peer_miss_rules;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi;
@@ -165,6 +167,8 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
+ struct list_head peer_flows;
+ struct mutex peer_mutex;
DECLARE_HASHTABLE(encap_tbl, 8);
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
u8 inline_mode;
@@ -181,6 +185,7 @@ struct esw_mc_addr { /* SRIOV only */
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
+ struct mlx5_nb nb;
struct mlx5_eswitch_fdb fdb_table;
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
struct workqueue_struct *work_queue;
@@ -211,7 +216,6 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
@@ -281,13 +285,17 @@ enum mlx5_flow_match_level {
/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2
+enum {
+ MLX5_ESW_DEST_ENCAP = BIT(0),
+ MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
+};
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
- struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS];
- struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_core_dev *in_mdev;
+ struct mlx5_core_dev *counter_dev;
- int mirror_count;
+ int split_count;
int out_count;
int action;
@@ -296,7 +304,12 @@ struct mlx5_esw_flow_attr {
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
u8 total_vlan;
bool vlan_handled;
- u32 encap_id;
+ struct {
+ u32 flags;
+ struct mlx5_eswitch_rep *rep;
+ struct mlx5_core_dev *mdev;
+ u32 encap_id;
+ } dests[MLX5_MAX_FLOW_FWD_VPORTS];
u32 mod_hdr_id;
u8 match_level;
struct mlx5_fc *counter;
@@ -338,6 +351,9 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
+bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
+ struct mlx5_core_dev *dev1);
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(dev, format, ...) \
@@ -352,9 +368,9 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {}
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
+static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9eac137790f5..53065b6ae593 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -39,6 +39,7 @@
#include "eswitch.h"
#include "en.h"
#include "fs_core.h"
+#include "lib/devcom.h"
enum {
FDB_FAST_PATH = 0,
@@ -81,7 +82,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
- bool mirror = !!(attr->mirror_count);
+ bool split = !!(attr->split_count);
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int j, i = 0;
@@ -120,13 +121,21 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
dest[i].ft = ft;
i++;
} else {
- for (j = attr->mirror_count; j < attr->out_count; j++) {
+ for (j = attr->split_count; j < attr->out_count; j++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->out_rep[j]->vport;
+ dest[i].vport.num = attr->dests[j].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
- dest[i].vport.vhca_id_valid =
- !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ dest[i].vport.flags |=
+ MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.reformat_id = attr->dests[j].encap_id;
+ dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ dest[i].vport.reformat_id =
+ attr->dests[j].encap_id;
+ }
i++;
}
}
@@ -163,10 +172,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
- if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
- flow_act.reformat_id = attr->encap_id;
-
- fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
@@ -181,7 +187,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
if (attr->dest_chain)
esw_put_prio_table(esw, attr->dest_chain, 1, 0);
@@ -215,12 +221,17 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- for (i = 0; i < attr->mirror_count; i++) {
+ for (i = 0; i < attr->split_count; i++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->out_rep[i]->vport;
+ dest[i].vport.num = attr->dests[i].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
- dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
+ dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ dest[i].vport.reformat_id = attr->dests[i].encap_id;
+ }
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb,
@@ -268,7 +279,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr,
bool fwd_rule)
{
- bool mirror = (attr->mirror_count > 0);
+ bool split = (attr->split_count > 0);
mlx5_del_flow_rules(rule);
esw->offloads.num_flows--;
@@ -277,7 +288,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
esw_put_prio_table(esw, attr->chain, attr->prio, 1);
esw_put_prio_table(esw, attr->chain, attr->prio, 0);
} else {
- esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
if (attr->dest_chain)
esw_put_prio_table(esw, attr->dest_chain, 1, 0);
}
@@ -325,7 +336,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
in_rep = attr->in_rep;
- out_rep = attr->out_rep[0];
+ out_rep = attr->dests[0].rep;
if (push)
vport = in_rep;
@@ -346,7 +357,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
goto out_notsupp;
in_rep = attr->in_rep;
- out_rep = attr->out_rep[0];
+ out_rep = attr->dests[0].rep;
if (push && in_rep->vport == FDB_UPLINK_VPORT)
goto out_notsupp;
@@ -398,7 +409,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
+ if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
vport->vlan_refcount++;
attr->vlan_handled = true;
}
@@ -458,7 +469,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
+ if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
vport->vlan_refcount--;
return 0;
@@ -531,6 +542,98 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
+static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_destination *dest)
+{
+ void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(peer_dev, vhca_id));
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest->vport.num = 0;
+ dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
+ dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+}
+
+static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ struct mlx5_core_dev *peer_dev)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_handle **flows;
+ struct mlx5_flow_handle *flow;
+ struct mlx5_flow_spec *spec;
+ /* total vports is the same for both e-switches */
+ int nvports = esw->total_vports;
+ void *misc;
+ int err, i;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ peer_miss_rules_setup(peer_dev, spec, &dest);
+
+ flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
+ if (!flows) {
+ err = -ENOMEM;
+ goto alloc_flows_err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ for (i = 1; i < nvports; i++) {
+ MLX5_SET(fte_match_set_misc, misc, source_port, i);
+ flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
+ goto add_flow_err;
+ }
+ flows[i] = flow;
+ }
+
+ esw->fdb_table.offloads.peer_miss_rules = flows;
+
+ kvfree(spec);
+ return 0;
+
+add_flow_err:
+ for (i--; i > 0; i--)
+ mlx5_del_flow_rules(flows[i]);
+ kvfree(flows);
+alloc_flows_err:
+ kvfree(spec);
+ return err;
+}
+
+static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_handle **flows;
+ int i;
+
+ flows = esw->fdb_table.offloads.peer_miss_rules;
+
+ for (i = 1; i < esw->total_vports; i++)
+ mlx5_del_flow_rules(flows[i]);
+
+ kvfree(flows);
+}
+
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
struct mlx5_flow_act flow_act = {0};
@@ -801,7 +904,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
esw->fdb_table.offloads.fdb_left[i] =
ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
- table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;
+ table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
+ esw->total_vports;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@@ -856,6 +960,34 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
esw->fdb_table.offloads.send_to_vport_grp = g;
+ /* create peer esw miss group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_port);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ix + esw->total_vports - 1);
+ ix += esw->total_vports;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
+ goto peer_miss_err;
+ }
+ esw->fdb_table.offloads.peer_miss_grp = g;
+
/* create miss group */
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -888,6 +1020,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
miss_rule_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
esw_destroy_offloads_fast_fdb_tables(esw);
@@ -907,6 +1041,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
@@ -1163,6 +1298,105 @@ err_reps:
return err;
}
+#define ESW_OFFLOADS_DEVCOM_PAIR (0)
+#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
+
+static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch *peer_esw)
+{
+ int err;
+
+ err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
+
+static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
+{
+ mlx5e_tc_clean_fdb_peer_flows(esw);
+ esw_del_fdb_peer_miss_rules(esw);
+}
+
+static int mlx5_esw_offloads_devcom_event(int event,
+ void *my_data,
+ void *event_data)
+{
+ struct mlx5_eswitch *esw = my_data;
+ struct mlx5_eswitch *peer_esw = event_data;
+ struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+ int err;
+
+ switch (event) {
+ case ESW_OFFLOADS_DEVCOM_PAIR:
+ err = mlx5_esw_offloads_pair(esw, peer_esw);
+ if (err)
+ goto err_out;
+
+ err = mlx5_esw_offloads_pair(peer_esw, esw);
+ if (err)
+ goto err_pair;
+
+ mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
+ break;
+
+ case ESW_OFFLOADS_DEVCOM_UNPAIR:
+ if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ break;
+
+ mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+ mlx5_esw_offloads_unpair(peer_esw);
+ mlx5_esw_offloads_unpair(esw);
+ break;
+ }
+
+ return 0;
+
+err_pair:
+ mlx5_esw_offloads_unpair(esw);
+
+err_out:
+ mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
+ event, err);
+ return err;
+}
+
+static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+
+ INIT_LIST_HEAD(&esw->offloads.peer_flows);
+ mutex_init(&esw->offloads.peer_mutex);
+
+ if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ return;
+
+ mlx5_devcom_register_component(devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS,
+ mlx5_esw_offloads_devcom_event,
+ esw);
+
+ mlx5_devcom_send_event(devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS,
+ ESW_OFFLOADS_DEVCOM_PAIR, esw);
+}
+
+static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+
+ if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ return;
+
+ mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+ ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+
+ mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+}
+
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
@@ -1185,6 +1419,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
goto err_reps;
+ esw_offloads_devcom_init(esw);
return 0;
err_reps:
@@ -1215,14 +1450,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
}
}
- /* enable back PF RoCE */
- mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-
return err;
}
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
+ esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_reps(esw, nvports);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
new file mode 100644
index 000000000000..fbc42b7252a9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "lib/eq.h"
+#include "lib/mlx5.h"
+
+struct mlx5_event_nb {
+ struct mlx5_nb nb;
+ void *ctx;
+};
+
+/* General events handlers for the low level mlx5_core driver
+ *
+ * Other major feature-specific events, such as
+ * clock/eswitch/fpga/FW trace and many others, are handled elsewhere, with
+ * separate notifier callbacks, specifically by those mlx5 components.
+ */
+static int any_notifier(struct notifier_block *, unsigned long, void *);
+static int temp_warn(struct notifier_block *, unsigned long, void *);
+static int port_module(struct notifier_block *, unsigned long, void *);
+
+/* handler which forwards the event to events->nh, driver notifiers */
+static int forward_event(struct notifier_block *, unsigned long, void *);
+
+static struct mlx5_nb events_nbs_ref[] = {
+ /* Events to be processed by mlx5_core */
+ {.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY },
+ {.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT },
+ {.nb.notifier_call = port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT },
+
+ /* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
+ /* QP/WQ resource events to forward */
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_DCT_DRAINED },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_COMM_EST },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SQ_DRAINED },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_LAST_WQE },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_CATAS_ERROR },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG_FAILED },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_ACCESS_ERROR },
+ /* SRQ events */
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_CATAS_ERROR },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_RQ_LIMIT },
+};
+
+struct mlx5_events {
+ struct mlx5_core_dev *dev;
+ struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
+ /* driver notifier chain */
+ struct atomic_notifier_head nh;
+ /* port module events stats */
+ struct mlx5_pme_stats pme_stats;
+};
+
+static const char *eqe_type_str(u8 type)
+{
+ switch (type) {
+ case MLX5_EVENT_TYPE_COMP:
+ return "MLX5_EVENT_TYPE_COMP";
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ return "MLX5_EVENT_TYPE_PATH_MIG";
+ case MLX5_EVENT_TYPE_COMM_EST:
+ return "MLX5_EVENT_TYPE_COMM_EST";
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ return "MLX5_EVENT_TYPE_SQ_DRAINED";
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ return "MLX5_EVENT_TYPE_CQ_ERROR";
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
+ case MLX5_EVENT_TYPE_INTERNAL_ERROR:
+ return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
+ case MLX5_EVENT_TYPE_PORT_CHANGE:
+ return "MLX5_EVENT_TYPE_PORT_CHANGE";
+ case MLX5_EVENT_TYPE_GPIO_EVENT:
+ return "MLX5_EVENT_TYPE_GPIO_EVENT";
+ case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
+ return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
+ case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
+ return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
+ case MLX5_EVENT_TYPE_REMOTE_CONFIG:
+ return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
+ case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
+ return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
+ case MLX5_EVENT_TYPE_STALL_EVENT:
+ return "MLX5_EVENT_TYPE_STALL_EVENT";
+ case MLX5_EVENT_TYPE_CMD:
+ return "MLX5_EVENT_TYPE_CMD";
+ case MLX5_EVENT_TYPE_PAGE_REQUEST:
+ return "MLX5_EVENT_TYPE_PAGE_REQUEST";
+ case MLX5_EVENT_TYPE_PAGE_FAULT:
+ return "MLX5_EVENT_TYPE_PAGE_FAULT";
+ case MLX5_EVENT_TYPE_PPS_EVENT:
+ return "MLX5_EVENT_TYPE_PPS_EVENT";
+ case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
+ return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
+ case MLX5_EVENT_TYPE_FPGA_ERROR:
+ return "MLX5_EVENT_TYPE_FPGA_ERROR";
+ case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
+ return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
+ case MLX5_EVENT_TYPE_GENERAL_EVENT:
+ return "MLX5_EVENT_TYPE_GENERAL_EVENT";
+ case MLX5_EVENT_TYPE_MONITOR_COUNTER:
+ return "MLX5_EVENT_TYPE_MONITOR_COUNTER";
+ case MLX5_EVENT_TYPE_DEVICE_TRACER:
+ return "MLX5_EVENT_TYPE_DEVICE_TRACER";
+ default:
+ return "Unrecognized event";
+ }
+}
+
+/* handles all FW events, type == eqe->type */
+static int any_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+
+ mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d)\n",
+ eqe_type_str(eqe->type), eqe->sub_type);
+ return NOTIFY_OK;
+}
+
+/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */
+static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+ u64 value_lsb;
+ u64 value_msb;
+
+ value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+ value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
+
+ mlx5_core_warn(events->dev,
+ "High temperature on sensors with bit set %llx %llx",
+ value_msb, value_lsb);
+
+ return NOTIFY_OK;
+}
+
+/* MLX5_EVENT_TYPE_PORT_MODULE_EVENT */
+static const char *mlx5_pme_status_to_string(enum port_module_event_status_type status)
+{
+ switch (status) {
+ case MLX5_MODULE_STATUS_PLUGGED:
+ return "Cable plugged";
+ case MLX5_MODULE_STATUS_UNPLUGGED:
+ return "Cable unplugged";
+ case MLX5_MODULE_STATUS_ERROR:
+ return "Cable error";
+ case MLX5_MODULE_STATUS_DISABLED:
+ return "Cable disabled";
+ default:
+ return "Unknown status";
+ }
+}
+
+static const char *mlx5_pme_error_to_string(enum port_module_event_error_type error)
+{
+ switch (error) {
+ case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
+ return "Power budget exceeded";
+ case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX:
+ return "Long Range for non MLNX cable";
+ case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
+ return "Bus stuck (I2C or data shorted)";
+ case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
+ return "No EEPROM/retry timeout";
+ case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
+ return "Enforce part number list";
+ case MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER:
+ return "Unknown identifier";
+ case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
+ return "High Temperature";
+ case MLX5_MODULE_EVENT_ERROR_BAD_CABLE:
+ return "Bad or shorted cable/module";
+ case MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED:
+ return "One or more network ports have been powered down due to insufficient/unadvertised power on the PCIe slot";
+ default:
+ return "Unknown error";
+ }
+}
+
+/* type == MLX5_EVENT_TYPE_PORT_MODULE_EVENT */
+static int port_module(struct notifier_block *nb, unsigned long type, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+
+ enum port_module_event_status_type module_status;
+ enum port_module_event_error_type error_type;
+ struct mlx5_eqe_port_module *module_event_eqe;
+ const char *status_str, *error_str;
+ u8 module_num;
+
+ module_event_eqe = &eqe->data.port_module;
+ module_num = module_event_eqe->module;
+ module_status = module_event_eqe->module_status &
+ PORT_MODULE_EVENT_MODULE_STATUS_MASK;
+ error_type = module_event_eqe->error_type &
+ PORT_MODULE_EVENT_ERROR_TYPE_MASK;
+
+ if (module_status < MLX5_MODULE_STATUS_NUM)
+ events->pme_stats.status_counters[module_status]++;
+ status_str = mlx5_pme_status_to_string(module_status);
+
+ if (module_status == MLX5_MODULE_STATUS_ERROR) {
+ if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
+ events->pme_stats.error_counters[error_type]++;
+ error_str = mlx5_pme_error_to_string(error_type);
+ }
+
+ if (!printk_ratelimit())
+ return NOTIFY_OK;
+
+ if (module_status == MLX5_MODULE_STATUS_ERROR)
+ mlx5_core_err(events->dev,
+ "Port module event[error]: module %u, %s, %s\n",
+ module_num, status_str, error_str);
+ else
+ mlx5_core_info(events->dev,
+ "Port module event: module %u, %s\n",
+ module_num, status_str);
+
+ return NOTIFY_OK;
+}
+
+void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats)
+{
+ *stats = dev->priv.events->pme_stats;
+}
+
+/* forward event as is to registered interfaces (mlx5e/mlx5_ib) */
+static int forward_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+
+ mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d) forward to interfaces\n",
+ eqe_type_str(eqe->type), eqe->sub_type);
+ atomic_notifier_call_chain(&events->nh, event, data);
+ return NOTIFY_OK;
+}
+
+int mlx5_events_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_events *events = kzalloc(sizeof(*events), GFP_KERNEL);
+
+ if (!events)
+ return -ENOMEM;
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
+ events->dev = dev;
+ dev->priv.events = events;
+ return 0;
+}
+
+void mlx5_events_cleanup(struct mlx5_core_dev *dev)
+{
+ kvfree(dev->priv.events);
+}
+
+void mlx5_events_start(struct mlx5_core_dev *dev)
+{
+ struct mlx5_events *events = dev->priv.events;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(events_nbs_ref); i++) {
+ events->notifiers[i].nb = events_nbs_ref[i];
+ events->notifiers[i].ctx = events;
+ mlx5_eq_notifier_register(dev, &events->notifiers[i].nb);
+ }
+}
+
+void mlx5_events_stop(struct mlx5_core_dev *dev)
+{
+ struct mlx5_events *events = dev->priv.events;
+ int i;
+
+ for (i = ARRAY_SIZE(events_nbs_ref) - 1; i >= 0 ; i--)
+ mlx5_eq_notifier_unregister(dev, &events->notifiers[i].nb);
+}
+
+int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return atomic_notifier_chain_register(&events->nh, nb);
+}
+EXPORT_SYMBOL(mlx5_notifier_register);
+
+int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return atomic_notifier_chain_unregister(&events->nh, nb);
+}
+EXPORT_SYMBOL(mlx5_notifier_unregister);
+
+int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data)
+{
+ return atomic_notifier_call_chain(&events->nh, event, data);
+}
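/*
 * Usage sketch (illustrative, not taken from this patch): a consumer such as
 * mlx5e or mlx5_ib could subscribe to the notifier chain exported above via
 * mlx5_notifier_register(). The callback and variable names below are
 * hypothetical; which event types actually reach this chain is decided by the
 * events_nbs_ref table earlier in events.c.
 */
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>

static int example_port_change_event(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	struct mlx5_eqe *eqe = data;

	if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
		return NOTIFY_DONE;

	/* eqe->sub_type distinguishes e.g. link up/down */
	pr_debug("port change event, subtype %d\n", eqe->sub_type);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_port_change_event,
};

/* typically paired around the interface's add()/remove() callbacks:
 *	mlx5_notifier_register(mdev, &example_nb);
 *	...
 *	mlx5_notifier_unregister(mdev, &example_nb);
 */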
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 8ca1d1949d93..873541ef4c1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -334,7 +334,7 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
{
u8 opcode, status = 0;
- opcode = cqe->op_own >> 4;
+ opcode = get_cqe_opcode(cqe);
switch (opcode) {
case MLX5_CQE_REQ_ERR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 436a8136f26f..27c5f6c7d36a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -36,6 +36,7 @@
#include "mlx5_core.h"
#include "lib/mlx5.h"
+#include "lib/eq.h"
#include "fpga/core.h"
#include "fpga/conn.h"
@@ -145,6 +146,22 @@ static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
return 0;
}
+static int mlx5_fpga_event(struct mlx5_fpga_device *, unsigned long, void *);
+
+static int fpga_err_event(struct notifier_block *nb, unsigned long event, void *eqe)
+{
+ struct mlx5_fpga_device *fdev = mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_err_nb);
+
+ return mlx5_fpga_event(fdev, event, eqe);
+}
+
+static int fpga_qp_err_event(struct notifier_block *nb, unsigned long event, void *eqe)
+{
+ struct mlx5_fpga_device *fdev = mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_qp_err_nb);
+
+ return mlx5_fpga_event(fdev, event, eqe);
+}
+
int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
@@ -185,6 +202,11 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
if (err)
goto out;
+ MLX5_NB_INIT(&fdev->fpga_err_nb, fpga_err_event, FPGA_ERROR);
+ MLX5_NB_INIT(&fdev->fpga_qp_err_nb, fpga_qp_err_event, FPGA_QP_ERROR);
+ mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_err_nb);
+ mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_qp_err_nb);
+
err = mlx5_fpga_conn_device_init(fdev);
if (err)
goto err_rsvd_gid;
@@ -201,6 +223,8 @@ err_conn_init:
mlx5_fpga_conn_device_cleanup(fdev);
err_rsvd_gid:
+ mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
+ mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);
mlx5_core_unreserve_gids(mdev, max_num_qps);
out:
spin_lock_irqsave(&fdev->state_lock, flags);
@@ -256,6 +280,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
}
mlx5_fpga_conn_device_cleanup(fdev);
+ mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
+ mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);
+
max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
mlx5_core_unreserve_gids(mdev, max_num_qps);
}
@@ -283,9 +310,10 @@ static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome)
return "Unknown";
}
-void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
+static int mlx5_fpga_event(struct mlx5_fpga_device *fdev,
+ unsigned long event, void *eqe)
{
- struct mlx5_fpga_device *fdev = mdev->fpga;
+ void *data = ((struct mlx5_eqe *)eqe)->data.raw;
const char *event_name;
bool teardown = false;
unsigned long flags;
@@ -303,9 +331,7 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn);
break;
default:
- mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
- event);
- return;
+ return NOTIFY_DONE;
}
spin_lock_irqsave(&fdev->state_lock, flags);
@@ -326,4 +352,6 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
*/
if (teardown)
mlx5_trigger_health_work(fdev->mdev);
+
+ return NOTIFY_OK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 3e2355c8df3f..7e2e871dbf83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -35,11 +35,16 @@
#ifdef CONFIG_MLX5_FPGA
+#include <linux/mlx5/eq.h>
+
+#include "lib/eq.h"
#include "fpga/cmd.h"
/* Represents an Innova device */
struct mlx5_fpga_device {
struct mlx5_core_dev *mdev;
+ struct mlx5_nb fpga_err_nb;
+ struct mlx5_nb fpga_qp_err_nb;
spinlock_t state_lock; /* Protects state transitions */
enum mlx5_fpga_status state;
enum mlx5_fpga_image last_admin_image;
@@ -82,7 +87,6 @@ int mlx5_fpga_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev);
-void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data);
#else
@@ -104,11 +108,6 @@ static inline void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
{
}
-static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event,
- void *data)
-{
-}
-
#endif
#endif /* __MLX5_FPGA_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 08a891f9aade..c44ccb67c4a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -308,22 +308,68 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
+ struct fs_fte *fte, bool *extended_dest)
+{
+ int fw_log_max_fdb_encap_uplink =
+ MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
+ int num_fwd_destinations = 0;
+ struct mlx5_flow_rule *dst;
+ int num_encap = 0;
+
+ *extended_dest = false;
+ if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return 0;
+
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ num_encap++;
+ num_fwd_destinations++;
+ }
+ if (num_fwd_destinations > 1 && num_encap > 0)
+ *extended_dest = true;
+
+ if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
+ mlx5_core_warn(dev, "FW does not support extended destination");
+ return -EOPNOTSUPP;
+ }
+ if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
+ mlx5_core_warn(dev, "FW does not support more than %d encaps",
+ 1 << fw_log_max_fdb_encap_uplink);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5_flow_table *ft,
unsigned group_id,
struct fs_fte *fte)
{
- unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
- fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+ bool extended_dest = false;
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
+ unsigned int inlen;
+ int dst_cnt_size;
void *in_dests;
u32 *in;
int err;
+ if (mlx5_set_extended_dest(dev, fte, &extended_dest))
+ return -EOPNOTSUPP;
+
+ if (!extended_dest)
+ dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
+ else
+ dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
+
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -343,9 +389,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
- MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
- fte->action.reformat_id);
+ MLX5_SET(flow_context, in_flow_context, extended_destination,
+ extended_dest);
+ if (extended_dest) {
+ u32 action;
+
+ action = fte->action.action &
+ ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ MLX5_SET(flow_context, in_flow_context, action, action);
+ } else {
+ MLX5_SET(flow_context, in_flow_context, action,
+ fte->action.action);
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.reformat_id);
+ }
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_id);
@@ -387,10 +444,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
id = dst->dest_attr.vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
- dst->dest_attr.vport.vhca_id_valid);
+ !!(dst->dest_attr.vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID));
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
+ if (extended_dest) {
+ MLX5_SET(dest_format_struct, in_dests,
+ packet_reformat,
+ !!(dst->dest_attr.vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
+ MLX5_SET(extended_dest_format, in_dests,
+ packet_reformat_id,
+ dst->dest_attr.vport.reformat_id);
+ }
break;
default:
id = dst->dest_attr.tir_num;
@@ -399,7 +466,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(dest_format_struct, in_dests, destination_type,
type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
- in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ in_dests += dst_cnt_size;
list_size++;
}
@@ -420,7 +487,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
dst->dest_attr.counter_id);
- in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ in_dests += dst_cnt_size;
list_size++;
}
if (list_size > max_list_size) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 08233cf44871..79f122b45def 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1373,7 +1373,10 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
{
if (d1->type == d2->type) {
if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
- d1->vport.num == d2->vport.num) ||
+ d1->vport.num == d2->vport.num &&
+ d1->vport.flags == d2->vport.flags &&
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
+ (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index b51ad217da32..2dc86347af58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -145,29 +145,6 @@ struct mlx5_flow_table {
struct rhltable fgs_hash;
};
-struct mlx5_fc_cache {
- u64 packets;
- u64 bytes;
- u64 lastuse;
-};
-
-struct mlx5_fc {
- struct list_head list;
- struct llist_node addlist;
- struct llist_node dellist;
-
- /* last{packets,bytes} members are used when calculating the delta since
- * last reading
- */
- u64 lastpackets;
- u64 lastbytes;
-
- u32 id;
- bool aging;
-
- struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
-};
-
struct mlx5_ft_underlay_qp {
struct list_head list;
u32 qpn;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 32accd6b041b..c6c28f56aa29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -41,6 +41,29 @@
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ struct list_head list;
+ struct llist_node addlist;
+ struct llist_node dellist;
+
+ /* last{packets,bytes} members are used when calculating the delta since
+ * last reading
+ */
+ u64 lastpackets;
+ u64 lastbytes;
+
+ u32 id;
+ bool aging;
+
+ struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
/* locking scheme:
*
* It is the responsibility of the user to prevent concurrent calls or bad
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 43118de8ee99..196c07383082 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -38,6 +38,8 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
+#include "lib/mlx5.h"
enum {
MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
@@ -78,29 +80,6 @@ void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
&dev->iseg->cmdq_addr_l_sz);
}
-static void trigger_cmd_completions(struct mlx5_core_dev *dev)
-{
- unsigned long flags;
- u64 vector;
-
- /* wait for pending handlers to complete */
- synchronize_irq(pci_irq_vector(dev->pdev, MLX5_EQ_VEC_CMD));
- spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
- vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
- if (!vector)
- goto no_trig;
-
- vector |= MLX5_TRIGGERED_CMD_COMP;
- spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
-
- mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
- mlx5_cmd_comp_handler(dev, vector, true);
- return;
-
-no_trig:
- spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
-}
-
static int in_fatal(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -124,10 +103,10 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
mlx5_core_err(dev, "start\n");
if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
- trigger_cmd_completions(dev);
+ mlx5_cmd_trigger_completions(dev);
}
- mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
+ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
mlx5_core_err(dev, "end\n");
unlock:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 11dabd62e2c7..bfc0f6581729 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -87,7 +87,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
netdev->mtu = max_mtu;
- mlx5e_build_nic_params(mdev, &priv->channels.params,
+ mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params,
mlx5e_get_netdev_max_channels(netdev),
netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 582b2f18010a..3a6baed722d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -34,11 +34,15 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
+#include "eswitch.h"
enum {
- MLX5_LAG_FLAG_BONDED = 1 << 0,
+ MLX5_LAG_FLAG_ROCE = 1 << 0,
+ MLX5_LAG_FLAG_SRIOV = 1 << 1,
};
+#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV)
+
struct lag_func {
struct mlx5_core_dev *dev;
struct net_device *netdev;
@@ -61,11 +65,6 @@ struct mlx5_lag {
struct lag_tracker tracker;
struct delayed_work bond_work;
struct notifier_block nb;
-
- /* Admin state. Allow lag only if allowed is true
- * even if network conditions for lag were met
- */
- bool allowed;
};
/* General purpose, use for short periods of time.
@@ -165,9 +164,19 @@ static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
return -1;
}
-static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
+}
+
+static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
+}
+
+static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
+ return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
}
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
@@ -186,36 +195,131 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
*port2 = 1;
}
-static void mlx5_activate_lag(struct mlx5_lag *ldev,
- struct lag_tracker *tracker)
+static void mlx5_modify_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
{
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ u8 v2p_port1, v2p_port2;
int err;
- ldev->flags |= MLX5_LAG_FLAG_BONDED;
+ mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
+ &v2p_port2);
+
+ if (v2p_port1 != ldev->v2p_map[0] ||
+ v2p_port2 != ldev->v2p_map[1]) {
+ ldev->v2p_map[0] = v2p_port1;
+ ldev->v2p_map[1] = v2p_port2;
+
+ mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
+ ldev->v2p_map[0], ldev->v2p_map[1]);
+
+ err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+ if (err)
+ mlx5_core_err(dev0,
+ "Failed to modify LAG (%d)\n",
+ err);
+ }
+}
+
+static int mlx5_create_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ int err;
mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
&ldev->v2p_map[1]);
+ mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
+ ldev->v2p_map[0], ldev->v2p_map[1]);
+
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
if (err)
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
err);
+ return err;
+}
+
+static int mlx5_activate_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ u8 flags)
+{
+ bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ int err;
+
+ err = mlx5_create_lag(ldev, tracker);
+ if (err) {
+ if (roce_lag) {
+ mlx5_core_err(dev0,
+ "Failed to activate RoCE LAG\n");
+ } else {
+ mlx5_core_err(dev0,
+ "Failed to activate VF LAG\n"
+ "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
+ }
+ return err;
+ }
+
+ ldev->flags |= flags;
+ return 0;
}
-static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
+static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ bool roce_lag = __mlx5_lag_is_roce(ldev);
int err;
- ldev->flags &= ~MLX5_LAG_FLAG_BONDED;
+ ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
err = mlx5_cmd_destroy_lag(dev0);
- if (err)
- mlx5_core_err(dev0,
- "Failed to destroy LAG (%d)\n",
- err);
+ if (err) {
+ if (roce_lag) {
+ mlx5_core_err(dev0,
+ "Failed to deactivate RoCE LAG; driver restart required\n");
+ } else {
+ mlx5_core_err(dev0,
+ "Failed to deactivate VF LAG; driver restart required\n"
+ "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
+ }
+ }
+
+ return err;
+}
+
+static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+{
+ if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+ return false;
+
+#ifdef CONFIG_MLX5_ESWITCH
+ return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+#else
+ return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) &&
+ !mlx5_sriov_is_enabled(ldev->pf[1].dev));
+#endif
+}
+
+static void mlx5_lag_add_ib_devices(struct mlx5_lag *ldev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ mlx5_add_dev_by_protocol(ldev->pf[i].dev,
+ MLX5_INTERFACE_PROTOCOL_IB);
+}
+
+static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
+ MLX5_INTERFACE_PROTOCOL_IB);
}
static void mlx5_do_bond(struct mlx5_lag *ldev)
@@ -223,9 +327,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
struct lag_tracker tracker;
- u8 v2p_port1, v2p_port2;
- int i, err;
- bool do_bond;
+ bool do_bond, roce_lag;
+ int err;
if (!dev0 || !dev1)
return;
@@ -234,42 +337,45 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
tracker = ldev->tracker;
mutex_unlock(&lag_mutex);
- do_bond = tracker.is_bonded && ldev->allowed;
+ do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
- if (do_bond && !mlx5_lag_is_bonded(ldev)) {
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
- MLX5_INTERFACE_PROTOCOL_IB);
+ if (do_bond && !__mlx5_lag_is_active(ldev)) {
+ roce_lag = !mlx5_sriov_is_enabled(dev0) &&
+ !mlx5_sriov_is_enabled(dev1);
- mlx5_activate_lag(ldev, &tracker);
+ if (roce_lag)
+ mlx5_lag_remove_ib_devices(ldev);
- mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_nic_vport_enable_roce(dev1);
- } else if (do_bond && mlx5_lag_is_bonded(ldev)) {
- mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
- &v2p_port2);
+ err = mlx5_activate_lag(ldev, &tracker,
+ roce_lag ? MLX5_LAG_FLAG_ROCE :
+ MLX5_LAG_FLAG_SRIOV);
+ if (err) {
+ if (roce_lag)
+ mlx5_lag_add_ib_devices(ldev);
- if ((v2p_port1 != ldev->v2p_map[0]) ||
- (v2p_port2 != ldev->v2p_map[1])) {
- ldev->v2p_map[0] = v2p_port1;
- ldev->v2p_map[1] = v2p_port2;
+ return;
+ }
- err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
- if (err)
- mlx5_core_err(dev0,
- "Failed to modify LAG (%d)\n",
- err);
+ if (roce_lag) {
+ mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_nic_vport_enable_roce(dev1);
+ }
+ } else if (do_bond && __mlx5_lag_is_active(ldev)) {
+ mlx5_modify_lag(ldev, &tracker);
+ } else if (!do_bond && __mlx5_lag_is_active(ldev)) {
+ roce_lag = __mlx5_lag_is_roce(ldev);
+
+ if (roce_lag) {
+ mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_nic_vport_disable_roce(dev1);
}
- } else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
- mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_nic_vport_disable_roce(dev1);
- mlx5_deactivate_lag(ldev);
+ err = mlx5_deactivate_lag(ldev);
+ if (err)
+ return;
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- if (ldev->pf[i].dev)
- mlx5_add_dev_by_protocol(ldev->pf[i].dev,
- MLX5_INTERFACE_PROTOCOL_IB);
+ if (roce_lag)
+ mlx5_lag_add_ib_devices(ldev);
}
}
@@ -419,15 +525,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
-static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
-{
- if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) ||
- (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev)))
- return false;
- else
- return true;
-}
-
static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
struct mlx5_lag *ldev;
@@ -437,7 +534,6 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void)
return NULL;
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
- ldev->allowed = mlx5_lag_check_prereq(ldev);
return ldev;
}
@@ -462,7 +558,6 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
- ldev->allowed = mlx5_lag_check_prereq(ldev);
dev->priv.lag = ldev;
mutex_unlock(&lag_mutex);
@@ -484,7 +579,6 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
- ldev->allowed = mlx5_lag_check_prereq(ldev);
mutex_unlock(&lag_mutex);
}
@@ -532,7 +626,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
if (!ldev)
return;
- if (mlx5_lag_is_bonded(ldev))
+ if (__mlx5_lag_is_active(ldev))
mlx5_deactivate_lag(ldev);
mlx5_lag_dev_remove_pf(ldev, dev);
@@ -549,56 +643,61 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
}
}
-bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
bool res;
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- res = ldev && mlx5_lag_is_bonded(ldev);
+ res = ldev && __mlx5_lag_is_roce(ldev);
mutex_unlock(&lag_mutex);
return res;
}
-EXPORT_SYMBOL(mlx5_lag_is_active);
+EXPORT_SYMBOL(mlx5_lag_is_roce);
-static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow)
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
- int ret = 0;
- bool lag_active;
-
- mlx5_dev_list_lock();
+ bool res;
+ mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- if (!ldev) {
- ret = -ENODEV;
- goto unlock;
- }
- lag_active = mlx5_lag_is_bonded(ldev);
- if (!mlx5_lag_check_prereq(ldev) && allow) {
- ret = -EINVAL;
- goto unlock;
- }
- if (ldev->allowed == allow)
- goto unlock;
- ldev->allowed = allow;
- if ((lag_active && !allow) || allow)
- mlx5_do_bond(ldev);
-unlock:
- mlx5_dev_list_unlock();
- return ret;
+ res = ldev && __mlx5_lag_is_active(ldev);
+ mutex_unlock(&lag_mutex);
+
+ return res;
}
+EXPORT_SYMBOL(mlx5_lag_is_active);
-int mlx5_lag_forbid(struct mlx5_core_dev *dev)
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
- return mlx5_lag_set_state(dev, false);
+ struct mlx5_lag *ldev;
+ bool res;
+
+ mutex_lock(&lag_mutex);
+ ldev = mlx5_lag_dev_get(dev);
+ res = ldev && __mlx5_lag_is_sriov(ldev);
+ mutex_unlock(&lag_mutex);
+
+ return res;
}
+EXPORT_SYMBOL(mlx5_lag_is_sriov);
-int mlx5_lag_allow(struct mlx5_core_dev *dev)
+void mlx5_lag_update(struct mlx5_core_dev *dev)
{
- return mlx5_lag_set_state(dev, true);
+ struct mlx5_lag *ldev;
+
+ mlx5_dev_list_lock();
+ ldev = mlx5_lag_dev_get(dev);
+ if (!ldev)
+ goto unlock;
+
+ mlx5_do_bond(ldev);
+
+unlock:
+ mlx5_dev_list_unlock();
}
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
@@ -609,7 +708,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- if (!(ldev && mlx5_lag_is_bonded(ldev)))
+ if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
@@ -638,7 +737,7 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
return true;
ldev = mlx5_lag_dev_get(dev);
- if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+ if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev)
return true;
/* If bonded, we do not add an IB device for PF1. */
@@ -665,7 +764,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- if (ldev && mlx5_lag_is_bonded(ldev)) {
+ if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
mdev[0] = ldev->pf[0].dev;
mdev[1] = ldev->pf[1].dev;
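/*
 * Usage sketch (illustrative, not taken from this patch): how an upper driver
 * could consume the new LAG mode queries exported above. The function name is
 * hypothetical and the declarations are assumed to be exposed through
 * linux/mlx5/driver.h.
 */
#include <linux/mlx5/driver.h>

static void example_report_lag_mode(struct mlx5_core_dev *dev)
{
	if (!mlx5_lag_is_active(dev))
		return;

	if (mlx5_lag_is_roce(dev))
		/* one IB device spans both ports; the bond carries RoCE traffic */
		pr_info("mlx5 bond active in RoCE LAG mode\n");
	else if (mlx5_lag_is_sriov(dev))
		/* bond was set up for VF LAG (SR-IOV) */
		pr_info("mlx5 bond active in VF LAG mode\n");
}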
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0d90b1b4a3d3..ca0ee9916e9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -33,6 +33,7 @@
#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
+#include "lib/eq.h"
#include "en.h"
#include "clock.h"
@@ -71,7 +72,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
clock);
- return mlx5_read_internal_timer(mdev) & cc->mask;
+ return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
}
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
@@ -155,15 +156,19 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
return 0;
}
-static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
ptp_info);
- u64 ns;
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
+ clock);
unsigned long flags;
+ u64 cycles, ns;
write_seqlock_irqsave(&clock->lock, flags);
- ns = timecounter_read(&clock->tc);
+ cycles = mlx5_read_internal_timer(mdev, sts);
+ ns = timecounter_cyc2time(&clock->tc, cycles);
write_sequnlock_irqrestore(&clock->lock, flags);
*ts = ns_to_timespec64(ns);
@@ -306,7 +311,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
ts.tv_sec = rq->perout.start.sec;
ts.tv_nsec = rq->perout.start.nsec;
ns = timespec64_to_ns(&ts);
- cycles_now = mlx5_read_internal_timer(mdev);
+ cycles_now = mlx5_read_internal_timer(mdev, NULL);
write_seqlock_irqsave(&clock->lock, flags);
nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
nsec_delta = ns - nsec_now;
@@ -383,7 +388,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.pps = 0,
.adjfreq = mlx5_ptp_adjfreq,
.adjtime = mlx5_ptp_adjtime,
- .gettime64 = mlx5_ptp_gettime,
+ .gettimex64 = mlx5_ptp_gettimex,
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
@@ -439,16 +444,17 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
-void mlx5_pps_event(struct mlx5_core_dev *mdev,
- struct mlx5_eqe *eqe)
+static int mlx5_pps_event(struct notifier_block *nb,
+ unsigned long type, void *data)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
+ struct mlx5_core_dev *mdev = clock->mdev;
struct ptp_clock_event ptp_event;
- struct timespec64 ts;
- u64 nsec_now, nsec_delta;
u64 cycles_now, cycles_delta;
+ u64 nsec_now, nsec_delta, ns;
+ struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
- s64 ns;
+ struct timespec64 ts;
unsigned long flags;
switch (clock->ptp_info.pin_config[pin].func) {
@@ -463,11 +469,12 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
} else {
ptp_event.type = PTP_CLOCK_EXTTS;
}
+ /* TODO: clock->ptp can be NULL if ptp_clock_register fails */
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
- mlx5_ptp_gettime(&clock->ptp_info, &ts);
- cycles_now = mlx5_read_internal_timer(mdev);
+ mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
+ cycles_now = mlx5_read_internal_timer(mdev, NULL);
ts.tv_sec += 1;
ts.tv_nsec = 0;
ns = timespec64_to_ns(&ts);
@@ -481,8 +488,11 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
write_sequnlock_irqrestore(&clock->lock, flags);
break;
default:
- mlx5_core_err(mdev, " Unhandled event\n");
+ mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
+ clock->ptp_info.pin_config[pin].func);
}
+
+ return NOTIFY_OK;
}
void mlx5_init_clock(struct mlx5_core_dev *mdev)
@@ -511,14 +521,14 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
ktime_to_ns(ktime_get_real()));
/* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
+ * sure counter is checked at least twice every wrap around.
* The period is calculated as the minimum between max HW cycles count
* (The clock source mask) and max amount of cycles that can be
* multiplied by clock multiplier where the result doesn't exceed
* 64bits.
*/
overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
- overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+ overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
frac, &frac);
@@ -567,6 +577,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
PTR_ERR(clock->ptp));
clock->ptp = NULL;
}
+
+ MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
+ mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
@@ -576,6 +589,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
return;
+ mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
if (clock->ptp) {
ptp_clock_unregister(clock->ptp);
clock->ptp = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index 263cb6e2aeee..31600924bdc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -36,7 +36,6 @@
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
-void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
@@ -60,8 +59,6 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
#else
static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
-static inline void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) {}
-
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
new file mode 100644
index 000000000000..bced2efe9bef
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies */
+
+#include <linux/mlx5/vport.h>
+#include "lib/devcom.h"
+
+static LIST_HEAD(devcom_list);
+
+#define devcom_for_each_component(priv, comp, iter) \
+ for (iter = 0; \
+ comp = &(priv)->components[iter], iter < MLX5_DEVCOM_NUM_COMPONENTS; \
+ iter++)
+
+struct mlx5_devcom_component {
+ struct {
+ void *data;
+ } device[MLX5_MAX_PORTS];
+
+ mlx5_devcom_event_handler_t handler;
+ struct rw_semaphore sem;
+ bool paired;
+};
+
+struct mlx5_devcom_list {
+ struct list_head list;
+
+ struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS];
+ struct mlx5_core_dev *devs[MLX5_MAX_PORTS];
+};
+
+struct mlx5_devcom {
+ struct mlx5_devcom_list *priv;
+ int idx;
+};
+
+static struct mlx5_devcom_list *mlx5_devcom_list_alloc(void)
+{
+ struct mlx5_devcom_component *comp;
+ struct mlx5_devcom_list *priv;
+ int i;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ devcom_for_each_component(priv, comp, i)
+ init_rwsem(&comp->sem);
+
+ return priv;
+}
+
+static struct mlx5_devcom *mlx5_devcom_alloc(struct mlx5_devcom_list *priv,
+ u8 idx)
+{
+ struct mlx5_devcom *devcom;
+
+ devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
+ if (!devcom)
+ return NULL;
+
+ devcom->priv = priv;
+ devcom->idx = idx;
+ return devcom;
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devcom_list *priv = NULL, *iter;
+ struct mlx5_devcom *devcom = NULL;
+ bool new_priv = false;
+ u64 sguid0, sguid1;
+ int idx, i;
+
+ if (!mlx5_core_is_pf(dev))
+ return NULL;
+
+ sguid0 = mlx5_query_nic_system_image_guid(dev);
+ list_for_each_entry(iter, &devcom_list, list) {
+ struct mlx5_core_dev *tmp_dev = NULL;
+
+ idx = -1;
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (iter->devs[i])
+ tmp_dev = iter->devs[i];
+ else
+ idx = i;
+ }
+
+ if (idx == -1)
+ continue;
+
+ sguid1 = mlx5_query_nic_system_image_guid(tmp_dev);
+ if (sguid0 != sguid1)
+ continue;
+
+ priv = iter;
+ break;
+ }
+
+ if (!priv) {
+ priv = mlx5_devcom_list_alloc();
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ idx = 0;
+ new_priv = true;
+ }
+
+ priv->devs[idx] = dev;
+ devcom = mlx5_devcom_alloc(priv, idx);
+ if (!devcom) {
+ kfree(priv);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (new_priv)
+ list_add(&priv->list, &devcom_list);
+
+ return devcom;
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
+{
+ struct mlx5_devcom_list *priv;
+ int i;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+
+ priv = devcom->priv;
+ priv->devs[devcom->idx] = NULL;
+
+ kfree(devcom);
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (priv->devs[i])
+ break;
+
+ if (i != MLX5_MAX_PORTS)
+ return;
+
+ list_del(&priv->list);
+ kfree(priv);
+}
+
+void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ mlx5_devcom_event_handler_t handler,
+ void *data)
+{
+ struct mlx5_devcom_component *comp;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+
+ WARN_ON(!data);
+
+ comp = &devcom->priv->components[id];
+ down_write(&comp->sem);
+ comp->handler = handler;
+ comp->device[devcom->idx].data = data;
+ up_write(&comp->sem);
+}
+
+void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
+{
+ struct mlx5_devcom_component *comp;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+
+ comp = &devcom->priv->components[id];
+ down_write(&comp->sem);
+ comp->device[devcom->idx].data = NULL;
+ up_write(&comp->sem);
+}
+
+int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ int event,
+ void *event_data)
+{
+ struct mlx5_devcom_component *comp;
+ int err = -ENODEV, i;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return err;
+
+ comp = &devcom->priv->components[id];
+ down_write(&comp->sem);
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (i != devcom->idx && comp->device[i].data) {
+ err = comp->handler(event, comp->device[i].data,
+ event_data);
+ break;
+ }
+
+ up_write(&comp->sem);
+ return err;
+}
+
+void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ bool paired)
+{
+ struct mlx5_devcom_component *comp;
+
+ comp = &devcom->priv->components[id];
+ WARN_ON(!rwsem_is_locked(&comp->sem));
+
+ comp->paired = paired;
+}
+
+bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
+{
+ if (IS_ERR_OR_NULL(devcom))
+ return false;
+
+ return devcom->priv->components[id].paired;
+}
+
+void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
+{
+ struct mlx5_devcom_component *comp;
+ int i;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return NULL;
+
+ comp = &devcom->priv->components[id];
+ down_read(&comp->sem);
+ if (!comp->paired) {
+ up_read(&comp->sem);
+ return NULL;
+ }
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (i != devcom->idx)
+ break;
+
+ return comp->device[i].data;
+}
+
+void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
+{
+ struct mlx5_devcom_component *comp = &devcom->priv->components[id];
+
+ up_read(&comp->sem);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
new file mode 100644
index 000000000000..939d5bf1581b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies */
+
+#ifndef __LIB_MLX5_DEVCOM_H__
+#define __LIB_MLX5_DEVCOM_H__
+
+#include <linux/mlx5/driver.h>
+
+enum mlx5_devcom_components {
+ MLX5_DEVCOM_ESW_OFFLOADS,
+
+ MLX5_DEVCOM_NUM_COMPONENTS,
+};
+
+typedef int (*mlx5_devcom_event_handler_t)(int event,
+ void *my_data,
+ void *event_data);
+
+struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
+void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom);
+
+void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ mlx5_devcom_event_handler_t handler,
+ void *data);
+void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
+
+int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ int event,
+ void *event_data);
+
+void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ bool paired);
+bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
+
+void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
+void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
+
+#endif
+
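/*
 * Usage sketch (illustrative, not part of the header above): the intended
 * devcom flow for a component such as MLX5_DEVCOM_ESW_OFFLOADS. The handler
 * and function names below are hypothetical; per the comments in devcom.c,
 * device register/unregister must be called with the intf_mutex held.
 */
#include "lib/devcom.h"

static int example_devcom_handler(int event, void *my_data, void *event_data)
{
	/* runs against the peer port's registered data when the other side
	 * calls mlx5_devcom_send_event()
	 */
	return 0;
}

static void example_devcom_flow(struct mlx5_core_dev *dev, void *my_ctx)
{
	struct mlx5_devcom *devcom;
	void *peer_ctx;

	/* NULL for non-PF devices, ERR_PTR on allocation failure */
	devcom = mlx5_devcom_register_device(dev);
	mlx5_devcom_register_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
				       example_devcom_handler, my_ctx);

	/* deliver an event to whichever peer has this component registered */
	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, 0, my_ctx);

	/* peer data is only visible once the component was marked paired,
	 * e.g. via mlx5_devcom_set_paired() from inside the event handler,
	 * where the component lock is already held
	 */
	peer_ctx = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (peer_ctx)
		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	mlx5_devcom_unregister_device(devcom);
}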
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
new file mode 100644
index 000000000000..c0fb6d72b695
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies */
+
+#ifndef __LIB_MLX5_EQ_H__
+#define __LIB_MLX5_EQ_H__
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eq.h>
+#include <linux/mlx5/cq.h>
+
+#define MLX5_MAX_IRQ_NAME (32)
+#define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))
+
+struct mlx5_eq_tasklet {
+ struct list_head list;
+ struct list_head process_list;
+ struct tasklet_struct task;
+ spinlock_t lock; /* lock completion tasklet list */
+};
+
+struct mlx5_cq_table {
+ spinlock_t lock; /* protect radix tree */
+ struct radix_tree_root tree;
+};
+
+struct mlx5_eq {
+ struct mlx5_core_dev *dev;
+ struct mlx5_cq_table cq_table;
+ __be32 __iomem *doorbell;
+ u32 cons_index;
+ struct mlx5_frag_buf buf;
+ int size;
+ unsigned int vecidx;
+ unsigned int irqn;
+ u8 eqn;
+ int nent;
+ struct mlx5_rsc_debug *dbg;
+};
+
+struct mlx5_eq_comp {
+ struct mlx5_eq core; /* Must be first */
+ struct mlx5_eq_tasklet tasklet_ctx;
+ struct list_head list;
+};
+
+static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
+{
+ return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
+}
+
+static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
+{
+ struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
+
+ return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
+}
+
+static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
+{
+ __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
+ u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
+
+ __raw_writel((__force u32)cpu_to_be32(val), addr);
+ /* We still want ordering, just not swabbing, so add a barrier */
+ mb();
+}
+
+int mlx5_eq_table_init(struct mlx5_core_dev *dev);
+void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
+int mlx5_eq_table_create(struct mlx5_core_dev *dev);
+void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
+struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
+void mlx5_cq_tasklet_cb(unsigned long data);
+struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
+
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
+void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
+void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);
+
+int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+
+/* This function should only be called after mlx5_cmd_force_teardown_hca */
+void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
+
+#ifdef CONFIG_RFS_ACCEL
+struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
+#endif
+
+int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 7550b1cc8c6a..397a2847867a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -33,6 +33,8 @@
#ifndef __LIB_MLX5_H__
#define __LIB_MLX5_H__
+#include "mlx5_core.h"
+
void mlx5_init_reserved_gids(struct mlx5_core_dev *dev);
void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev);
int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count);
@@ -40,4 +42,38 @@ void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count);
int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index);
void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index);
+/* TODO move to lib/events.h */
+
+#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
+#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
+
+enum port_module_event_status_type {
+ MLX5_MODULE_STATUS_PLUGGED = 0x1,
+ MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
+ MLX5_MODULE_STATUS_ERROR = 0x3,
+ MLX5_MODULE_STATUS_DISABLED = 0x4,
+ MLX5_MODULE_STATUS_NUM,
+};
+
+enum port_module_event_error_type {
+ MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
+ MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX = 0x1,
+ MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
+ MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
+ MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
+ MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
+ MLX5_MODULE_EVENT_ERROR_BAD_CABLE = 0x7,
+ MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED = 0xc,
+ MLX5_MODULE_EVENT_ERROR_NUM,
+};
+
+struct mlx5_pme_stats {
+ u64 status_counters[MLX5_MODULE_STATUS_NUM];
+ u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+};
+
+void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
+int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28132c7dc05f..77896c11f6f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -43,7 +43,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
@@ -53,6 +52,7 @@
#endif
#include <net/devlink.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
#include "fs_core.h"
#include "lib/mpfs.h"
#include "eswitch.h"
@@ -63,6 +63,7 @@
#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
+#include "lib/devcom.h"
#include "diag/fw_tracer.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -319,51 +320,6 @@ static void release_bar(struct pci_dev *pdev)
pci_release_regions(pdev);
}
-static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_eq_table *table = &priv->eq_table;
- int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
- MLX5_CAP_GEN(dev, max_num_eqs) :
- 1 << MLX5_CAP_GEN(dev, log_max_eq);
- int nvec;
- int err;
-
- nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
- MLX5_EQ_VEC_COMP_BASE;
- nvec = min_t(int, nvec, num_eqs);
- if (nvec <= MLX5_EQ_VEC_COMP_BASE)
- return -ENOMEM;
-
- priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
- if (!priv->irq_info)
- return -ENOMEM;
-
- nvec = pci_alloc_irq_vectors(dev->pdev,
- MLX5_EQ_VEC_COMP_BASE + 1, nvec,
- PCI_IRQ_MSIX);
- if (nvec < 0) {
- err = nvec;
- goto err_free_irq_info;
- }
-
- table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
-
- return 0;
-
-err_free_irq_info:
- kfree(priv->irq_info);
- return err;
-}
-
-static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
-
- pci_free_irq_vectors(dev->pdev);
- kfree(priv->irq_info);
-}
-
struct mlx5_reg_host_endianness {
u8 he;
u8 rsvd[15];
@@ -624,188 +580,24 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
+u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
+ struct ptp_system_timestamp *sts)
{
u32 timer_h, timer_h1, timer_l;
timer_h = ioread32be(&dev->iseg->internal_timer_h);
+ ptp_read_system_prets(sts);
timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ ptp_read_system_postts(sts);
timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
- if (timer_h != timer_h1) /* wrap around */
+ if (timer_h != timer_h1) {
+ /* wrap around */
+ ptp_read_system_prets(sts);
timer_l = ioread32be(&dev->iseg->internal_timer_l);
-
- return (u64)timer_l | (u64)timer_h1 << 32;
-}
-
-static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
- struct mlx5_priv *priv = &mdev->priv;
- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
-
- if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
- mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
- return -ENOMEM;
+ ptp_read_system_postts(sts);
}
- cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
- priv->irq_info[i].mask);
-
- if (IS_ENABLED(CONFIG_SMP) &&
- irq_set_affinity_hint(irq, priv->irq_info[i].mask))
- mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
-
- return 0;
-}
-
-static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
- struct mlx5_priv *priv = &mdev->priv;
- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
-
- irq_set_affinity_hint(irq, NULL);
- free_cpumask_var(priv->irq_info[i].mask);
-}
-
-static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
-{
- int err;
- int i;
-
- for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
- err = mlx5_irq_set_affinity_hint(mdev, i);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- for (i--; i >= 0; i--)
- mlx5_irq_clear_affinity_hint(mdev, i);
-
- return err;
-}
-
-static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
-{
- int i;
-
- for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
- mlx5_irq_clear_affinity_hint(mdev, i);
-}
-
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn)
-{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- struct mlx5_eq *eq, *n;
- int err = -ENOENT;
-
- spin_lock(&table->lock);
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
- if (eq->index == vector) {
- *eqn = eq->eqn;
- *irqn = eq->irqn;
- err = 0;
- break;
- }
- }
- spin_unlock(&table->lock);
-
- return err;
-}
-EXPORT_SYMBOL(mlx5_vector2eqn);
-
-struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
-{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- struct mlx5_eq *eq;
-
- spin_lock(&table->lock);
- list_for_each_entry(eq, &table->comp_eqs_list, list)
- if (eq->eqn == eqn) {
- spin_unlock(&table->lock);
- return eq;
- }
-
- spin_unlock(&table->lock);
-
- return ERR_PTR(-ENOENT);
-}
-
-static void free_comp_eqs(struct mlx5_core_dev *dev)
-{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- struct mlx5_eq *eq, *n;
-
-#ifdef CONFIG_RFS_ACCEL
- if (dev->rmap) {
- free_irq_cpu_rmap(dev->rmap);
- dev->rmap = NULL;
- }
-#endif
- spin_lock(&table->lock);
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
- list_del(&eq->list);
- spin_unlock(&table->lock);
- if (mlx5_destroy_unmap_eq(dev, eq))
- mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
- eq->eqn);
- kfree(eq);
- spin_lock(&table->lock);
- }
- spin_unlock(&table->lock);
-}
-
-static int alloc_comp_eqs(struct mlx5_core_dev *dev)
-{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- char name[MLX5_MAX_IRQ_NAME];
- struct mlx5_eq *eq;
- int ncomp_vec;
- int nent;
- int err;
- int i;
-
- INIT_LIST_HEAD(&table->comp_eqs_list);
- ncomp_vec = table->num_comp_vectors;
- nent = MLX5_COMP_EQ_SIZE;
-#ifdef CONFIG_RFS_ACCEL
- dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
- if (!dev->rmap)
- return -ENOMEM;
-#endif
- for (i = 0; i < ncomp_vec; i++) {
- eq = kzalloc(sizeof(*eq), GFP_KERNEL);
- if (!eq) {
- err = -ENOMEM;
- goto clean;
- }
-
-#ifdef CONFIG_RFS_ACCEL
- irq_cpu_rmap_add(dev->rmap, pci_irq_vector(dev->pdev,
- MLX5_EQ_VEC_COMP_BASE + i));
-#endif
- snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
- err = mlx5_create_map_eq(dev, eq,
- i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
- name, MLX5_EQ_TYPE_COMP);
- if (err) {
- kfree(eq);
- goto clean;
- }
- mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
- eq->index = i;
- spin_lock(&table->lock);
- list_add_tail(&eq->list, &table->comp_eqs_list);
- spin_unlock(&table->lock);
- }
-
- return 0;
-
-clean:
- free_comp_eqs(dev);
- return err;
+ return (u64)timer_l | (u64)timer_h1 << 32;
}
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
@@ -938,28 +730,37 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
struct pci_dev *pdev = dev->pdev;
int err;
+ priv->devcom = mlx5_devcom_register_device(dev);
+ if (IS_ERR(priv->devcom))
+ dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n",
+ priv->devcom);
+
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query board id failed\n");
- goto out;
+ goto err_devcom;
}
- err = mlx5_eq_init(dev);
+ err = mlx5_eq_table_init(dev);
if (err) {
dev_err(&pdev->dev, "failed to initialize eq\n");
- goto out;
+ goto err_devcom;
+ }
+
+ err = mlx5_events_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize events\n");
+ goto err_eq_cleanup;
}
err = mlx5_cq_debugfs_init(dev);
if (err) {
dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
- goto err_eq_cleanup;
+ goto err_events_cleanup;
}
mlx5_init_qp_table(dev);
- mlx5_init_srq_table(dev);
-
mlx5_init_mkey_table(dev);
mlx5_init_reserved_gids(dev);
@@ -1013,14 +814,15 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
-
+err_events_cleanup:
+ mlx5_events_cleanup(dev);
err_eq_cleanup:
- mlx5_eq_cleanup(dev);
+ mlx5_eq_table_cleanup(dev);
+err_devcom:
+ mlx5_devcom_unregister_device(dev->priv.devcom);
-out:
return err;
}
@@ -1036,10 +838,11 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
- mlx5_eq_cleanup(dev);
+ mlx5_events_cleanup(dev);
+ mlx5_eq_table_cleanup(dev);
+ mlx5_devcom_unregister_device(dev->priv.devcom);
}
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
@@ -1131,16 +934,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
- err = mlx5_pagealloc_start(dev);
- if (err) {
- dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
- goto reclaim_boot_pages;
- }
-
err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) {
dev_err(&pdev->dev, "init hca failed\n");
- goto err_pagealloc_stop;
+ goto reclaim_boot_pages;
}
mlx5_set_driver_version(dev);
@@ -1161,23 +958,20 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
}
- err = mlx5_alloc_irq_vectors(dev);
- if (err) {
- dev_err(&pdev->dev, "alloc irq vectors failed\n");
- goto err_cleanup_once;
- }
-
dev->priv.uar = mlx5_get_uars_page(dev);
if (IS_ERR(dev->priv.uar)) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
err = PTR_ERR(dev->priv.uar);
- goto err_disable_msix;
+ goto err_get_uars;
}
- err = mlx5_start_eqs(dev);
+ mlx5_events_start(dev);
+ mlx5_pagealloc_start(dev);
+
+ err = mlx5_eq_table_create(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
- goto err_put_uars;
+ dev_err(&pdev->dev, "Failed to create EQs\n");
+ goto err_eq_table;
}
err = mlx5_fw_tracer_init(dev->tracer);
@@ -1186,18 +980,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_fw_tracer;
}
- err = alloc_comp_eqs(dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
- goto err_comp_eqs;
- }
-
- err = mlx5_irq_set_affinity_hints(dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
- goto err_affinity_hints;
- }
-
err = mlx5_fpga_device_start(dev);
if (err) {
dev_err(&pdev->dev, "fpga device start failed %d\n", err);
@@ -1266,24 +1048,17 @@ err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
- mlx5_irq_clear_affinity_hints(dev);
-
-err_affinity_hints:
- free_comp_eqs(dev);
-
-err_comp_eqs:
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
- mlx5_stop_eqs(dev);
+ mlx5_eq_table_destroy(dev);
-err_put_uars:
+err_eq_table:
+ mlx5_pagealloc_stop(dev);
+ mlx5_events_stop(dev);
mlx5_put_uars_page(dev, priv->uar);
-err_disable_msix:
- mlx5_free_irq_vectors(dev);
-
-err_cleanup_once:
+err_get_uars:
if (boot)
mlx5_cleanup_once(dev);
@@ -1294,9 +1069,6 @@ err_stop_poll:
goto out_err;
}
-err_pagealloc_stop:
- mlx5_pagealloc_stop(dev);
-
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
@@ -1340,21 +1112,20 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
- mlx5_irq_clear_affinity_hints(dev);
- free_comp_eqs(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
- mlx5_stop_eqs(dev);
+ mlx5_eq_table_destroy(dev);
+ mlx5_pagealloc_stop(dev);
+ mlx5_events_stop(dev);
mlx5_put_uars_page(dev, priv->uar);
- mlx5_free_irq_vectors(dev);
if (cleanup)
mlx5_cleanup_once(dev);
mlx5_stop_health_poll(dev, cleanup);
+
err = mlx5_cmd_teardown_hca(dev);
if (err) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
goto out;
}
- mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_cleanup(dev);
@@ -1364,12 +1135,6 @@ out:
return err;
}
-struct mlx5_core_event_handler {
- void (*event)(struct mlx5_core_dev *dev,
- enum mlx5_dev_event event,
- void *data);
-};
-
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -1403,7 +1168,6 @@ static int init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
dev->pdev = pdev;
- dev->event = mlx5_core_event;
dev->profile = &profile[prof_sel];
INIT_LIST_HEAD(&priv->ctx_list);
@@ -1411,17 +1175,6 @@ static int init_one(struct pci_dev *pdev,
mutex_init(&dev->pci_status_mutex);
mutex_init(&dev->intf_state_mutex);
- INIT_LIST_HEAD(&priv->waiting_events_list);
- priv->is_accum_events = false;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- err = init_srcu_struct(&priv->pfault_srcu);
- if (err) {
- dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n",
- err);
- goto clean_dev;
- }
-#endif
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
@@ -1430,7 +1183,7 @@ static int init_one(struct pci_dev *pdev,
err = mlx5_pci_init(dev, priv);
if (err) {
dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
- goto clean_srcu;
+ goto clean_dev;
}
err = mlx5_health_init(dev);
@@ -1439,12 +1192,14 @@ static int init_one(struct pci_dev *pdev,
goto close_pci;
}
- mlx5_pagealloc_init(dev);
+ err = mlx5_pagealloc_init(dev);
+ if (err)
+ goto err_pagealloc_init;
err = mlx5_load_one(dev, priv, true);
if (err) {
dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
- goto clean_health;
+ goto err_load_one;
}
request_module_nowait(MLX5_IB_MOD);
@@ -1458,16 +1213,13 @@ static int init_one(struct pci_dev *pdev,
clean_load:
mlx5_unload_one(dev, priv, true);
-clean_health:
+err_load_one:
mlx5_pagealloc_cleanup(dev);
+err_pagealloc_init:
mlx5_health_cleanup(dev);
close_pci:
mlx5_pci_close(dev, priv);
-clean_srcu:
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- cleanup_srcu_struct(&priv->pfault_srcu);
clean_dev:
-#endif
devlink_free(devlink);
return err;
@@ -1491,9 +1243,6 @@ static void remove_one(struct pci_dev *pdev)
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_pci_close(dev, priv);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- cleanup_srcu_struct(&priv->pfault_srcu);
-#endif
devlink_free(devlink);
}
@@ -1637,7 +1386,6 @@ succeed:
* kexec. There is no need to cleanup the mlx5_core software
* contexts.
*/
- mlx5_irq_clear_affinity_hints(dev);
mlx5_core_eq_free_irqs(dev);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0594d0961cb3..c68dcea5985b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -38,6 +38,7 @@
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
@@ -78,6 +79,11 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+#define mlx5_core_warn_once(__dev, format, ...) \
+ dev_warn_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
#define mlx5_core_info(__dev, format, ...) \
dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
@@ -97,12 +103,6 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
-
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param);
-void mlx5_core_page_fault(struct mlx5_core_dev *dev,
- struct mlx5_pagefault *pfault);
-void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
@@ -122,30 +122,10 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
-
-int mlx5_eq_init(struct mlx5_core_dev *dev);
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name,
- enum mlx5_eq_type type);
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
-int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen);
-int mlx5_start_eqs(struct mlx5_core_dev *dev);
-void mlx5_stop_eqs(struct mlx5_core_dev *dev);
-/* This function should only be called after mlx5_cmd_force_teardown_hca */
-void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
-struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
-u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
-void mlx5_cq_tasklet_cb(unsigned long data);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
-int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
+ struct ptp_system_timestamp *sts);
+
+void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
@@ -159,6 +139,11 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
+int mlx5_events_init(struct mlx5_core_dev *dev);
+void mlx5_events_cleanup(struct mlx5_core_dev *dev);
+void mlx5_events_start(struct mlx5_core_dev *dev);
+void mlx5_events_stop(struct mlx5_core_dev *dev);
+
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_attach_device(struct mlx5_core_dev *dev);
@@ -202,10 +187,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, lag_master);
}
-int mlx5_lag_allow(struct mlx5_core_dev *dev);
-int mlx5_lag_forbid(struct mlx5_core_dev *dev);
-
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
+void mlx5_lag_update(struct mlx5_core_dev *dev);
enum {
MLX5_NIC_IFC_FULL = 0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index e36d3e3675f9..a83b517b0714 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
enum {
MLX5_PAGES_CANT_GIVE = 0,
@@ -433,15 +434,28 @@ static void pages_work_handler(struct work_struct *work)
kfree(req);
}
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s32 npages)
+static int req_pages_handler(struct notifier_block *nb,
+ unsigned long type, void *data)
{
struct mlx5_pages_req *req;
-
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ struct mlx5_eqe *eqe;
+ u16 func_id;
+ s32 npages;
+
+ priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+ eqe = data;
+
+ func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+ npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+ func_id, npages);
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
mlx5_core_warn(dev, "failed to allocate pages request\n");
- return;
+ return NOTIFY_DONE;
}
req->dev = dev;
@@ -449,6 +463,7 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
req->npages = npages;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
+ return NOTIFY_OK;
}
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
@@ -524,29 +539,32 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
return 0;
}
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
dev->priv.page_root = RB_ROOT;
INIT_LIST_HEAD(&dev->priv.free_list);
+ dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
+ if (!dev->priv.pg_wq)
+ return -ENOMEM;
+
+ return 0;
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
- /* nothing */
+ destroy_workqueue(dev->priv.pg_wq);
}
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
- dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
- if (!dev->priv.pg_wq)
- return -ENOMEM;
-
- return 0;
+ MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
+ mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
- destroy_workqueue(dev->priv.pg_wq);
+ mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
+ flush_workqueue(dev->priv.pg_wq);
}
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 31a9cbd85689..2b82f35f4c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -915,63 +915,6 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
*enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
}
-static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = {
- "Cable plugged", /* MLX5_MODULE_STATUS_PLUGGED = 0x1 */
- "Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED = 0x2 */
- "Cable error", /* MLX5_MODULE_STATUS_ERROR = 0x3 */
-};
-
-static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = {
- "Power budget exceeded",
- "Long Range for non MLNX cable",
- "Bus stuck(I2C or data shorted)",
- "No EEPROM/retry timeout",
- "Enforce part number list",
- "Unknown identifier",
- "High Temperature",
- "Bad or shorted cable/module",
- "Unknown status",
-};
-
-void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
-{
- enum port_module_event_status_type module_status;
- enum port_module_event_error_type error_type;
- struct mlx5_eqe_port_module *module_event_eqe;
- struct mlx5_priv *priv = &dev->priv;
- u8 module_num;
-
- module_event_eqe = &eqe->data.port_module;
- module_num = module_event_eqe->module;
- module_status = module_event_eqe->module_status &
- PORT_MODULE_EVENT_MODULE_STATUS_MASK;
- error_type = module_event_eqe->error_type &
- PORT_MODULE_EVENT_ERROR_TYPE_MASK;
-
- if (module_status < MLX5_MODULE_STATUS_ERROR) {
- priv->pme_stats.status_counters[module_status - 1]++;
- } else if (module_status == MLX5_MODULE_STATUS_ERROR) {
- if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN)
- /* Unknown error type */
- error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN;
- priv->pme_stats.error_counters[error_type]++;
- }
-
- if (!printk_ratelimit())
- return;
-
- if (module_status < MLX5_MODULE_STATUS_ERROR)
- mlx5_core_info(dev,
- "Port module event: module %u, %s\n",
- module_num, mlx5_pme_status[module_status - 1]);
-
- else if (module_status == MLX5_MODULE_STATUS_ERROR)
- mlx5_core_info(dev,
- "Port module event[error]: module %u, %s, %s\n",
- module_num, mlx5_pme_status[module_status - 1],
- mlx5_pme_error[error_type]);
-}
-
int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
{
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 91b8139a388d..388f205a497f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -38,11 +38,11 @@
#include <linux/mlx5/transobj.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
-static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
- u32 rsn)
+static struct mlx5_core_rsc_common *
+mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
struct mlx5_core_rsc_common *common;
spin_lock(&table->lock);
@@ -53,11 +53,6 @@ static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
spin_unlock(&table->lock);
- if (!common) {
- mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
- rsn);
- return NULL;
- }
return common;
}
@@ -120,19 +115,57 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
}
}
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
+static int rsc_event_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
{
- struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
+ struct mlx5_core_rsc_common *common;
+ struct mlx5_qp_table *table;
+ struct mlx5_core_dev *dev;
struct mlx5_core_dct *dct;
+ u8 event_type = (u8)type;
struct mlx5_core_qp *qp;
+ struct mlx5_priv *priv;
+ struct mlx5_eqe *eqe;
+ u32 rsn;
+
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ eqe = data;
+ rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
+ rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
+ break;
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ eqe = data;
+ rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ table = container_of(nb, struct mlx5_qp_table, nb);
+ priv = container_of(table, struct mlx5_priv, qp_table);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);
- if (!common)
- return;
+ common = mlx5_get_rsc(table, rsn);
+ if (!common) {
+ mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
+ return NOTIFY_OK;
+ }
if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
event_type, rsn);
- return;
+ goto out;
}
switch (common->res) {
@@ -150,8 +183,10 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
default:
mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
}
-
+out:
mlx5_core_put_rsc(common);
+
+ return NOTIFY_OK;
}
static int create_resource_common(struct mlx5_core_dev *dev,
@@ -487,10 +522,16 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev)
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
mlx5_qp_debugfs_init(dev);
+
+ table->nb.notifier_call = rsc_event_notifier;
+ mlx5_notifier_register(dev, &table->nb);
}
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+
+ mlx5_notifier_unregister(dev, &table->nb);
mlx5_qp_debugfs_cleanup(dev);
}
@@ -670,3 +711,20 @@ int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
+
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type)
+{
+ u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+
+ return mlx5_get_rsc(table, rsn);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_res_hold);
+
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
+{
+ mlx5_core_put_rsc(res);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_res_put);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index a0674962f02c..6e178030d8fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -216,20 +216,10 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!mlx5_core_is_pf(dev))
return -EPERM;
- if (num_vfs) {
- int ret;
-
- ret = mlx5_lag_forbid(dev);
- if (ret && (ret != -ENODEV))
- return ret;
- }
-
- if (num_vfs) {
+ if (num_vfs)
err = mlx5_sriov_enable(pdev, num_vfs);
- } else {
+ else
mlx5_sriov_disable(pdev);
- mlx5_lag_allow(dev);
- }
return err ? err : num_vfs;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
deleted file mode 100644
index 6a6fc9be01e6..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include <linux/mlx5/srq.h>
-#include <rdma/ib_verbs.h>
-#include "mlx5_core.h"
-#include <linux/mlx5/transobj.h>
-
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
-{
- struct mlx5_srq_table *table = &dev->priv.srq_table;
- struct mlx5_core_srq *srq;
-
- spin_lock(&table->lock);
-
- srq = radix_tree_lookup(&table->tree, srqn);
- if (srq)
- atomic_inc(&srq->refcount);
-
- spin_unlock(&table->lock);
-
- if (!srq) {
- mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
- return;
- }
-
- srq->event(srq, event_type);
-
- if (atomic_dec_and_test(&srq->refcount))
- complete(&srq->free);
-}
-
-static int get_pas_size(struct mlx5_srq_attr *in)
-{
- u32 log_page_size = in->log_page_size + 12;
- u32 log_srq_size = in->log_size;
- u32 log_rq_stride = in->wqe_shift;
- u32 page_offset = in->page_offset;
- u32 po_quanta = 1 << (log_page_size - 6);
- u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
- u32 page_size = 1 << log_page_size;
- u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
- u32 rq_num_pas = DIV_ROUND_UP(rq_sz_po, page_size);
-
- return rq_num_pas * sizeof(u64);
-}
-
-static void set_wq(void *wq, struct mlx5_srq_attr *in)
-{
- MLX5_SET(wq, wq, wq_signature, !!(in->flags
- & MLX5_SRQ_FLAG_WQ_SIG));
- MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
- MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
- MLX5_SET(wq, wq, log_wq_sz, in->log_size);
- MLX5_SET(wq, wq, page_offset, in->page_offset);
- MLX5_SET(wq, wq, lwm, in->lwm);
- MLX5_SET(wq, wq, pd, in->pd);
- MLX5_SET64(wq, wq, dbr_addr, in->db_record);
-}
-
-static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
-{
- MLX5_SET(srqc, srqc, wq_signature, !!(in->flags
- & MLX5_SRQ_FLAG_WQ_SIG));
- MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
- MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
- MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
- MLX5_SET(srqc, srqc, page_offset, in->page_offset);
- MLX5_SET(srqc, srqc, lwm, in->lwm);
- MLX5_SET(srqc, srqc, pd, in->pd);
- MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
- MLX5_SET(srqc, srqc, xrcd, in->xrcd);
- MLX5_SET(srqc, srqc, cqn, in->cqn);
-}
-
-static void get_wq(void *wq, struct mlx5_srq_attr *in)
-{
- if (MLX5_GET(wq, wq, wq_signature))
- in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
- in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
- in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
- in->log_size = MLX5_GET(wq, wq, log_wq_sz);
- in->page_offset = MLX5_GET(wq, wq, page_offset);
- in->lwm = MLX5_GET(wq, wq, lwm);
- in->pd = MLX5_GET(wq, wq, pd);
- in->db_record = MLX5_GET64(wq, wq, dbr_addr);
-}
-
-static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
-{
- if (MLX5_GET(srqc, srqc, wq_signature))
- in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
- in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
- in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
- in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
- in->page_offset = MLX5_GET(srqc, srqc, page_offset);
- in->lwm = MLX5_GET(srqc, srqc, lwm);
- in->pd = MLX5_GET(srqc, srqc, pd);
- in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
-}
-
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
-{
- struct mlx5_srq_table *table = &dev->priv.srq_table;
- struct mlx5_core_srq *srq;
-
- spin_lock(&table->lock);
-
- srq = radix_tree_lookup(&table->tree, srqn);
- if (srq)
- atomic_inc(&srq->refcount);
-
- spin_unlock(&table->lock);
-
- return srq;
-}
-EXPORT_SYMBOL(mlx5_core_get_srq);
-
-static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in)
-{
- u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
- void *create_in;
- void *srqc;
- void *pas;
- int pas_size;
- int inlen;
- int err;
-
- pas_size = get_pas_size(in);
- inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
- create_in = kvzalloc(inlen, GFP_KERNEL);
- if (!create_in)
- return -ENOMEM;
-
- MLX5_SET(create_srq_in, create_in, uid, in->uid);
- srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
- pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
-
- set_srqc(srqc, in);
- memcpy(pas, in->pas, pas_size);
-
- MLX5_SET(create_srq_in, create_in, opcode,
- MLX5_CMD_OP_CREATE_SRQ);
-
- err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
- sizeof(create_out));
- kvfree(create_in);
- if (!err) {
- srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
- srq->uid = in->uid;
- }
-
- return err;
-}
-
-static int destroy_srq_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq)
-{
- u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
-
- MLX5_SET(destroy_srq_in, srq_in, opcode,
- MLX5_CMD_OP_DESTROY_SRQ);
- MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
-
- return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
-}
-
-static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- u16 lwm, int is_srq)
-{
- u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
-
- MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
- MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
- MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
- MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
- MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
-
- return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
-}
-
-static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out)
-{
- u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
- u32 *srq_out;
- void *srqc;
- int err;
-
- srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
- if (!srq_out)
- return -ENOMEM;
-
- MLX5_SET(query_srq_in, srq_in, opcode,
- MLX5_CMD_OP_QUERY_SRQ);
- MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
- err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
- srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
- if (err)
- goto out;
-
- srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
- get_srqc(srqc, out);
- if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
- out->flags |= MLX5_SRQ_FLAG_ERR;
-out:
- kvfree(srq_out);
- return err;
-}
-
-static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in)
-{
- u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
- void *create_in;
- void *xrc_srqc;
- void *pas;
- int pas_size;
- int inlen;
- int err;
-
- pas_size = get_pas_size(in);
- inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
- create_in = kvzalloc(inlen, GFP_KERNEL);
- if (!create_in)
- return -ENOMEM;
-
- MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
- xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
- xrc_srq_context_entry);
- pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
-
- set_srqc(xrc_srqc, in);
- MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
- memcpy(pas, in->pas, pas_size);
- MLX5_SET(create_xrc_srq_in, create_in, opcode,
- MLX5_CMD_OP_CREATE_XRC_SRQ);
-
- memset(create_out, 0, sizeof(create_out));
- err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
- sizeof(create_out));
- if (err)
- goto out;
-
- srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
- srq->uid = in->uid;
-out:
- kvfree(create_in);
- return err;
-}
-
-static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq)
-{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
- u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
-
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
- MLX5_CMD_OP_DESTROY_XRC_SRQ);
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
-
- return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
-}
-
-static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq, u16 lwm)
-{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
- u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
-
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
-
- return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
-}
-
-static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out)
-{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
- u32 *xrcsrq_out;
- void *xrc_srqc;
- int err;
-
- xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
- if (!xrcsrq_out)
- return -ENOMEM;
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
-
- MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
- MLX5_CMD_OP_QUERY_XRC_SRQ);
- MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
-
- err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
- MLX5_ST_SZ_BYTES(query_xrc_srq_out));
- if (err)
- goto out;
-
- xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
- xrc_srq_context_entry);
- get_srqc(xrc_srqc, out);
- if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
- out->flags |= MLX5_SRQ_FLAG_ERR;
-
-out:
- kvfree(xrcsrq_out);
- return err;
-}
-
-static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in)
-{
- void *create_in;
- void *rmpc;
- void *wq;
- int pas_size;
- int inlen;
- int err;
-
- pas_size = get_pas_size(in);
- inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
- create_in = kvzalloc(inlen, GFP_KERNEL);
- if (!create_in)
- return -ENOMEM;
-
- rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
- wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
-
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
- MLX5_SET(create_rmp_in, create_in, uid, in->uid);
- set_wq(wq, in);
- memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
-
- err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
- if (!err)
- srq->uid = in->uid;
-
- kvfree(create_in);
- return err;
-}
-
-static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};
-
- MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
- MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
- MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int arm_rmp_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq,
- u16 lwm)
-{
- void *in;
- void *rmpc;
- void *wq;
- void *bitmask;
- int err;
-
- in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
- bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
- wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
-
- MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
- MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
- MLX5_SET(modify_rmp_in, in, uid, srq->uid);
- MLX5_SET(wq, wq, lwm, lwm);
- MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
-
- err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
-
- kvfree(in);
- return err;
-}
-
-static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out)
-{
- u32 *rmp_out;
- void *rmpc;
- int err;
-
- rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL);
- if (!rmp_out)
- return -ENOMEM;
-
- err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
- if (err)
- goto out;
-
- rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
- get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
- if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
- out->flags |= MLX5_SRQ_FLAG_ERR;
-
-out:
- kvfree(rmp_out);
- return err;
-}
-
-static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in)
-{
- u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
- void *create_in;
- void *xrqc;
- void *wq;
- int pas_size;
- int inlen;
- int err;
-
- pas_size = get_pas_size(in);
- inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
- create_in = kvzalloc(inlen, GFP_KERNEL);
- if (!create_in)
- return -ENOMEM;
-
- xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
- wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
-
- set_wq(wq, in);
- memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);
-
- if (in->type == IB_SRQT_TM) {
- MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
- if (in->flags & MLX5_SRQ_FLAG_RNDV)
- MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
- MLX5_SET(xrqc, xrqc,
- tag_matching_topology_context.log_matching_list_sz,
- in->tm_log_list_size);
- }
- MLX5_SET(xrqc, xrqc, user_index, in->user_index);
- MLX5_SET(xrqc, xrqc, cqn, in->cqn);
- MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
- MLX5_SET(create_xrq_in, create_in, uid, in->uid);
- err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
- sizeof(create_out));
- kvfree(create_in);
- if (!err) {
- srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
- srq->uid = in->uid;
- }
-
- return err;
-}
-
-static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};
-
- MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
- MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
- MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int arm_xrq_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq,
- u16 lwm)
-{
- u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
-
- MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
- MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
- MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
- MLX5_SET(arm_rq_in, in, lwm, lwm);
- MLX5_SET(arm_rq_in, in, uid, srq->uid);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int query_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out)
-{
- u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
- u32 *xrq_out;
- int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
- void *xrqc;
- int err;
-
- xrq_out = kvzalloc(outlen, GFP_KERNEL);
- if (!xrq_out)
- return -ENOMEM;
-
- MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
- MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
-
- err = mlx5_cmd_exec(dev, in, sizeof(in), xrq_out, outlen);
- if (err)
- goto out;
-
- xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
- get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
- if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
- out->flags |= MLX5_SRQ_FLAG_ERR;
- out->tm_next_tag =
- MLX5_GET(xrqc, xrqc,
- tag_matching_topology_context.append_next_index);
- out->tm_hw_phase_cnt =
- MLX5_GET(xrqc, xrqc,
- tag_matching_topology_context.hw_phase_cnt);
- out->tm_sw_phase_cnt =
- MLX5_GET(xrqc, xrqc,
- tag_matching_topology_context.sw_phase_cnt);
-
-out:
- kvfree(xrq_out);
- return err;
-}
-
-static int create_srq_split(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in)
-{
- if (!dev->issi)
- return create_srq_cmd(dev, srq, in);
- switch (srq->common.res) {
- case MLX5_RES_XSRQ:
- return create_xrc_srq_cmd(dev, srq, in);
- case MLX5_RES_XRQ:
- return create_xrq_cmd(dev, srq, in);
- default:
- return create_rmp_cmd(dev, srq, in);
- }
-}
-
-static int destroy_srq_split(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq)
-{
- if (!dev->issi)
- return destroy_srq_cmd(dev, srq);
- switch (srq->common.res) {
- case MLX5_RES_XSRQ:
- return destroy_xrc_srq_cmd(dev, srq);
- case MLX5_RES_XRQ:
- return destroy_xrq_cmd(dev, srq);
- default:
- return destroy_rmp_cmd(dev, srq);
- }
-}
-
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in)
-{
- int err;
- struct mlx5_srq_table *table = &dev->priv.srq_table;
-
- switch (in->type) {
- case IB_SRQT_XRC:
- srq->common.res = MLX5_RES_XSRQ;
- break;
- case IB_SRQT_TM:
- srq->common.res = MLX5_RES_XRQ;
- break;
- default:
- srq->common.res = MLX5_RES_SRQ;
- }
-
- err = create_srq_split(dev, srq, in);
- if (err)
- return err;
-
- atomic_set(&srq->refcount, 1);
- init_completion(&srq->free);
-
- spin_lock_irq(&table->lock);
- err = radix_tree_insert(&table->tree, srq->srqn, srq);
- spin_unlock_irq(&table->lock);
- if (err) {
- mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
- goto err_destroy_srq_split;
- }
-
- return 0;
-
-err_destroy_srq_split:
- destroy_srq_split(dev, srq);
-
- return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_srq);
-
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
-{
- struct mlx5_srq_table *table = &dev->priv.srq_table;
- struct mlx5_core_srq *tmp;
- int err;
-
- spin_lock_irq(&table->lock);
- tmp = radix_tree_delete(&table->tree, srq->srqn);
- spin_unlock_irq(&table->lock);
- if (!tmp) {
- mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
- return -EINVAL;
- }
- if (tmp != srq) {
- mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
- return -EINVAL;
- }
-
- err = destroy_srq_split(dev, srq);
- if (err)
- return err;
-
- if (atomic_dec_and_test(&srq->refcount))
- complete(&srq->free);
- wait_for_completion(&srq->free);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_core_destroy_srq);
-
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out)
-{
- if (!dev->issi)
- return query_srq_cmd(dev, srq, out);
- switch (srq->common.res) {
- case MLX5_RES_XSRQ:
- return query_xrc_srq_cmd(dev, srq, out);
- case MLX5_RES_XRQ:
- return query_xrq_cmd(dev, srq, out);
- default:
- return query_rmp_cmd(dev, srq, out);
- }
-}
-EXPORT_SYMBOL(mlx5_core_query_srq);
-
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- u16 lwm, int is_srq)
-{
- if (!dev->issi)
- return arm_srq_cmd(dev, srq, lwm, is_srq);
- switch (srq->common.res) {
- case MLX5_RES_XSRQ:
- return arm_xrc_srq_cmd(dev, srq, lwm);
- case MLX5_RES_XRQ:
- return arm_xrq_cmd(dev, srq, lwm);
- default:
- return arm_rmp_cmd(dev, srq, lwm);
- }
-}
-EXPORT_SYMBOL(mlx5_core_arm_srq);
-
-void mlx5_init_srq_table(struct mlx5_core_dev *dev)
-{
- struct mlx5_srq_table *table = &dev->priv.srq_table;
-
- memset(table, 0, sizeof(*table));
- spin_lock_init(&table->lock);
- INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
-}
-
-void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
-{
- /* nothing */
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index a1ee9a8a769e..c4d4b76096dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -258,115 +258,6 @@ void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
}
EXPORT_SYMBOL(mlx5_core_destroy_tis);
-int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn)
-{
- u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
- int err;
-
- MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- if (!err)
- *rmpn = MLX5_GET(create_rmp_out, out, rmpn);
-
- return err;
-}
-
-int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
-{
- u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
-
- MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-
-int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
-
- MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
- MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out,
- sizeof(out));
-}
-
-int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
-{
- u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
- int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
-
- MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
- MLX5_SET(query_rmp_in, in, rmpn, rmpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
-int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
-{
- void *in;
- void *rmpc;
- void *wq;
- void *bitmask;
- int err;
-
- in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
- bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
- wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
-
- MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
- MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
- MLX5_SET(wq, wq, lwm, lwm);
- MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
-
- err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
-
- kvfree(in);
-
- return err;
-}
-
-int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *xsrqn)
-{
- u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
- int err;
-
- MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- if (!err)
- *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
-
- return err;
-}
-
-int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
-
- MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
- MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
-{
- u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
-
- MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
- MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
- MLX5_SET(arm_xrc_srq_in, in, op_mod,
- MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index cfbea66b4879..9b150ce9d315 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1204,9 +1204,19 @@ EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
{
- if (!mdev->sys_image_guid)
- mlx5_query_nic_vport_system_image_guid(mdev, &mdev->sys_image_guid);
+ int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ u64 tmp = 0;
- return mdev->sys_image_guid;
+ if (mdev->sys_image_guid)
+ return mdev->sys_image_guid;
+
+ if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
+ mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
+ else
+ mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+
+ mdev->sys_image_guid = tmp;
+
+ return tmp;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 2dcbf1ebfd6a..953cc8efba69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -155,7 +155,8 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
- u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6;
+ /* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */
+ u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index b1293d153a58..ea934a48c90a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -177,9 +177,14 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
return mlx5_cqwq_ctr2ix(wq, wq->cc);
}
-static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+static inline struct mlx5_cqe64 *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
- return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
+ struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix);
+
+ /* For 128B CQEs the data is in the last 64B */
+ cqe += wq->fbc.log_stride == 7;
+
+ return cqe;
}
static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 8a291eb36c64..080ddd1942ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -80,6 +80,7 @@ config MLXSW_SPECTRUM
depends on IPV6_GRE || IPV6_GRE=n
select GENERIC_ALLOCATOR
select PARMAN
+ select OBJAGG
select MLXFW
default m
---help---
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 1f77e97e2d7a..bbf45f10c208 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -20,7 +20,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_acl_tcam.o spectrum_acl_ctcam.o \
spectrum_acl_atcam.o spectrum_acl_erp.o \
spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
- spectrum_acl.o \
+ spectrum_acl_bloom_filter.o spectrum_acl.o \
spectrum_flower.o spectrum_cnt.o \
spectrum_fid.o spectrum_ipip.o \
spectrum_acl_flex_actions.o \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f7154f358f27..ddedf8ab5b64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -970,10 +970,11 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
};
-int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
- const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv, bool reload,
- struct devlink *devlink)
+static int
+__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv, bool reload,
+ struct devlink *devlink)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
@@ -1040,6 +1041,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_devlink_register;
}
+ if (mlxsw_driver->params_register && !reload) {
+ err = mlxsw_driver->params_register(mlxsw_core);
+ if (err)
+ goto err_register_params;
+ }
+
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
goto err_hwmon_init;
@@ -1062,6 +1069,9 @@ err_driver_init:
err_thermal_init:
mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
+ if (mlxsw_driver->params_unregister && !reload)
+ mlxsw_driver->params_unregister(mlxsw_core);
+err_register_params:
if (!reload)
devlink_unregister(devlink);
err_devlink_register:
@@ -1081,6 +1091,29 @@ err_bus_init:
err_devlink_alloc:
return err;
}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv, bool reload,
+ struct devlink *devlink)
+{
+ bool called_again = false;
+ int err;
+
+again:
+ err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
+ bus_priv, reload, devlink);
+ /* -EAGAIN is returned in case the FW was updated. FW needs
+ * a reset, so let's try to call __mlxsw_core_bus_device_register()
+ * again.
+ */
+ if (err == -EAGAIN && !called_again) {
+ called_again = true;
+ goto again;
+ }
+
+ return err;
+}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
@@ -1102,6 +1135,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
+ if (mlxsw_core->driver->params_unregister && !reload)
+ mlxsw_core->driver->params_unregister(mlxsw_core);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
@@ -1114,6 +1149,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
reload_fail_deinit:
+ if (mlxsw_core->driver->params_unregister)
+ mlxsw_core->driver->params_unregister(mlxsw_core);
devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c4e4971764e5..4e114f35ee0d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -282,6 +282,8 @@ struct mlxsw_driver {
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
+ int (*params_register)(struct mlxsw_core *mlxsw_core);
+ void (*params_unregister)(struct mlxsw_core *mlxsw_core);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool res_query_enabled;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 785bf01fe2be..df78d23b3ec3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -426,15 +426,17 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask, int block_start, int block_end)
+ char *key, char *mask)
{
+ unsigned int blocks_count =
+ mlxsw_afk_key_info_blocks_count_get(key_info);
char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
int block_index, i;
- for (i = block_start; i <= block_end; i++) {
+ for (i = 0; i < blocks_count; i++) {
memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
@@ -451,10 +453,18 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
values->storage.mask);
}
- if (key)
- mlxsw_afk->ops->encode_block(block_key, i, key);
- if (mask)
- mlxsw_afk->ops->encode_block(block_mask, i, mask);
+ mlxsw_afk->ops->encode_block(key, i, block_key);
+ mlxsw_afk->ops->encode_block(mask, i, block_mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
+
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end)
+{
+ int i;
+
+ for (i = block_start; i <= block_end; i++)
+ mlxsw_afk->ops->clear_block(key, i);
+}
+EXPORT_SYMBOL(mlxsw_afk_clear);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index c29c045d826d..4a625cdf3e7c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -33,6 +33,8 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_IP_TTL_,
MLXSW_AFK_ELEMENT_IP_ECN,
MLXSW_AFK_ELEMENT_IP_DSCP,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -87,6 +89,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
@@ -188,7 +192,8 @@ struct mlxsw_afk;
struct mlxsw_afk_ops {
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
- void (*encode_block)(char *block, int block_index, char *output);
+ void (*encode_block)(char *output, int block_index, char *block);
+ void (*clear_block)(char *output, int block_index);
};
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
@@ -228,6 +233,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask, int block_start, int block_end);
+ char *key, char *mask);
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 6d29dc428608..61f897b40f82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -16,6 +16,15 @@
#define MLXSW_THERMAL_MAX_TEMP 110000 /* 110C */
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MAX_DUTY 255
+/* Minimum and maximum allowed fan speed in percent: from 20% to 100%. Values
+ * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10, are used for
+ * setting the fan speed dynamic minimum. For example, if the value is set to
+ * 14 (40%), the cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7,
+ * 8, 9, 10 to produce PWM speeds in percent: 40, 40, 40, 40, 40, 50, 60, 70,
+ * 80, 90, 100.
+ */
+#define MLXSW_THERMAL_SPEED_MIN (MLXSW_THERMAL_MAX_STATE + 2)
+#define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2)
+#define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */
struct mlxsw_thermal_trip {
int type;
@@ -68,6 +77,7 @@ struct mlxsw_thermal {
const struct mlxsw_bus_info *bus_info;
struct thermal_zone_device *tzdev;
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+ u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
};
@@ -285,12 +295,51 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
struct mlxsw_thermal *thermal = cdev->devdata;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int err, idx;
+ unsigned long cur_state, i;
+ int idx;
+ u8 duty;
+ int err;
idx = mlxsw_get_cooling_device_idx(thermal, cdev);
if (idx < 0)
return idx;
+ /* Verify whether this request changes the allowed dynamic fan speed
+ * minimum. If so, update the cooling levels accordingly and, if the
+ * current state is below the newly requested minimum, raise it.
+ * For example, if the current state is 5 and the minimum is changed
+ * from 4 to 6, thermal->cooling_levels[0..5] are all changed from 4
+ * to 6, and the entry for the current state (5) is overwritten.
+ */
+ if (state >= MLXSW_THERMAL_SPEED_MIN &&
+ state <= MLXSW_THERMAL_SPEED_MAX) {
+ state -= MLXSW_THERMAL_MAX_STATE;
+ for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
+ thermal->cooling_levels[i] = max(state, i);
+
+ mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+ if (err)
+ return err;
+
+ duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
+ cur_state = mlxsw_duty_to_state(duty);
+
+ /* If the current fan state is lower than the requested dynamic
+ * minimum, increase the fan speed up to the dynamic minimum.
+ */
+ if (state < cur_state)
+ return 0;
+
+ state = cur_state;
+ }
+
+ if (state > MLXSW_THERMAL_MAX_STATE)
+ return -EINVAL;
+
+ /* Normalize the state to the valid speed range. */
+ state = thermal->cooling_levels[state];
mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
@@ -369,6 +418,11 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
}
}
+ /* Initialize cooling levels per PWM state. */
+ for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
+ thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
+ i);
+
thermal->tzdev = thermal_zone_device_register("mlxsw",
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
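As a stand-alone illustration of the mapping described in the comment above (plain C, not driver code): a requested state of MLXSW_THERMAL_MAX_STATE + 4, i.e. 14, encodes a 40% dynamic minimum, and the cooling-levels vector becomes max(minimum, i) for every state i.

#include <stdio.h>

#define MAX_STATE 10	/* mirrors MLXSW_THERMAL_MAX_STATE */

int main(void)
{
	unsigned long state = 14;	/* request: dynamic minimum of 40% */
	unsigned long levels[MAX_STATE + 1];
	unsigned long i;

	state -= MAX_STATE;		/* 14 -> 4, i.e. 40% */
	for (i = 0; i <= MAX_STATE; i++)
		levels[i] = state > i ? state : i;

	/* Prints levels 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 -> 40%..100% */
	for (i = 0; i <= MAX_STATE; i++)
		printf("state %lu -> level %lu (%lu%%)\n", i, levels[i],
		       levels[i] * 10);
	return 0;
}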
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 5890fdfd62c3..66b8098c6fd2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1720,7 +1720,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
struct mlxsw_pci *mlxsw_pci;
- bool called_again = false;
int err;
mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
@@ -1777,18 +1776,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.dev = &pdev->dev;
mlxsw_pci->id = id;
-again:
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci, false,
NULL);
- /* -EAGAIN is returned in case the FW was updated. FW needs
- * a reset, so lets try to call mlxsw_core_bus_device_register()
- * again.
- */
- if (err == -EAGAIN && !called_again) {
- called_again = true;
- goto again;
- } else if (err) {
+ if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index db3d2790aeec..9b48dffc9f63 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -641,6 +641,10 @@ enum mlxsw_reg_sfn_rec_type {
MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
/* Aged-out MAC address on a LAG port. */
MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8,
+ /* Learned unicast tunnel record. */
+ MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL = 0xD,
+ /* Aged-out unicast tunnel record. */
+ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL = 0xE,
};
/* reg_sfn_rec_type
@@ -704,6 +708,66 @@ static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index,
*p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index);
}
+/* reg_sfn_uc_tunnel_uip_msb
+ * When protocol is IPv4, the most significant byte of the underlay IPv4
+ * address of the remote VTEP.
+ * When protocol is IPv6, reserved.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_msb, MLXSW_REG_SFN_BASE_LEN, 24,
+ 8, MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+enum mlxsw_reg_sfn_uc_tunnel_protocol {
+ MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV4,
+ MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV6,
+};
+
+/* reg_sfn_uc_tunnel_protocol
+ * IP protocol.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27,
+ 1, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+/* reg_sfn_uc_tunnel_uip_lsb
+ * When protocol is IPv4, the least significant bytes of the underlay
+ * IPv4 address of the remote VTEP.
+ * When protocol is IPv6, ipv6_id to be queried from TNIPSD.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0,
+ 24, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+enum mlxsw_reg_sfn_tunnel_port {
+ MLXSW_REG_SFN_TUNNEL_PORT_NVE,
+ MLXSW_REG_SFN_TUNNEL_PORT_VPLS,
+ MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL0,
+ MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_sfn_uc_tunnel_port
+ * Tunnel port.
+ * Reserved on Spectrum.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, tunnel_port, MLXSW_REG_SFN_BASE_LEN, 0, 4,
+ MLXSW_REG_SFN_REC_LEN, 0x10, false);
+
+static inline void
+mlxsw_reg_sfn_uc_tunnel_unpack(char *payload, int rec_index, char *mac,
+ u16 *p_fid, u32 *p_uip,
+ enum mlxsw_reg_sfn_uc_tunnel_protocol *p_proto)
+{
+ u32 uip_msb, uip_lsb;
+
+ mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_fid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+ uip_msb = mlxsw_reg_sfn_uc_tunnel_uip_msb_get(payload, rec_index);
+ uip_lsb = mlxsw_reg_sfn_uc_tunnel_uip_lsb_get(payload, rec_index);
+ *p_uip = uip_msb << 24 | uip_lsb;
+ *p_proto = mlxsw_reg_sfn_uc_tunnel_protocol_get(payload, rec_index);
+}
+
/* SPMS - Switch Port MSTP/RSTP State Register
* -------------------------------------------
* Configures the spanning tree state of a physical port.
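A hedged sketch of how a consumer could decode one of the new learned/aged-out unicast tunnel records with the helper added above; the handler name and the surrounding dispatch are illustrative, not part of this patch.

/* Hypothetical SFN record handler (sketch only). */
static void example_handle_uc_tunnel(char *sfn_pl, int rec_index)
{
	enum mlxsw_reg_sfn_uc_tunnel_protocol proto;
	char mac[ETH_ALEN];
	u16 fid;
	u32 uip;

	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid, &uip,
				       &proto);
	if (proto != MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV4)
		return; /* IPv6 underlay needs an ipv6_id lookup instead */
	/* uip now holds the remote VTEP IPv4 address (uip_msb << 24 | uip_lsb). */
}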
@@ -2431,6 +2495,43 @@ static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a)
*p_a = mlxsw_reg_pefa_a_get(payload);
}
+/* PEMRBT - Policy-Engine Multicast Router Binding Table Register
+ * --------------------------------------------------------------
+ * This register is used for binding a multicast router to an ACL group
+ * that serves the MC router.
+ * This register is not supported by SwitchX/-2 and Spectrum.
+ */
+#define MLXSW_REG_PEMRBT_ID 0x3014
+#define MLXSW_REG_PEMRBT_LEN 0x14
+
+MLXSW_REG_DEFINE(pemrbt, MLXSW_REG_PEMRBT_ID, MLXSW_REG_PEMRBT_LEN);
+
+enum mlxsw_reg_pemrbt_protocol {
+ MLXSW_REG_PEMRBT_PROTO_IPV4,
+ MLXSW_REG_PEMRBT_PROTO_IPV6,
+};
+
+/* reg_pemrbt_protocol
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pemrbt, protocol, 0x00, 0, 1);
+
+/* reg_pemrbt_group_id
+ * ACL group identifier.
+ * Range 0..cap_max_acl_groups-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pemrbt, group_id, 0x10, 0, 16);
+
+static inline void
+mlxsw_reg_pemrbt_pack(char *payload, enum mlxsw_reg_pemrbt_protocol protocol,
+ u16 group_id)
+{
+ MLXSW_REG_ZERO(pemrbt, payload);
+ mlxsw_reg_pemrbt_protocol_set(payload, protocol);
+ mlxsw_reg_pemrbt_group_id_set(payload, group_id);
+}
+
/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
* -----------------------------------------------------
* This register is used for accessing rules within a TCAM region.
@@ -2642,7 +2743,7 @@ mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index,
mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank);
mlxsw_reg_perpt_erpt_index_set(payload, erpt_index);
mlxsw_reg_perpt_key_size_set(payload, key_size);
- mlxsw_reg_perpt_bf_bypass_set(payload, true);
+ mlxsw_reg_perpt_bf_bypass_set(payload, false);
mlxsw_reg_perpt_erp_id_set(payload, erp_id);
mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank);
mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index);
@@ -2834,8 +2935,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
u32 priority,
const char *tcam_region_info,
const char *key, u8 erp_id,
- bool large_exists, u32 lkey_id,
- u32 action_pointer)
+ u16 delta_start, u8 delta_mask,
+ u8 delta_value, bool large_exists,
+ u32 lkey_id, u32 action_pointer)
{
MLXSW_REG_ZERO(ptce3, payload);
mlxsw_reg_ptce3_v_set(payload, valid);
@@ -2844,6 +2946,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+ mlxsw_reg_ptce3_delta_start_set(payload, delta_start);
+ mlxsw_reg_ptce3_delta_mask_set(payload, delta_mask);
+ mlxsw_reg_ptce3_delta_value_set(payload, delta_value);
mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
@@ -2901,7 +3006,7 @@ static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id)
mlxsw_reg_percr_region_id_set(payload, region_id);
mlxsw_reg_percr_atcam_ignore_prune_set(payload, false);
mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false);
- mlxsw_reg_percr_bf_bypass_set(payload, true);
+ mlxsw_reg_percr_bf_bypass_set(payload, false);
}
/* PERERP - Policy-Engine Region eRP Register
@@ -2990,6 +3095,72 @@ static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id,
mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id);
}
+/* PEABFE - Policy-Engine Algorithmic Bloom Filter Entries Register
+ * ----------------------------------------------------------------
+ * This register configures the Bloom filter entries.
+ */
+#define MLXSW_REG_PEABFE_ID 0x3022
+#define MLXSW_REG_PEABFE_BASE_LEN 0x10
+#define MLXSW_REG_PEABFE_BF_REC_LEN 0x4
+#define MLXSW_REG_PEABFE_BF_REC_MAX_COUNT 256
+#define MLXSW_REG_PEABFE_LEN (MLXSW_REG_PEABFE_BASE_LEN + \
+ MLXSW_REG_PEABFE_BF_REC_LEN * \
+ MLXSW_REG_PEABFE_BF_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(peabfe, MLXSW_REG_PEABFE_ID, MLXSW_REG_PEABFE_LEN);
+
+/* reg_peabfe_size
+ * Number of BF entries to be updated.
+ * Range 1..256
+ * Access: Op
+ */
+MLXSW_ITEM32(reg, peabfe, size, 0x00, 0, 9);
+
+/* reg_peabfe_bf_entry_state
+ * Bloom filter state
+ * 0 - Clear
+ * 1 - Set
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_state,
+ MLXSW_REG_PEABFE_BASE_LEN, 31, 1,
+ MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false);
+
+/* reg_peabfe_bf_entry_bank
+ * Bloom filter bank ID
+ * Range 0..cap_max_erp_table_banks-1
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_bank,
+ MLXSW_REG_PEABFE_BASE_LEN, 24, 4,
+ MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false);
+
+/* reg_peabfe_bf_entry_index
+ * Bloom filter entry index
+ * Range 0..2^cap_max_bf_log-1
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_index,
+ MLXSW_REG_PEABFE_BASE_LEN, 0, 24,
+ MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_peabfe_pack(char *payload)
+{
+ MLXSW_REG_ZERO(peabfe, payload);
+}
+
+static inline void mlxsw_reg_peabfe_rec_pack(char *payload, int rec_index,
+ u8 state, u8 bank, u32 bf_index)
+{
+ u8 num_rec = mlxsw_reg_peabfe_size_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_peabfe_size_set(payload, rec_index + 1);
+ mlxsw_reg_peabfe_bf_entry_state_set(payload, rec_index, state);
+ mlxsw_reg_peabfe_bf_entry_bank_set(payload, rec_index, bank);
+ mlxsw_reg_peabfe_bf_entry_index_set(payload, rec_index, bf_index);
+}
+
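A minimal sketch of filling a PEABFE payload with the helpers above; the bank and entry-index values are placeholders. Note that mlxsw_reg_peabfe_rec_pack() grows the size field automatically as records are appended.

/* Hypothetical Bloom filter update (sketch only). */
static int example_peabfe_update(struct mlxsw_core *core)
{
	char peabfe_pl[MLXSW_REG_PEABFE_LEN];

	mlxsw_reg_peabfe_pack(peabfe_pl);
	/* Set entry 0x100 in bank 0, clear entry 0x200 in bank 1. */
	mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 1, 0, 0x100);
	mlxsw_reg_peabfe_rec_pack(peabfe_pl, 1, 0, 1, 0x200);

	return mlxsw_reg_write(core, MLXSW_REG(peabfe), peabfe_pl);
}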
/* IEDR - Infrastructure Entry Delete Register
* ----------------------------------------------------
* This register is used for deleting entries from the entry tables.
@@ -4231,8 +4402,11 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_RFC_2863_CNT = 0x1,
MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
+ MLXSW_REG_PPCNT_RFC_3635_CNT = 0x3,
MLXSW_REG_PPCNT_EXT_CNT = 0x5,
+ MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
@@ -4247,6 +4421,7 @@ enum mlxsw_reg_ppcnt_grp {
* 0x2: RFC 2819 Counters
* 0x3: RFC 3635 Counters
* 0x5: Ethernet Extended Counters
+ * 0x6: Ethernet Discard Counters
* 0x8: Link Level Retransmission Counters
* 0x10: Per Priority Counters
* 0x11: Per Traffic Class Counters
@@ -4390,8 +4565,46 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+/* Ethernet RFC 2863 Counter Group */
+
+/* reg_ppcnt_if_in_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_in_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_if_out_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_if_out_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
/* Ethernet RFC 2819 Counter Group */
+/* reg_ppcnt_ether_stats_undersize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_undersize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_ether_stats_oversize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_oversize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_ether_stats_fragments
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_fragments,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
/* reg_ppcnt_ether_stats_pkts64octets
* Access: RO
*/
@@ -4452,6 +4665,32 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+/* Ethernet RFC 3635 Counter Group */
+
+/* reg_ppcnt_dot3stats_fcs_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_fcs_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_dot3stats_symbol_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_symbol_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_dot3control_in_unknown_opcodes
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3control_in_unknown_opcodes,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_dot3in_pause_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3in_pause_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
/* Ethernet Extended Counter Group Counters */
/* reg_ppcnt_ecn_marked
@@ -4460,6 +4699,80 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
MLXSW_ITEM64(reg, ppcnt, ecn_marked,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+/* Ethernet Discard Counter Group Counters */
+
+/* reg_ppcnt_ingress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_ingress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_ingress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_ingress_tag_frame_type
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tag_frame_type,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64);
+
+/* reg_ppcnt_egress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
+
+/* reg_ppcnt_loopback_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, loopback_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
+
+/* reg_ppcnt_egress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_egress_hoq
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_hoq,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
+/* reg_ppcnt_egress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
+
+/* reg_ppcnt_ingress_tx_link_down
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tx_link_down,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_egress_stp_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_stp_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_egress_sll
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_sll,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
/* Ethernet Per Priority Group Counters */
/* reg_ppcnt_rx_octets
@@ -4862,6 +5175,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
};
/* reg_htgt_trap_group
@@ -9357,8 +9671,10 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(ppbs),
MLXSW_REG(prcr),
MLXSW_REG(pefa),
+ MLXSW_REG(pemrbt),
MLXSW_REG(ptce2),
MLXSW_REG(perpt),
+ MLXSW_REG(peabfe),
MLXSW_REG(perar),
MLXSW_REG(ptce3),
MLXSW_REG(percr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 99b341539870..b8b3a01c2a9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -41,6 +41,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB,
MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB,
MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB,
+ MLXSW_RES_ID_ACL_MAX_BF_LOG,
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
@@ -93,6 +94,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951,
[MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952,
[MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953,
+ [MLXSW_RES_ID_ACL_MAX_BF_LOG] = 0x2960,
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f84b9c02fcc5..eed1045e4d96 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -45,8 +45,8 @@
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1703
-#define MLXSW_SP1_FWREV_SUBMINOR 4
+#define MLXSW_SP1_FWREV_MINOR 1910
+#define MLXSW_SP1_FWREV_SUBMINOR 622
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -65,6 +65,13 @@ static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";
+static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
+};
+static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
+};
+
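The per-ASIC masks above appear to be consumed by the router code to check that a candidate (e.g. RIF) MAC address shares the non-masked prefix of the switch base MAC; a sketch of such a check, under that assumption:

/* Sketch: does addr match base_mac under the per-ASIC mask? (assumption) */
static bool example_mac_prefix_matches(const struct mlxsw_sp *mlxsw_sp,
				       const unsigned char *addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		if ((addr[i] ^ mlxsw_sp->base_mac[i]) & mlxsw_sp->mac_mask[i])
			return false;
	}
	return true;
}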
/* tx_hdr_version
* Tx header version.
* Must be set to 1.
@@ -323,6 +330,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
const char *fw_filename = mlxsw_sp->fw_filename;
+ union devlink_param_value value;
const struct firmware *firmware;
int err;
@@ -330,6 +338,15 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
if (!req_rev || !fw_filename)
return 0;
+ /* Don't check if devlink 'fw_load_policy' param is 'flash' */
+ err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ &value);
+ if (err)
+ return err;
+ if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
+ return 0;
+
/* Validate driver & FW are compatible */
if (rev->major != req_rev->major) {
WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
@@ -1123,22 +1140,40 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
return 0;
}
-static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool flush_default)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
- &mlxsw_sp_port->vlans_list, list)
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ &mlxsw_sp_port->vlans_list, list) {
+ if (!flush_default &&
+ mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
+ continue;
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
+ }
+}
+
+static void
+mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ if (mlxsw_sp_port_vlan->bridge_port)
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ else if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}
-static struct mlxsw_sp_port_vlan *
+struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- bool untagged = vid == 1;
+ bool untagged = vid == MLXSW_SP_DEFAULT_VID;
int err;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (mlxsw_sp_port_vlan)
+ return ERR_PTR(-EEXIST);
+
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
if (err)
return ERR_PTR(err);
@@ -1150,7 +1185,6 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
}
mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
- mlxsw_sp_port_vlan->ref_count = 1;
mlxsw_sp_port_vlan->vid = vid;
list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
@@ -1161,46 +1195,17 @@ err_port_vlan_alloc:
return ERR_PTR(err);
}
-static void
-mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
u16 vid = mlxsw_sp_port_vlan->vid;
+ mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
list_del(&mlxsw_sp_port_vlan->list);
kfree(mlxsw_sp_port_vlan);
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
-struct mlxsw_sp_port_vlan *
-mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
-
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
- if (mlxsw_sp_port_vlan) {
- mlxsw_sp_port_vlan->ref_count++;
- return mlxsw_sp_port_vlan;
- }
-
- return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
-}
-
-void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
-{
- struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
-
- if (--mlxsw_sp_port_vlan->ref_count != 0)
- return;
-
- if (mlxsw_sp_port_vlan->bridge_port)
- mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
- else if (fid)
- mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
-
- mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
-}
-
static int mlxsw_sp_port_add_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
@@ -1212,7 +1217,7 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
if (!vid)
return 0;
- return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
+ return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
@@ -1230,7 +1235,7 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (!mlxsw_sp_port_vlan)
return 0;
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
return 0;
}
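With the refcounted get/put pair removed, callers now create and destroy port VLANs explicitly; a short sketch of the new calling convention, where -EEXIST from mlxsw_sp_port_vlan_create() signals that the {port, VID} pair already exists. The wrapper names are illustrative only.

/* Hypothetical wrappers around the new API (sketch only). */
static int example_port_vid_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan);
}

static void example_port_vid_del(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}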
@@ -1342,7 +1347,6 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
__be16 protocol = f->common.protocol;
const struct tc_action *a;
- LIST_HEAD(actions);
int err;
if (!tcf_exts_has_one_action(f->exts)) {
@@ -1881,8 +1885,38 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
+ {
+ .str = "if_in_discards",
+ .getter = mlxsw_reg_ppcnt_if_in_discards_get,
+ },
+ {
+ .str = "if_out_discards",
+ .getter = mlxsw_reg_ppcnt_if_out_discards_get,
+ },
+ {
+ .str = "if_out_errors",
+ .getter = mlxsw_reg_ppcnt_if_out_errors_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
{
+ .str = "ether_stats_undersize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
+ },
+ {
+ .str = "ether_stats_oversize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
+ },
+ {
+ .str = "ether_stats_fragments",
+ .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
+ },
+ {
.str = "ether_pkts64octets",
.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
},
@@ -1927,6 +1961,82 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
+ {
+ .str = "dot3stats_fcs_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
+ },
+ {
+ .str = "dot3stats_symbol_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
+ },
+ {
+ .str = "dot3control_in_unknown_opcodes",
+ .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
+ },
+ {
+ .str = "dot3in_pause_frames",
+ .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
+ {
+ .str = "discard_ingress_general",
+ .getter = mlxsw_reg_ppcnt_ingress_general_get,
+ },
+ {
+ .str = "discard_ingress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
+ },
+ {
+ .str = "discard_ingress_tag_frame_type",
+ .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
+ },
+ {
+ .str = "discard_egress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
+ },
+ {
+ .str = "discard_loopback_filter",
+ .getter = mlxsw_reg_ppcnt_loopback_filter_get,
+ },
+ {
+ .str = "discard_egress_general",
+ .getter = mlxsw_reg_ppcnt_egress_general_get,
+ },
+ {
+ .str = "discard_egress_hoq",
+ .getter = mlxsw_reg_ppcnt_egress_hoq_get,
+ },
+ {
+ .str = "discard_egress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_tx_link_down",
+ .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
+ },
+ {
+ .str = "discard_egress_stp_filter",
+ .getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
+ },
+ {
+ .str = "discard_egress_sll",
+ .getter = mlxsw_reg_ppcnt_egress_sll_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
{
.str = "rx_octets_prio",
@@ -1979,7 +2089,10 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
+ MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
(MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
IEEE_8021QAZ_MAX_TCS) + \
(MLXSW_SP_PORT_HW_TC_STATS_LEN * \
@@ -2020,12 +2133,31 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_sp_port_get_prio_strings(&p, i);
@@ -2068,10 +2200,22 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
*p_hw_stats = mlxsw_sp_port_hw_stats;
*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_2863_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_RFC_2819_CNT:
*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_3635_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_DISCARD_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
+ *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_PRIO_CNT:
*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2121,11 +2265,26 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+ /* RFC 2863 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+
/* RFC 2819 Counters */
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
data, data_index);
data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+ /* RFC 3635 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+
+ /* Discard Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+
/* Per-Priority Counters */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
@@ -2892,7 +3051,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
- mlxsw_sp_port->pvid = 1;
+ mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port->split = split;
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
@@ -3031,13 +3190,22 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_nve_init;
}
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_pvid_set;
+ }
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
+ MLXSW_SP_DEFAULT_VID);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
mlxsw_sp_port->local_port);
err = PTR_ERR(mlxsw_sp_port_vlan);
- goto err_port_vlan_get;
+ goto err_port_vlan_create;
}
+ mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
@@ -3057,8 +3225,9 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_register_netdev:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
-err_port_vlan_get:
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
+err_port_vlan_create:
+err_port_pvid_set:
mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
@@ -3099,7 +3268,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
mlxsw_sp_port_nve_fini(mlxsw_sp_port);
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
@@ -3394,10 +3563,10 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
-static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb,
+static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
u8 local_port, void *priv)
{
- skb->offload_mr_fwd_mark = 1;
+ skb->offload_l3_fwd_mark = 1;
skb->offload_fwd_mark = 1;
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
@@ -3445,8 +3614,8 @@ out:
MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
-#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
- MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \
+#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
#define MLXSW_SP_EVENTL(_func, _trap_id) \
@@ -3479,7 +3648,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* L3 traps */
MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
false),
@@ -3523,7 +3692,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
- MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
@@ -3554,6 +3723,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
rate = 128;
burst_size = 7;
break;
@@ -3639,6 +3809,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
priority = 1;
tc = 1;
break;
@@ -3841,6 +4012,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_nve_init;
}
+ err = mlxsw_sp_acl_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
+ goto err_acl_init;
+ }
+
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
@@ -3858,12 +4035,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_netdev_notifier;
}
- err = mlxsw_sp_acl_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
- goto err_acl_init;
- }
-
err = mlxsw_sp_dpipe_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
@@ -3881,12 +4052,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
err_ports_create:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
- mlxsw_sp_acl_fini(mlxsw_sp);
-err_acl_init:
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_acl_fini(mlxsw_sp);
+err_acl_init:
mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
mlxsw_sp_afa_fini(mlxsw_sp);
@@ -3922,6 +4093,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -3937,6 +4109,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -3947,9 +4120,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
- mlxsw_sp_acl_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_nve_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3962,16 +4135,20 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_kvdl_fini(mlxsw_sp);
}
+/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
+ * 802.1Q FIDs
+ */
+#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
+ VLAN_VID_MASK - 1)
+
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
- .max_fid_offset_flood_tables = 3,
- .fid_offset_flood_table_size = VLAN_N_VID - 1,
.max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
@@ -3994,10 +4171,8 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
- .max_fid_offset_flood_tables = 3,
- .fid_offset_flood_table_size = VLAN_N_VID - 1,
.max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
@@ -4177,6 +4352,52 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
return 0;
}
+static int
+mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
+ (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
+ NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlxsw_sp_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ mlxsw_sp_devlink_param_fw_load_policy_validate),
+};
+
+static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
+ ARRAY_SIZE(mlxsw_sp_devlink_params));
+ if (err)
+ return err;
+
+ value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ value);
+ return 0;
+}
+
+static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ devlink_params_unregister(priv_to_devlink(mlxsw_core),
+ mlxsw_sp_devlink_params,
+ ARRAY_SIZE(mlxsw_sp_devlink_params));
+}
+
static struct mlxsw_driver mlxsw_sp1_driver = {
.kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@@ -4198,6 +4419,8 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
+ .params_register = mlxsw_sp_params_register,
+ .params_unregister = mlxsw_sp_params_unregister,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.res_query_enabled = true,
@@ -4223,6 +4446,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp_params_register,
+ .params_unregister = mlxsw_sp_params_unregister,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
@@ -4298,6 +4523,25 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
+static void
+mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ if (netif_is_bridge_port(lag_dev))
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!netif_is_bridge_port(upper_dev))
+ continue;
+ br_dev = netdev_master_upper_dev_get(upper_dev);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
+ }
+}
+
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
@@ -4425,7 +4669,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_upper *lag;
u16 lag_id;
u8 port_index;
@@ -4459,9 +4702,8 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
lag->ref_count++;
/* Port is no longer usable as a router interface */
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
- if (mlxsw_sp_port_vlan->fid)
- mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+ if (mlxsw_sp_port->default_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
return 0;
@@ -4489,7 +4731,12 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
/* Any VLANs configured on the port are no longer valid */
- mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
+ mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
+ /* Make the LAG and its directly linked uppers leave the bridges
+ * they are members of.
+ */
+ mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
if (lag->ref_count == 1)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -4499,9 +4746,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 0;
lag->ref_count--;
- mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
/* Make sure untagged frames are allowed to ingress */
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -4579,7 +4825,7 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
if (err)
goto err_port_stp_set;
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
true, false);
if (err)
goto err_port_vlan_set;
@@ -4611,7 +4857,7 @@ static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
vid, true);
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
false, false);
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
@@ -4631,6 +4877,30 @@ static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
return num_vxlans > 1;
}
+static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
+{
+ DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ u16 pvid;
+ int err;
+
+ if (!netif_is_vxlan(dev))
+ continue;
+
+ err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
+ if (err || !pvid)
+ continue;
+
+ if (test_and_set_bit(pvid, vlans))
+ return false;
+ }
+
+ return true;
+}
+
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
struct netlink_ext_ack *extack)
{
@@ -4639,13 +4909,15 @@ static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
return false;
}
- if (br_vlan_enabled(br_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "VLAN filtering can not be enabled on a bridge with a VxLAN device");
+ if (!br_vlan_enabled(br_dev) &&
+ mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
return false;
}
- if (mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
+ if (br_vlan_enabled(br_dev) &&
+ !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
return false;
}
@@ -4719,11 +4991,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
return -EINVAL;
}
- if (is_vlan_dev(upper_dev) &&
- vlan_dev_vlan_id(upper_dev) == 1) {
- NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
- return -EINVAL;
- }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4752,6 +5019,16 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
} else if (netif_is_macvlan(upper_dev)) {
if (!info->linking)
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ } else if (is_vlan_dev(upper_dev)) {
+ struct net_device *br_dev;
+
+ if (!netif_is_bridge_port(upper_dev))
+ break;
+ if (info->linking)
+ break;
+ br_dev = netdev_master_upper_dev_get(upper_dev);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
+ br_dev);
}
break;
}
@@ -4908,6 +5185,48 @@ static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
+ struct net_device *br_dev,
+ unsigned long event, void *ptr,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
+ if (!info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev))
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ break;
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
unsigned long event, void *ptr)
{
@@ -4921,6 +5240,9 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
real_dev, event,
ptr, vid);
+ else if (netif_is_bridge_master(real_dev))
+ return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
+ event, ptr, vid);
return 0;
}
@@ -5020,10 +5342,21 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
if (cu_info->linking) {
if (!netif_running(dev))
return 0;
+ /* When the bridge is VLAN-aware, the VNI of the VxLAN
+ * device needs to be mapped to a VLAN, but at this
+ * point no VLANs are configured on the VxLAN device
+ */
+ if (br_vlan_enabled(upper_dev))
+ return 0;
return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
- dev, extack);
+ dev, 0, extack);
} else {
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev);
+ /* VLANs were already flushed, which triggered the
+ * necessary cleanup
+ */
+ if (br_vlan_enabled(upper_dev))
+ return 0;
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
}
break;
case NETDEV_PRE_UP:
@@ -5034,7 +5367,7 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_lower_get(upper_dev))
return 0;
- return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev,
+ return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
extack);
case NETDEV_DOWN:
upper_dev = netdev_master_upper_dev_get(dev);
@@ -5044,7 +5377,7 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_lower_get(upper_dev))
return 0;
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev);
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
break;
}
@@ -5075,8 +5408,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
event, ptr);
- else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
- err = mlxsw_sp_netdevice_router_port_event(dev);
+ else if (event == NETDEV_PRE_CHANGEADDR ||
+ event == NETDEV_CHANGEADDR ||
+ event == NETDEV_CHANGEMTU)
+ err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
@@ -5097,18 +5432,10 @@ static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
.notifier_call = mlxsw_sp_inetaddr_valid_event,
};
-static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
- .notifier_call = mlxsw_sp_inetaddr_event,
-};
-
static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
.notifier_call = mlxsw_sp_inet6addr_valid_event,
};
-static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
- .notifier_call = mlxsw_sp_inet6addr_event,
-};
-
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
{0, },
@@ -5134,9 +5461,7 @@ static int __init mlxsw_sp_module_init(void)
int err;
register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
- register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
- register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
if (err)
@@ -5163,9 +5488,7 @@ err_sp1_pci_driver_register:
err_sp2_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
- unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
- unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
return err;
}
@@ -5176,9 +5499,7 @@ static void __exit mlxsw_sp_module_exit(void)
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
- unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
- unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 0875a79cbe7b..a1c32a81b011 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -8,6 +8,7 @@
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
+#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
@@ -24,6 +25,8 @@
#include "core_acl_flex_actions.h"
#include "reg.h"
+#define MLXSW_SP_DEFAULT_VID (VLAN_N_VID - 1)
+
#define MLXSW_SP_FID_8021D_MAX 1024
#define MLXSW_SP_MID_MAX 7000
@@ -80,6 +83,10 @@ enum mlxsw_sp_fid_type {
MLXSW_SP_FID_TYPE_MAX,
};
+enum mlxsw_sp_nve_type {
+ MLXSW_SP_NVE_TYPE_VXLAN,
+};
+
struct mlxsw_sp_mid {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -127,6 +134,7 @@ struct mlxsw_sp {
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
+ const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
int *port_to_module;
struct mlxsw_sp_sb *sb;
@@ -184,7 +192,6 @@ struct mlxsw_sp_port_vlan {
struct list_head list;
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp_fid *fid;
- unsigned int ref_count;
u16 vid;
struct mlxsw_sp_bridge_port *bridge_port;
struct list_head bridge_vlan_node;
@@ -235,6 +242,7 @@ struct mlxsw_sp_port {
} periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
+ struct mlxsw_sp_port_vlan *default_vlan;
struct mlxsw_sp_qdisc *root_qdisc;
struct mlxsw_sp_qdisc *tclass_qdiscs;
unsigned acl_rule_count;
@@ -261,6 +269,26 @@ static inline bool mlxsw_sp_bridge_has_vxlan(struct net_device *br_dev)
return !!mlxsw_sp_bridge_vxlan_dev_find(br_dev);
}
+static inline int
+mlxsw_sp_vxlan_mapped_vid(const struct net_device *vxlan_dev, u16 *p_vid)
+{
+ struct bridge_vlan_info vinfo;
+ u16 vid = 0;
+ int err;
+
+ err = br_vlan_get_pvid(vxlan_dev, &vid);
+ if (err || !vid)
+ goto out;
+
+ err = br_vlan_get_info(vxlan_dev, vid, &vinfo);
+ if (err || !(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED))
+ vid = 0;
+
+out:
+ *p_vid = vid;
+ return err;
+}
+
static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -358,11 +386,15 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
const struct net_device *vxlan_dev);
+struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ u16 vid,
+ struct netlink_ext_ack *extack);
+extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -384,8 +416,8 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
bool learn_enable);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
struct mlxsw_sp_port_vlan *
-mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
-void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
@@ -429,15 +461,12 @@ union mlxsw_sp_l3addr {
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
+ unsigned long event, void *ptr);
void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
const struct net_device *macvlan_dev);
-int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
- unsigned long event, void *ptr);
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
unsigned long event, void *ptr);
-int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
- unsigned long event, void *ptr);
int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
unsigned long event, void *ptr);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
@@ -457,7 +486,6 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct netdev_notifier_info *info);
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
-void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
@@ -538,6 +566,7 @@ struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
+ u8 action_created:1;
unsigned int counter_index;
};
@@ -547,6 +576,7 @@ struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
enum mlxsw_sp_acl_profile {
MLXSW_SP_ACL_PROFILE_FLOWER,
+ MLXSW_SP_ACL_PROFILE_MR,
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
@@ -581,7 +611,8 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
struct mlxsw_sp_acl_rule_info *
-mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
+ struct mlxsw_afa_block *afa_block);
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
@@ -625,6 +656,7 @@ struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
unsigned long cookie,
+ struct mlxsw_afa_block *afa_block,
struct netlink_ext_ack *extack);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule);
@@ -632,6 +664,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule);
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ struct mlxsw_afa_block *afa_block);
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
@@ -676,6 +711,10 @@ struct mlxsw_sp_acl_tcam_ops {
void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *chunk_priv,
void *entry_priv);
+ int (*entry_action_replace)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *entry_priv,
bool *activity);
@@ -721,6 +760,12 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
+bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index);
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
+int mlxsw_sp_fid_nve_type(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_nve_type *p_type);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
__be32 vni);
int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni);
@@ -728,9 +773,12 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
u32 nve_flood_index);
void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid);
bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid);
-int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni);
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
+ __be32 vni, int nve_ifindex);
void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid);
bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
+void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev);
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u8 local_port,
bool member);
@@ -738,10 +786,10 @@ int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
-enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_fid_type type);
@@ -749,6 +797,8 @@ u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_lookup(struct mlxsw_sp *mlxsw_sp,
+ u16 vid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
int br_ifindex);
struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
@@ -797,10 +847,6 @@ extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
/* spectrum_nve.c */
-enum mlxsw_sp_nve_type {
- MLXSW_SP_NVE_TYPE_VXLAN,
-};
-
struct mlxsw_sp_nve_params {
enum mlxsw_sp_nve_type type;
__be32 vni;
@@ -810,6 +856,9 @@ struct mlxsw_sp_nve_params {
extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[];
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid,
enum mlxsw_sp_l3proto proto,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
index 2a9eac90002e..fe270c1a26a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -67,7 +67,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ctcam_chunk_init(&region->cregion,
&region->catchall.cchunk,
MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
- rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, NULL);
if (IS_ERR(rulei)) {
err = PTR_ERR(rulei);
goto err_rulei_create;
@@ -193,6 +193,15 @@ static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp1_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *_region,
unsigned int offset,
@@ -240,5 +249,6 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
.entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry),
.entry_add = mlxsw_sp1_acl_tcam_entry_add,
.entry_del = mlxsw_sp1_acl_tcam_entry_del,
+ .entry_action_replace = mlxsw_sp1_acl_tcam_entry_action_replace,
.entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index 8ca77f3e8f27..234ab51916db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -34,15 +34,15 @@ mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregio
{
struct mlxsw_sp_acl_atcam_region *aregion;
struct mlxsw_sp_acl_atcam_entry *aentry;
- struct mlxsw_sp_acl_erp *erp;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
- erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
- if (IS_ERR(erp))
- return PTR_ERR(erp);
- aentry->erp = erp;
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, true);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
return 0;
}
@@ -57,7 +57,7 @@ mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregio
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
- mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
}
static const struct mlxsw_sp_acl_ctcam_region_ops
@@ -211,6 +211,23 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp2_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ entry->act_block = rulei->act_block;
+ return mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
+ &region->aregion,
+ &chunk->achunk,
+ &entry->aentry, rulei);
+}
+
+static int
mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *entry_priv,
bool *activity)
@@ -235,5 +252,6 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
.entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry),
.entry_add = mlxsw_sp2_acl_tcam_entry_add,
.entry_del = mlxsw_sp2_acl_tcam_entry_del,
+ .entry_action_replace = mlxsw_sp2_acl_tcam_entry_action_replace,
.entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
index 4dd62478162e..e31ec75ac035 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -7,6 +7,201 @@
#include "spectrum.h"
#include "spectrum_mr.h"
+struct mlxsw_sp2_mr_tcam {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct mlxsw_sp_acl_ruleset *ruleset4;
+ struct mlxsw_sp_acl_ruleset *ruleset6;
+};
+
+struct mlxsw_sp2_mr_route {
+ struct mlxsw_sp2_mr_tcam *mr_tcam;
+};
+
+static struct mlxsw_sp_acl_ruleset *
+mlxsw_sp2_mr_tcam_proto_ruleset(struct mlxsw_sp2_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mr_tcam->ruleset4;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mr_tcam->ruleset6;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_pemrbt_protocol protocol,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ char pemrbt_pl[MLXSW_REG_PEMRBT_LEN];
+ u16 group_id;
+
+ group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
+
+ mlxsw_reg_pemrbt_pack(pemrbt_pl, protocol, group_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pemrbt), pemrbt_pl);
+}
+
+static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = {
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
+};
+
+static int mlxsw_sp2_mr_tcam_ipv4_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ struct mlxsw_afk_element_usage elusage;
+ int err;
+
+ /* Initialize IPv4 ACL group. */
+ mlxsw_afk_element_usage_fill(&elusage,
+ mlxsw_sp2_mr_tcam_usage_ipv4,
+ ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv4));
+ mr_tcam->ruleset4 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
+ mr_tcam->acl_block,
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_ACL_PROFILE_MR,
+ &elusage);
+
+ if (IS_ERR(mr_tcam->ruleset4))
+ return PTR_ERR(mr_tcam->ruleset4);
+
+ /* MC Router groups should be bound before routes are inserted. */
+ err = mlxsw_sp2_mr_tcam_bind_group(mr_tcam->mlxsw_sp,
+ MLXSW_REG_PEMRBT_PROTO_IPV4,
+ mr_tcam->ruleset4);
+ if (err)
+ goto err_bind_group;
+
+ return 0;
+
+err_bind_group:
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset4);
+ return err;
+}
+
+static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset4);
+}
+
+static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = {
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
+};
+
+static int mlxsw_sp2_mr_tcam_ipv6_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ struct mlxsw_afk_element_usage elusage;
+ int err;
+
+ /* Initialize IPv6 ACL group. */
+ mlxsw_afk_element_usage_fill(&elusage,
+ mlxsw_sp2_mr_tcam_usage_ipv6,
+ ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv6));
+ mr_tcam->ruleset6 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
+ mr_tcam->acl_block,
+ MLXSW_SP_L3_PROTO_IPV6,
+ MLXSW_SP_ACL_PROFILE_MR,
+ &elusage);
+
+ if (IS_ERR(mr_tcam->ruleset6))
+ return PTR_ERR(mr_tcam->ruleset6);
+
+ /* MC Router groups should be bound before routes are inserted. */
+ err = mlxsw_sp2_mr_tcam_bind_group(mr_tcam->mlxsw_sp,
+ MLXSW_REG_PEMRBT_PROTO_IPV6,
+ mr_tcam->ruleset6);
+ if (err)
+ goto err_bind_group;
+
+ return 0;
+
+err_bind_group:
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset6);
+ return err;
+}
+
+static void mlxsw_sp2_mr_tcam_ipv6_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset6);
+}
+
+static void
+mlxsw_sp2_mr_tcam_rule_parse4(struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ (char *) &key->source.addr4,
+ (char *) &key->source_mask.addr4, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ (char *) &key->group.addr4,
+ (char *) &key->group_mask.addr4, 4);
+}
+
+static void
+mlxsw_sp2_mr_tcam_rule_parse6(struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ &key->source.addr6.s6_addr[0x0],
+ &key->source_mask.addr6.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ &key->source.addr6.s6_addr[0x4],
+ &key->source_mask.addr6.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ &key->source.addr6.s6_addr[0x8],
+ &key->source_mask.addr6.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ &key->source.addr6.s6_addr[0xc],
+ &key->source_mask.addr6.s6_addr[0xc], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ &key->group.addr6.s6_addr[0x0],
+ &key->group_mask.addr6.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ &key->group.addr6.s6_addr[0x4],
+ &key->group_mask.addr6.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ &key->group.addr6.s6_addr[0x8],
+ &key->group_mask.addr6.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ &key->group.addr6.s6_addr[0xc],
+ &key->group_mask.addr6.s6_addr[0xc], 4);
+}
+
+static void
+mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule,
+ struct mlxsw_sp_mr_route_key *key,
+ unsigned int priority)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ rulei->priority = priority;
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ key->vrid, GENMASK(7, 0));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ key->vrid >> 8, GENMASK(2, 0));
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mlxsw_sp2_mr_tcam_rule_parse6(rulei, key);
+ }
+}
+
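The virtual router ID is split across two flex key elements above: bits 7:0 go to VIRT_ROUTER_0_7 under GENMASK(7, 0) and bits 10:8 to VIRT_ROUTER_8_10 under GENMASK(2, 0). A minimal standalone sketch of that split (the helper name is illustrative, not part of the patch):

static void mr_vrid_split_example(u16 vrid, u32 *lo, u32 *hi)
{
	/* e.g. vrid 0x2a5: lo = 0xa5, hi = 0x2 */
	*lo = vrid & GENMASK(7, 0);
	*hi = (vrid >> 8) & GENMASK(2, 0);
}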
static int
mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
@@ -14,7 +209,33 @@ mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
struct mlxsw_afa_block *afa_block,
enum mlxsw_sp_mr_route_prio prio)
{
+ struct mlxsw_sp2_mr_route *mr_route = route_priv;
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ mr_route->mr_tcam = mr_tcam;
+ ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto);
+ if (WARN_ON(!ruleset))
+ return -EINVAL;
+
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset,
+ (unsigned long) route_priv, afa_block,
+ NULL);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+
+ mlxsw_sp2_mr_tcam_rule_parse(rule, key, prio);
+ err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
+ if (err)
+ goto err_rule_add;
+
return 0;
+
+err_rule_add:
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+ return err;
}
static void
@@ -22,6 +243,21 @@ mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_key *key)
{
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+
+ ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto);
+ if (WARN_ON(!ruleset))
+ return;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset,
+ (unsigned long) route_priv);
+ if (WARN_ON(!rule))
+ return;
+
+ mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
}
static int
@@ -30,21 +266,64 @@ mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_route_key *key,
struct mlxsw_afa_block *afa_block)
{
- return 0;
+ struct mlxsw_sp2_mr_route *mr_route = route_priv;
+ struct mlxsw_sp2_mr_tcam *mr_tcam = mr_route->mr_tcam;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+
+ ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto);
+ if (WARN_ON(!ruleset))
+ return -EINVAL;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset,
+ (unsigned long) route_priv);
+ if (WARN_ON(!rule))
+ return -EINVAL;
+
+ return mlxsw_sp_acl_rule_action_replace(mlxsw_sp, rule, afa_block);
}
static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+ int err;
+
+ mr_tcam->mlxsw_sp = mlxsw_sp;
+ mr_tcam->acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, NULL);
+ if (!mr_tcam->acl_block)
+ return -ENOMEM;
+
+ err = mlxsw_sp2_mr_tcam_ipv4_init(mr_tcam);
+ if (err)
+ goto err_ipv4_init;
+
+ err = mlxsw_sp2_mr_tcam_ipv6_init(mr_tcam);
+ if (err)
+ goto err_ipv6_init;
+
return 0;
+
+err_ipv6_init:
+ mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
+err_ipv4_init:
+ mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
+ return err;
}
static void mlxsw_sp2_mr_tcam_fini(void *priv)
{
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp2_mr_tcam_ipv6_fini(mr_tcam);
+ mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
+ mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
}
const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp2_mr_tcam),
.init = mlxsw_sp2_mr_tcam_init,
.fini = mlxsw_sp2_mr_tcam_fini,
+ .route_priv_size = sizeof(struct mlxsw_sp2_mr_route),
.route_create = mlxsw_sp2_mr_tcam_route_create,
.route_destroy = mlxsw_sp2_mr_tcam_route_destroy,
.route_update = mlxsw_sp2_mr_tcam_route_update,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index c4f9238591e6..695d33358988 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -435,7 +435,8 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
}
struct mlxsw_sp_acl_rule_info *
-mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
+ struct mlxsw_afa_block *afa_block)
{
struct mlxsw_sp_acl_rule_info *rulei;
int err;
@@ -443,11 +444,18 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
if (!rulei)
return NULL;
+
+ if (afa_block) {
+ rulei->act_block = afa_block;
+ return rulei;
+ }
+
rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
if (IS_ERR(rulei->act_block)) {
err = PTR_ERR(rulei->act_block);
goto err_afa_block_create;
}
+ rulei->action_created = 1;
return rulei;
err_afa_block_create:
@@ -457,7 +465,8 @@ err_afa_block_create:
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
- mlxsw_afa_block_destroy(rulei->act_block);
+ if (rulei->action_created)
+ mlxsw_afa_block_destroy(rulei->act_block);
kfree(rulei);
}
@@ -623,6 +632,7 @@ struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
unsigned long cookie,
+ struct mlxsw_afa_block *afa_block,
struct netlink_ext_ack *extack)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -639,7 +649,7 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
rule->cookie = cookie;
rule->ruleset = ruleset;
- rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
if (IS_ERR(rule->rulei)) {
err = PTR_ERR(rule->rulei);
goto err_rulei_create;
@@ -721,6 +731,21 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
ops->rule_del(mlxsw_sp, rule->priv);
}
+int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_rule_info *rulei;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ rulei->act_block = afa_block;
+
+ return ops->rule_action_replace(mlxsw_sp, ruleset->priv, rule->priv,
+ rule->rulei);
+}
+
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 2dda028f94db..80fb268d51a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -14,8 +14,8 @@
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START 0
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END 5
struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
@@ -34,7 +34,7 @@ struct mlxsw_sp_acl_atcam_region_ops {
void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
struct mlxsw_sp_acl_atcam_lkey_id *
(*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
+ char *enc_key, u8 erp_id);
void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
};
@@ -64,7 +64,7 @@ static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
static bool
mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
{
- return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
+ return mlxsw_sp_acl_erp_mask_is_ctcam(aentry->erp_mask);
}
static int
@@ -90,8 +90,7 @@ mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei,
- u8 erp_id)
+ char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_generic *region_generic;
@@ -220,8 +219,7 @@ mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei,
- u8 erp_id)
+ char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
@@ -230,9 +228,10 @@ mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
- mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
- NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
- MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
+ memcpy(ht_key.enc_key, enc_key, sizeof(ht_key.enc_key));
+ mlxsw_afk_clear(afk, ht_key.enc_key,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END);
ht_key.erp_id = erp_id;
lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
mlxsw_sp_acl_atcam_lkey_id_ht_params);
@@ -324,6 +323,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
aregion->region = region;
aregion->atcam = atcam;
mlxsw_sp_acl_atcam_region_type_init(aregion);
+ INIT_LIST_HEAD(&aregion->entries_list);
err = rhashtable_init(&aregion->entries_ht,
&mlxsw_sp_acl_atcam_entries_ht_params);
@@ -357,6 +357,7 @@ void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
mlxsw_sp_acl_erp_region_fini(aregion);
aregion->ops->fini(aregion);
rhashtable_destroy(&aregion->entries_ht);
+ WARN_ON(!list_empty(&aregion->entries_list));
}
void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
@@ -379,7 +380,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
- u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
u32 kvdl_index, priority;
@@ -389,7 +390,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
+ erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@@ -398,6 +400,9 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
aentry->ht_key.enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
kvdl_index);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
@@ -418,18 +423,51 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
- u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+ char *enc_key = aentry->ht_key.enc_key;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
- region->tcam_region_info, aentry->ht_key.enc_key,
- erp_id, refcount_read(&lkey_id->refcnt) != 1,
+ region->tcam_region_info,
+ enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+ refcount_read(&lkey_id->refcnt) != 1,
lkey_id->id, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
aregion->ops->lkey_id_put(aregion, lkey_id);
}
static int
+mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+ u32 kvdl_index, priority;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
+ if (err)
+ return err;
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
+ priority, region->tcam_region_info,
+ aentry->ht_key.enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+ refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
+ kvdl_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+}
+
+static int
__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_entry *aentry,
@@ -438,19 +476,36 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
- struct mlxsw_sp_acl_erp *erp;
- unsigned int blocks_count;
+ const struct mlxsw_sp_acl_erp_delta *delta;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
int err;
- blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
- aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
-
- erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
- if (IS_ERR(erp))
- return PTR_ERR(erp);
- aentry->erp = erp;
- aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+ aentry->full_enc_key, mask);
+
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
+ memcpy(aentry->ht_key.enc_key, aentry->full_enc_key,
+ sizeof(aentry->ht_key.enc_key));
+
+ /* Compute all needed delta information and clear the delta bits
+ * from the encoded key.
+ */
+ delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
+ aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
+ aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
+ aentry->delta_info.value =
+ mlxsw_sp_acl_erp_delta_value(delta, aentry->full_enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
+
+ /* Add the rule to the list of A-TCAM rules, assuming this
+ * rule is intended for the A-TCAM. In case the rule does not
+ * fit into the A-TCAM it will be removed from the list.
+ */
+ list_add(&aentry->list, &aregion->entries_list);
/* We can't insert identical rules into the A-TCAM, so fail and
* let the rule spill into C-TCAM
@@ -461,6 +516,13 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_insert;
+ /* Bloom filter must be updated here, before inserting the rule into
+ * the A-TCAM.
+ */
+ err = mlxsw_sp_acl_erp_bf_insert(mlxsw_sp, aregion, erp_mask, aentry);
+ if (err)
+ goto err_bf_insert;
+
err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry,
rulei);
if (err)
@@ -469,10 +531,13 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
return 0;
err_rule_insert:
+ mlxsw_sp_acl_erp_bf_remove(mlxsw_sp, aregion, erp_mask, aentry);
+err_bf_insert:
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
err_rhashtable_insert:
- mlxsw_sp_acl_erp_put(aregion, erp);
+ list_del(&aentry->list);
+ mlxsw_sp_acl_erp_mask_put(aregion, erp_mask);
return err;
}
@@ -482,9 +547,21 @@ __mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_entry *aentry)
{
mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
+ mlxsw_sp_acl_erp_bf_remove(mlxsw_sp, aregion, aentry->erp_mask, aentry);
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
- mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+ list_del(&aentry->list);
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
+}
+
+static int
+__mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_sp_acl_atcam_region_entry_action_replace(mlxsw_sp, aregion,
+ aentry, rulei);
}
int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
@@ -523,6 +600,29 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
__mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry);
}
+int
+mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ if (mlxsw_sp_acl_atcam_is_centry(aentry))
+ err = mlxsw_sp_acl_ctcam_entry_action_replace(mlxsw_sp,
+ &aregion->cregion,
+ &achunk->cchunk,
+ &aentry->centry,
+ rulei);
+ else
+ err = __mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
+ aregion, aentry,
+ rulei);
+
+ return err;
+}
+
int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
new file mode 100644
index 000000000000..505b87846acc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/refcount.h>
+
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp_acl_bf {
+ unsigned int bank_size;
+ refcount_t refcnt[0];
+};
+
+/* The Bloom filter uses a CRC-16 hash over chunks of data, each containing 4
+ * key blocks, the eRP ID and the region ID. In Spectrum-2, a region key is
+ * composed of up to 12 key blocks, so there can be up to 3 chunks in the
+ * Bloom filter key, depending on the actual number of key blocks in the region.
+ * The layout of the Bloom filter key is as follows:
+ *
+ * +-------------------------+------------------------+------------------------+
+ * | Chunk 2 Key blocks 11-8 | Chunk 1 Key blocks 7-4 | Chunk 0 Key blocks 3-0 |
+ * +-------------------------+------------------------+------------------------+
+ */
+#define MLXSW_BLOOM_KEY_CHUNKS 3
+#define MLXSW_BLOOM_KEY_LEN 69
+
+/* Each chunk is 23 bytes long: 18 bytes of it hold 4 key blocks of 36 bits
+ * each, 2 bytes hold the eRP ID and the region ID, and 3 bytes are zero
+ * padding.
+ * The layout of each chunk is as follows:
+ *
+ * +---------+----------------------+-----------------------------------+
+ * | 3 bytes | 2 bytes | 18 bytes |
+ * +---------+-----------+----------+-----------------------------------+
+ * | 183:158 | 157:148 | 147:144 | 143:0 |
+ * +---------+-----------+----------+-----------------------------------+
+ * | 0 | region ID | eRP ID | 4 Key blocks (18 Bytes) |
+ * +---------+-----------+----------+-----------------------------------+
+ */
+#define MLXSW_BLOOM_CHUNK_PAD_BYTES 3
+#define MLXSW_BLOOM_CHUNK_KEY_BYTES 18
+#define MLXSW_BLOOM_KEY_CHUNK_BYTES 23
+
+/* The key blocks start at offset 5 within a chunk, as they come after the
+ * 3 bytes of zero padding and the 16 bits of region ID and eRP ID.
+ */
+#define MLXSW_BLOOM_CHUNK_KEY_OFFSET 5
+
+/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8,
+ * and we need to populate it with 4 key blocks copied from the entry's
+ * encoded key. Since the encoded key contains padding, key block 11 starts at
+ * offset 2. Block 7, which is used in chunk 1, starts at offset 20, as 4 key
+ * blocks take 18 bytes.
+ * This array defines the key offsets for easy access when copying key blocks
+ * from the entry key to the Bloom filter chunk.
+ */
+static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
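The offsets above grow by MLXSW_BLOOM_CHUNK_KEY_BYTES (18) per array index, starting from offset 2 where key block 11 begins; a sketch of that arithmetic (the helper is illustrative only, not part of the patch):

/* chunk_key_offsets[i] == 2 + i * MLXSW_BLOOM_CHUNK_KEY_BYTES, i.e. {2, 20, 38} */
static u8 mlxsw_bloom_chunk_key_offset_example(u8 chunk_index)
{
	return 2 + chunk_index * MLXSW_BLOOM_CHUNK_KEY_BYTES;
}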
+
+/* This table is just the CRC of each possible byte. It is
+ * computed, Msbit first, for the Bloom filter polynomial
+ * which is 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 and
+ * the implicit x^16).
+ */
+static const u16 mlxsw_sp_acl_bf_crc_tab[256] = {
+0x0000, 0x8529, 0x8f7b, 0x0a52, 0x9bdf, 0x1ef6, 0x14a4, 0x918d,
+0xb297, 0x37be, 0x3dec, 0xb8c5, 0x2948, 0xac61, 0xa633, 0x231a,
+0xe007, 0x652e, 0x6f7c, 0xea55, 0x7bd8, 0xfef1, 0xf4a3, 0x718a,
+0x5290, 0xd7b9, 0xddeb, 0x58c2, 0xc94f, 0x4c66, 0x4634, 0xc31d,
+0x4527, 0xc00e, 0xca5c, 0x4f75, 0xdef8, 0x5bd1, 0x5183, 0xd4aa,
+0xf7b0, 0x7299, 0x78cb, 0xfde2, 0x6c6f, 0xe946, 0xe314, 0x663d,
+0xa520, 0x2009, 0x2a5b, 0xaf72, 0x3eff, 0xbbd6, 0xb184, 0x34ad,
+0x17b7, 0x929e, 0x98cc, 0x1de5, 0x8c68, 0x0941, 0x0313, 0x863a,
+0x8a4e, 0x0f67, 0x0535, 0x801c, 0x1191, 0x94b8, 0x9eea, 0x1bc3,
+0x38d9, 0xbdf0, 0xb7a2, 0x328b, 0xa306, 0x262f, 0x2c7d, 0xa954,
+0x6a49, 0xef60, 0xe532, 0x601b, 0xf196, 0x74bf, 0x7eed, 0xfbc4,
+0xd8de, 0x5df7, 0x57a5, 0xd28c, 0x4301, 0xc628, 0xcc7a, 0x4953,
+0xcf69, 0x4a40, 0x4012, 0xc53b, 0x54b6, 0xd19f, 0xdbcd, 0x5ee4,
+0x7dfe, 0xf8d7, 0xf285, 0x77ac, 0xe621, 0x6308, 0x695a, 0xec73,
+0x2f6e, 0xaa47, 0xa015, 0x253c, 0xb4b1, 0x3198, 0x3bca, 0xbee3,
+0x9df9, 0x18d0, 0x1282, 0x97ab, 0x0626, 0x830f, 0x895d, 0x0c74,
+0x91b5, 0x149c, 0x1ece, 0x9be7, 0x0a6a, 0x8f43, 0x8511, 0x0038,
+0x2322, 0xa60b, 0xac59, 0x2970, 0xb8fd, 0x3dd4, 0x3786, 0xb2af,
+0x71b2, 0xf49b, 0xfec9, 0x7be0, 0xea6d, 0x6f44, 0x6516, 0xe03f,
+0xc325, 0x460c, 0x4c5e, 0xc977, 0x58fa, 0xddd3, 0xd781, 0x52a8,
+0xd492, 0x51bb, 0x5be9, 0xdec0, 0x4f4d, 0xca64, 0xc036, 0x451f,
+0x6605, 0xe32c, 0xe97e, 0x6c57, 0xfdda, 0x78f3, 0x72a1, 0xf788,
+0x3495, 0xb1bc, 0xbbee, 0x3ec7, 0xaf4a, 0x2a63, 0x2031, 0xa518,
+0x8602, 0x032b, 0x0979, 0x8c50, 0x1ddd, 0x98f4, 0x92a6, 0x178f,
+0x1bfb, 0x9ed2, 0x9480, 0x11a9, 0x8024, 0x050d, 0x0f5f, 0x8a76,
+0xa96c, 0x2c45, 0x2617, 0xa33e, 0x32b3, 0xb79a, 0xbdc8, 0x38e1,
+0xfbfc, 0x7ed5, 0x7487, 0xf1ae, 0x6023, 0xe50a, 0xef58, 0x6a71,
+0x496b, 0xcc42, 0xc610, 0x4339, 0xd2b4, 0x579d, 0x5dcf, 0xd8e6,
+0x5edc, 0xdbf5, 0xd1a7, 0x548e, 0xc503, 0x402a, 0x4a78, 0xcf51,
+0xec4b, 0x6962, 0x6330, 0xe619, 0x7794, 0xf2bd, 0xf8ef, 0x7dc6,
+0xbedb, 0x3bf2, 0x31a0, 0xb489, 0x2504, 0xa02d, 0xaa7f, 0x2f56,
+0x0c4c, 0x8965, 0x8337, 0x061e, 0x9793, 0x12ba, 0x18e8, 0x9dc1,
+};
+
+static u16 mlxsw_sp_acl_bf_crc_byte(u16 crc, u8 c)
+{
+ return (crc << 8) ^ mlxsw_sp_acl_bf_crc_tab[(crc >> 8) ^ c];
+}
+
+static u16 mlxsw_sp_acl_bf_crc(const u8 *buffer, size_t len)
+{
+ u16 crc = 0;
+
+ while (len--)
+ crc = mlxsw_sp_acl_bf_crc_byte(crc, *buffer++);
+ return crc;
+}
+
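The table above matches a standard MSB-first CRC-16 table for the 0x8529 polynomial; a minimal standalone sketch that would regenerate it for verification (not part of the patch), whose first entries come out as 0x0000, 0x8529 and 0x8f7b:

static void mlxsw_sp_acl_bf_crc_tab_build_example(u16 *tab)
{
	int i, bit;

	for (i = 0; i < 256; i++) {
		u16 crc = i << 8;

		/* Shift the byte through the 16-bit register, XORing in the
		 * polynomial whenever the top bit falls out.
		 */
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8529 :
					       (crc << 1);
		tab[i] = crc;
	}
}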
+static void
+mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len)
+{
+ struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
+ u8 chunk_index, chunk_count, block_count;
+ char *chunk = output;
+ __be16 erp_region_id;
+
+ block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
+ chunk_count = 1 + ((block_count - 1) >> 2);
+ erp_region_id = cpu_to_be16(aentry->ht_key.erp_id |
+ (aregion->region->id << 4));
+ for (chunk_index = MLXSW_BLOOM_KEY_CHUNKS - chunk_count;
+ chunk_index < MLXSW_BLOOM_KEY_CHUNKS; chunk_index++) {
+ memset(chunk, 0, MLXSW_BLOOM_CHUNK_PAD_BYTES);
+ memcpy(chunk + MLXSW_BLOOM_CHUNK_PAD_BYTES, &erp_region_id,
+ sizeof(erp_region_id));
+ memcpy(chunk + MLXSW_BLOOM_CHUNK_KEY_OFFSET,
+ &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
+ MLXSW_BLOOM_CHUNK_KEY_BYTES);
+ chunk += MLXSW_BLOOM_KEY_CHUNK_BYTES;
+ }
+ *len = chunk_count * MLXSW_BLOOM_KEY_CHUNK_BYTES;
+}
+
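The chunk_count computation in the function above, 1 + ((block_count - 1) >> 2), is a round-up division of the block count by the 4 key blocks per chunk; an equivalent sketch using the kernel helper (illustrative only):

/* 1 + ((n - 1) >> 2) == DIV_ROUND_UP(n, 4) for n >= 1: 4 -> 1, 5 -> 2, 12 -> 3 */
static u8 mlxsw_bloom_chunk_count_example(u8 block_count)
{
	return DIV_ROUND_UP(block_count, 4);
}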
+static unsigned int
+mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf,
+ unsigned int erp_bank,
+ unsigned int bf_index)
+{
+ return erp_bank * bf->bank_size + bf_index;
+}
+
+static unsigned int
+mlxsw_sp_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ char bf_key[MLXSW_BLOOM_KEY_LEN];
+ u8 bf_size;
+
+ mlxsw_sp_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
+ return mlxsw_sp_acl_bf_crc(bf_key, bf_size);
+}
+
+int
+mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ unsigned int rule_index;
+ char *peabfe_pl;
+ u16 bf_index;
+ int err;
+
+ bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
+ rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
+ bf_index);
+
+ if (refcount_inc_not_zero(&bf->refcnt[rule_index]))
+ return 0;
+
+ peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
+ if (!peabfe_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_peabfe_pack(peabfe_pl);
+ mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 1, erp_bank, bf_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
+ kfree(peabfe_pl);
+ if (err)
+ return err;
+
+ refcount_set(&bf->refcnt[rule_index], 1);
+ return 0;
+}
+
+void
+mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ unsigned int rule_index;
+ char *peabfe_pl;
+ u16 bf_index;
+
+ bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
+ rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
+ bf_index);
+
+ if (refcount_dec_and_test(&bf->refcnt[rule_index])) {
+ peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
+ if (!peabfe_pl)
+ return;
+
+ mlxsw_reg_peabfe_pack(peabfe_pl);
+ mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 0, erp_bank, bf_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
+ kfree(peabfe_pl);
+ }
+}
+
+struct mlxsw_sp_acl_bf *
+mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
+{
+ struct mlxsw_sp_acl_bf *bf;
+ unsigned int bf_bank_size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_BF_LOG))
+ return ERR_PTR(-EIO);
+
+ /* The Bloom filter size per eRP table bank
+ * is 2^ACL_MAX_BF_LOG.
+ */
+ bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
+ bf = kzalloc(sizeof(*bf) + bf_bank_size * num_erp_banks *
+ sizeof(*bf->refcnt), GFP_KERNEL);
+ if (!bf)
+ return ERR_PTR(-ENOMEM);
+
+ bf->bank_size = bf_bank_size;
+ return bf;
+}
+
+void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf)
+{
+ kfree(bf);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index e3c6fe8b1d40..b0f2d8e8ded0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -46,7 +46,6 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = cregion->region;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- unsigned int blocks_count;
char *act_set;
u32 priority;
char *mask;
@@ -63,9 +62,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
centry->parman_item.index, priority);
key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
- blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
- mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
- blocks_count - 1);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
err = cregion->ops->entry_insert(cregion, centry, mask);
if (err)
@@ -92,6 +89,27 @@ mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
cregion->ops->entry_remove(cregion, centry);
}
+static int
+mlxsw_sp_acl_ctcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_afa_block *afa_block,
+ unsigned int priority)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ char *act_set;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_UPDATE,
+ cregion->region->tcam_region_info,
+ centry->parman_item.index, priority);
+
+ act_set = mlxsw_afa_block_first_set(afa_block);
+ mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+
static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
unsigned long new_count)
{
@@ -194,3 +212,15 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
parman_item_remove(cregion->parman, &cchunk->parman_prio,
&centry->parman_item);
}
+
+int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_sp_acl_ctcam_region_entry_action_replace(mlxsw_sp, cregion,
+ centry,
+ rulei->act_block,
+ rulei->priority);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 0a4fd3c8662a..1c19feefa5f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -7,7 +7,7 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/rhashtable.h>
+#include <linux/objagg.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
@@ -24,11 +24,14 @@ struct mlxsw_sp_acl_erp_core {
unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1];
struct gen_pool *erp_tables;
struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_acl_bf *bf;
unsigned int num_erp_banks;
};
struct mlxsw_sp_acl_erp_key {
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+#define __MASK_LEN 0x38
+#define __MASK_IDX(i) (__MASK_LEN - (i) - 1)
bool ctcam;
};
@@ -36,10 +39,8 @@ struct mlxsw_sp_acl_erp {
struct mlxsw_sp_acl_erp_key key;
u8 id;
u8 index;
- refcount_t refcnt;
DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
struct list_head list;
- struct rhash_head ht_node;
struct mlxsw_sp_acl_erp_table *erp_table;
};
@@ -53,7 +54,6 @@ struct mlxsw_sp_acl_erp_table {
DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
struct list_head atcam_erps_list;
- struct rhashtable erp_ht;
struct mlxsw_sp_acl_erp_core *erp_core;
struct mlxsw_sp_acl_atcam_region *aregion;
const struct mlxsw_sp_acl_erp_table_ops *ops;
@@ -61,12 +61,8 @@ struct mlxsw_sp_acl_erp_table {
unsigned int num_atcam_erps;
unsigned int num_max_atcam_erps;
unsigned int num_ctcam_erps;
-};
-
-static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
- .key_len = sizeof(struct mlxsw_sp_acl_erp_key),
- .key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
- .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
+ unsigned int num_deltas;
+ struct objagg *objagg;
};
struct mlxsw_sp_acl_erp_table_ops {
@@ -119,14 +115,17 @@ static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
.erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
};
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
+static bool
+mlxsw_sp_acl_erp_table_is_used(const struct mlxsw_sp_acl_erp_table *erp_table)
{
- return erp->key.ctcam;
+ return erp_table->ops != &erp_single_mask_ops &&
+ erp_table->ops != &erp_no_mask_ops;
}
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
+static unsigned int
+mlxsw_sp_acl_erp_bank_get(const struct mlxsw_sp_acl_erp *erp)
{
- return erp->id;
+ return erp->index % erp->erp_table->erp_core->num_erp_banks;
}
static unsigned int
@@ -194,12 +193,15 @@ mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
static int
mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
- const struct mlxsw_sp_acl_erp *erp)
+ struct mlxsw_sp_acl_erp_key *key)
{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
@@ -210,7 +212,7 @@ mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
return err;
@@ -218,12 +220,15 @@ err_master_mask_update:
static int
mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
- const struct mlxsw_sp_acl_erp *erp)
+ struct mlxsw_sp_acl_erp_key *key)
{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
@@ -234,7 +239,7 @@ mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
return err;
@@ -256,26 +261,16 @@ mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
goto err_erp_id_get;
memcpy(&erp->key, key, sizeof(*key));
- bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
- MLXSW_SP_ACL_TCAM_MASK_LEN);
list_add(&erp->list, &erp_table->atcam_erps_list);
- refcount_set(&erp->refcnt, 1);
erp_table->num_atcam_erps++;
erp->erp_table = erp_table;
- err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
goto err_master_mask_set;
- err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- if (err)
- goto err_rhashtable_insert;
-
return erp;
-err_rhashtable_insert:
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
err_master_mask_set:
erp_table->num_atcam_erps--;
list_del(&erp->list);
@@ -290,9 +285,7 @@ mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
erp_table->num_atcam_erps--;
list_del(&erp->list);
mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
@@ -525,6 +518,48 @@ err_table_relocate:
}
static int
+mlxsw_acl_erp_table_bf_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ unsigned int erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+ int err;
+
+ list_for_each_entry(aentry, &aregion->entries_list, list) {
+ err = mlxsw_sp_acl_bf_entry_add(aregion->region->mlxsw_sp,
+ erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+ if (err)
+ goto bf_entry_add_err;
+ }
+
+ return 0;
+
+bf_entry_add_err:
+ list_for_each_entry_continue_reverse(aentry, &aregion->entries_list,
+ list)
+ mlxsw_sp_acl_bf_entry_del(aregion->region->mlxsw_sp,
+ erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+ return err;
+}
+
+static void
+mlxsw_acl_erp_table_bf_del(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ unsigned int erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+
+ list_for_each_entry_reverse(aentry, &aregion->entries_list, list)
+ mlxsw_sp_acl_bf_entry_del(aregion->region->mlxsw_sp,
+ erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+}
+
+static int
mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
{
struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
@@ -548,16 +583,24 @@ mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
goto err_table_master_rp;
}
- /* Maintain the same eRP bank for the master RP, so that we
- * wouldn't need to update the bloom filter
+ /* Make sure the master RP is using a valid index, as
+ * only a single eRP row is currently allocated.
*/
- master_rp->index = master_rp->index % erp_core->num_erp_banks;
+ master_rp->index = 0;
__set_bit(master_rp->index, erp_table->erp_index_bitmap);
err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp);
if (err)
goto err_table_master_rp_add;
+ /* Update the Bloom filter before enabling the eRP table, as rules
+ * on the master RP were not added to the Bloom filter up to this
+ * point.
+ */
+ err = mlxsw_acl_erp_table_bf_add(erp_table, master_rp);
+ if (err)
+ goto err_table_bf_add;
+
err = mlxsw_sp_acl_erp_table_enable(erp_table, false);
if (err)
goto err_table_enable;
@@ -565,6 +608,8 @@ mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
return 0;
err_table_enable:
+ mlxsw_acl_erp_table_bf_del(erp_table, master_rp);
+err_table_bf_add:
mlxsw_sp_acl_erp_table_erp_del(master_rp);
err_table_master_rp_add:
__clear_bit(master_rp->index, erp_table->erp_index_bitmap);
@@ -585,6 +630,7 @@ mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_tab
master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
if (!master_rp)
return;
+ mlxsw_acl_erp_table_bf_del(erp_table, master_rp);
mlxsw_sp_acl_erp_table_erp_del(master_rp);
__clear_bit(master_rp->index, erp_table->erp_index_bitmap);
mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
@@ -647,9 +693,55 @@ mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
mlxsw_sp_acl_erp_table_enable(erp_table, false);
}
+static int
+__mlxsw_sp_acl_erp_table_other_inc(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *inc_num)
+{
+ int err;
+
+ /* If there are C-TCAM eRPs or deltas in use, we need to transition
+ * the region to use the eRP table, if that has not already been done.
+ */
+ if (!mlxsw_sp_acl_erp_table_is_used(erp_table)) {
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return err;
+ }
+
+ /* When C-TCAM or deltas are used, the eRP table must be used */
+ if (erp_table->ops != &erp_multiple_masks_ops)
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ (*inc_num)++;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_ctcam_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static int mlxsw_sp_acl_erp_delta_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_deltas);
+}
+
static void
-mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
+__mlxsw_sp_acl_erp_table_other_dec(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *dec_num)
{
+ (*dec_num)--;
+
+ /* If there are no C-TCAM eRPs or deltas in use, the state we
+ * transition to depends on the number of A-TCAM eRPs currently
+ * in use.
+ */
+ if (erp_table->num_ctcam_erps > 0 || erp_table->num_deltas > 0)
+ return;
+
switch (erp_table->num_atcam_erps) {
case 2:
/* Keep using the eRP table, but correctly set the
@@ -683,9 +775,21 @@ mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
}
}
+static void mlxsw_sp_acl_erp_ctcam_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static void mlxsw_sp_acl_erp_delta_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_deltas);
+}
+
static struct mlxsw_sp_acl_erp *
-__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
- struct mlxsw_sp_acl_erp_key *key)
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
@@ -697,89 +801,41 @@ __mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
memcpy(&erp->key, key, sizeof(*key));
bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
- refcount_set(&erp->refcnt, 1);
- erp_table->num_ctcam_erps++;
- erp->erp_table = erp_table;
- err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ err = mlxsw_sp_acl_erp_ctcam_inc(erp_table);
if (err)
- goto err_master_mask_set;
+ goto err_erp_ctcam_inc;
+
+ erp->erp_table = erp_table;
- err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
- goto err_rhashtable_insert;
+ goto err_master_mask_set;
err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
if (err)
goto err_erp_region_ctcam_enable;
- /* When C-TCAM is used, the eRP table must be used */
- erp_table->ops = &erp_multiple_masks_ops;
-
return erp;
err_erp_region_ctcam_enable:
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
-err_rhashtable_insert:
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
err_master_mask_set:
- erp_table->num_ctcam_erps--;
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
+err_erp_ctcam_inc:
kfree(erp);
return ERR_PTR(err);
}
-static struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
- struct mlxsw_sp_acl_erp_key *key)
-{
- struct mlxsw_sp_acl_erp *erp;
- int err;
-
- /* There is a special situation where we need to spill rules
- * into the C-TCAM, yet the region is still using a master
- * mask and thus not performing a lookup in the C-TCAM. This
- * can happen when two rules that only differ in priority - and
- * thus sharing the same key - are programmed. In this case
- * we transition the region to use an eRP table
- */
- err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
- if (err)
- return ERR_PTR(err);
-
- erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
- if (IS_ERR(erp)) {
- err = PTR_ERR(erp);
- goto err_erp_create;
- }
-
- return erp;
-
-err_erp_create:
- mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
- return ERR_PTR(err);
-}
-
static void
mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
- erp_table->num_ctcam_erps--;
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
kfree(erp);
-
- /* Once the last C-TCAM eRP was destroyed, the state we
- * transition to depends on the number of A-TCAM eRPs currently
- * in use
- */
- if (erp_table->num_ctcam_erps > 0)
- return;
- mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
}
static struct mlxsw_sp_acl_erp *
@@ -790,7 +846,7 @@ mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
int err;
if (key->ctcam)
- return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
/* Expand the eRP table for the new eRP, if needed */
err = mlxsw_sp_acl_erp_table_expand(erp_table);
@@ -838,7 +894,8 @@ mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
mlxsw_sp_acl_erp_generic_destroy(erp);
- if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
+ if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0 &&
+ erp_table->num_deltas == 0)
erp_table->ops = &erp_two_masks_ops;
}
@@ -940,13 +997,12 @@ mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
WARN_ON(1);
}
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
- const char *mask, bool ctcam)
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam)
{
- struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key key;
- struct mlxsw_sp_acl_erp *erp;
+ struct objagg_obj *objagg_obj;
/* eRPs are allocated from a shared resource, but currently all
* allocations are done under RTNL.
@@ -955,29 +1011,276 @@ mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
key.ctcam = ctcam;
- erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
- mlxsw_sp_acl_erp_ht_params);
- if (erp) {
- refcount_inc(&erp->refcnt);
- return erp;
- }
+ objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key);
+ if (IS_ERR(objagg_obj))
+ return ERR_CAST(objagg_obj);
+ return (struct mlxsw_sp_acl_erp_mask *) objagg_obj;
+}
+
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
- return erp_table->ops->erp_create(erp_table, &key);
+ ASSERT_RTNL();
+ objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
}
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_erp *erp)
+int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
{
- struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+ unsigned int erp_bank;
ASSERT_RTNL();
+ if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
+ return 0;
+
+ erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ return mlxsw_sp_acl_bf_entry_add(mlxsw_sp,
+ erp->erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+}
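As the helper above shows, mlxsw_sp_acl_erp_bf_insert() is a no-op while the
region is still served by the master mask alone; only once the eRP table is in
use is the entry added to the Bloom filter of the eRP's bank (and removed again
by the matching _bf_remove() helper below). A minimal caller sketch on the
A-TCAM entry-add path, using only helpers from this patch; the ordering and the
unwind label are assumptions, not the actual call site:

	erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
	if (IS_ERR(erp_mask))
		return PTR_ERR(erp_mask);
	err = mlxsw_sp_acl_erp_bf_insert(mlxsw_sp, aregion, erp_mask, aentry);
	if (err)
		goto err_bf_insert;	/* hypothetical unwind label */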
+
+void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+ unsigned int erp_bank;
- if (!refcount_dec_and_test(&erp->refcnt))
+ ASSERT_RTNL();
+ if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
return;
- erp_table->ops->erp_destroy(erp_table, erp);
+ erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ mlxsw_sp_acl_bf_entry_del(mlxsw_sp,
+ erp->erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+}
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_key *key = objagg_obj_raw(objagg_obj);
+
+ return key->ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+
+ return erp->id;
+}
+
+struct mlxsw_sp_acl_erp_delta {
+ struct mlxsw_sp_acl_erp_key key;
+ u16 start;
+ u8 mask;
+};
+
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->start;
+}
+
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->mask;
+}
+
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ u16 tmp;
+
+ if (!mask)
+ return 0;
+
+ tmp = (unsigned char) enc_key[__MASK_IDX(start / 8)];
+ if (start / 8 + 1 < __MASK_LEN)
+ tmp |= (unsigned char) enc_key[__MASK_IDX(start / 8 + 1)] << 8;
+ tmp >>= start % 8;
+ tmp &= mask;
+ return tmp;
+}
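A short worked example of the extraction above, with an assumed delta of
start = 10 and mask = 0x3 (values chosen purely for illustration): start / 8 = 1
and start % 8 = 2, so the function reads the byte pair covering bit 10 and
keeps only the two delta bits.

	/* assume enc_key[__MASK_IDX(1)] = 0x2c and the following byte = 0x00 */
	/* tmp = 0x002c;  tmp >>= 2  ->  0x000b;  tmp &= 0x3  ->  0x3        */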
+
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ unsigned char *byte;
+ u16 tmp;
+
+ tmp = mask;
+ tmp <<= start % 8;
+ tmp = ~tmp;
+
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8)];
+ *byte &= tmp & 0xff;
+ if (start / 8 + 1 < __MASK_LEN) {
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8 + 1)];
+ *byte &= (tmp >> 8) & 0xff;
+ }
+}
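The clear helper zeroes exactly the same bit range in place (note that it
writes through a cast even though enc_key is const-qualified). Continuing the
assumed start = 10, mask = 0x3 example:

	/* tmp = 0x3 << 2 = 0x000c;  ~tmp = 0xfff3                        */
	/* byte at __MASK_IDX(1) &= 0xf3;  next byte &= 0xff (unchanged)  */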
+
+static const struct mlxsw_sp_acl_erp_delta
+mlxsw_sp_acl_erp_delta_default = {};
+
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_delta *delta;
+
+ delta = objagg_obj_delta_priv(objagg_obj);
+ if (!delta)
+ delta = &mlxsw_sp_acl_erp_delta_default;
+ return delta;
+}
+
+static int
+mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
+ const struct mlxsw_sp_acl_erp_key *key,
+ u16 *delta_start, u8 *delta_mask)
+{
+ int offset = 0;
+ int si = -1;
+ u16 pmask;
+ u16 mask;
+ int i;
+
+ /* The difference between 2 masks can be up to 8 consecutive bits. */
+ for (i = 0; i < __MASK_LEN; i++) {
+ if (parent_key->mask[__MASK_IDX(i)] == key->mask[__MASK_IDX(i)])
+ continue;
+ if (si == -1)
+ si = i;
+ else if (si != i - 1)
+ return -EINVAL;
+ }
+ if (si == -1) {
+ /* The masks are the same; this cannot happen.
+ * That means the caller is broken.
+ */
+ WARN_ON(1);
+ *delta_start = 0;
+ *delta_mask = 0;
+ return 0;
+ }
+ pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)];
+ mask = (unsigned char) key->mask[__MASK_IDX(si)];
+ if (si + 1 < __MASK_LEN) {
+ pmask |= (unsigned char) parent_key->mask[__MASK_IDX(si + 1)] << 8;
+ mask |= (unsigned char) key->mask[__MASK_IDX(si + 1)] << 8;
+ }
+
+ if ((pmask ^ mask) & pmask)
+ return -EINVAL;
+ mask &= ~pmask;
+ while (!(mask & (1 << offset)))
+ offset++;
+ while (!(mask & 1))
+ mask >>= 1;
+ if (mask & 0xff00)
+ return -EINVAL;
+
+ *delta_start = si * 8 + offset;
+ *delta_mask = mask;
+
+ return 0;
+}
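The routine accepts a key only if its mask is a superset of the parent's and
the extra bits fit in 8 consecutive bit positions. A worked example with
assumed byte values, differing only at index si: parent mask byte 0x0f,
key mask byte 0x3f.

	/* pmask = 0x0f, mask = 0x3f                                       */
	/* (pmask ^ mask) & pmask = 0x30 & 0x0f = 0, parent is a subset    */
	/* mask &= ~pmask            ->  0x30                              */
	/* lowest set bit is bit 4   ->  offset = 4, mask >>= 4  ->  0x03  */
	/* *delta_start = si * 8 + 4, *delta_mask = 0x03                   */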
+
+static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
+ void *obj)
+{
+ struct mlxsw_sp_acl_erp_key *parent_key = parent_obj;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+ struct mlxsw_sp_acl_erp_delta *delta;
+ u16 delta_start;
+ u8 delta_mask;
+ int err;
+
+ if (parent_key->ctcam || key->ctcam)
+ return ERR_PTR(-EINVAL);
+ err = mlxsw_sp_acl_erp_delta_fill(parent_key, key,
+ &delta_start, &delta_mask);
+ if (err)
+ return ERR_PTR(-EINVAL);
+
+ delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ if (!delta)
+ return ERR_PTR(-ENOMEM);
+ delta->start = delta_start;
+ delta->mask = delta_mask;
+
+ err = mlxsw_sp_acl_erp_delta_inc(erp_table);
+ if (err)
+ goto err_erp_delta_inc;
+
+ memcpy(&delta->key, key, sizeof(*key));
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &delta->key);
+ if (err)
+ goto err_master_mask_set;
+
+ return delta;
+
+err_master_mask_set:
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+err_erp_delta_inc:
+ kfree(delta);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv)
+{
+ struct mlxsw_sp_acl_erp_delta *delta = delta_priv;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &delta->key);
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+ kfree(delta);
+}
+
+static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+
+ return erp_table->ops->erp_create(erp_table, key);
+}
+
+static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ erp_table->ops->erp_destroy(erp_table, root_priv);
}
+static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
+ .obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
+ .delta_create = mlxsw_sp_acl_erp_delta_create,
+ .delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
+ .root_create = mlxsw_sp_acl_erp_root_create,
+ .root_destroy = mlxsw_sp_acl_erp_root_destroy,
+};
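These ops plug the eRP masks into the generic object aggregation library that
replaces the per-table rhashtable later in this patch. Roughly, and only in
terms of the calls this patch already uses: objagg_obj_get() either returns an
existing object for the key, creates a root through .root_create() (a full eRP
via erp_table->ops->erp_create()), or nests the key under a compatible root
through .delta_create() when mlxsw_sp_acl_erp_delta_fill() succeeds. Consumers
then unwrap the handle, for example:

	erp   = objagg_obj_root_priv(objagg_obj);   /* root eRP backing the mask    */
	delta = objagg_obj_delta_priv(objagg_obj);  /* NULL when the mask is a root */
	key   = objagg_obj_raw(objagg_obj);         /* the original eRP key         */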
+
static struct mlxsw_sp_acl_erp_table *
mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
{
@@ -988,9 +1291,12 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
if (!erp_table)
return ERR_PTR(-ENOMEM);
- err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
- if (err)
- goto err_rhashtable_init;
+ erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops,
+ aregion);
+ if (IS_ERR(erp_table->objagg)) {
+ err = PTR_ERR(erp_table->objagg);
+ goto err_objagg_create;
+ }
erp_table->erp_core = aregion->atcam->erp_core;
erp_table->ops = &erp_no_mask_ops;
@@ -999,7 +1305,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
return erp_table;
-err_rhashtable_init:
+err_objagg_create:
kfree(erp_table);
return ERR_PTR(err);
}
@@ -1008,7 +1314,7 @@ static void
mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
{
WARN_ON(!list_empty(&erp_table->atcam_erps_list));
- rhashtable_destroy(&erp_table->erp_ht);
+ objagg_destroy(erp_table->objagg);
kfree(erp_table);
}
@@ -1118,6 +1424,12 @@ static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_gen_pool_add;
+ erp_core->bf = mlxsw_sp_acl_bf_init(mlxsw_sp, erp_core->num_erp_banks);
+ if (IS_ERR(erp_core->bf)) {
+ err = PTR_ERR(erp_core->bf);
+ goto err_bf_init;
+ }
+
/* Different regions require masks of different sizes */
err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core);
if (err)
@@ -1126,6 +1438,8 @@ static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_erp_tables_sizes_query:
+ mlxsw_sp_acl_bf_fini(erp_core->bf);
+err_bf_init:
err_gen_pool_add:
gen_pool_destroy(erp_core->erp_tables);
return err;
@@ -1134,6 +1448,7 @@ err_gen_pool_add:
static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_erp_core *erp_core)
{
+ mlxsw_sp_acl_bf_fini(erp_core->bf);
gen_pool_destroy(erp_core->erp_tables);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index d409b09ba8df..2a998dea4f39 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -98,8 +98,8 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
-static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
- char *output)
+static void mlxsw_sp1_afk_encode_block(char *output, int block_index,
+ char *block)
{
unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
char *output_indexed = output + offset;
@@ -107,10 +107,19 @@ static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
}
+static void mlxsw_sp1_afk_clear_block(char *output, int block_index)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memset(output_indexed, 0, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.blocks = mlxsw_sp1_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
.encode_block = mlxsw_sp1_afk_encode_block,
+ .clear_block = mlxsw_sp1_afk_clear_block,
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
@@ -158,6 +167,11 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
};
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_7, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_8_10, 0x00, 0, 3),
+};
+
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
};
@@ -201,6 +215,7 @@ static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x3C, mlxsw_sp_afk_element_info_ipv4_4),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
@@ -263,10 +278,9 @@ static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
};
-static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
- char *output)
+static void __mlxsw_sp2_afk_block_value_set(char *output, int block_index,
+ u64 block_value)
{
- u64 block_value = mlxsw_sp2_afk_block_value_get(block);
const struct mlxsw_sp2_afk_block_layout *block_layout;
if (WARN_ON(block_index < 0 ||
@@ -278,8 +292,22 @@ static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
&block_layout->item, 0, block_value);
}
+static void mlxsw_sp2_afk_encode_block(char *output, int block_index,
+ char *block)
+{
+ u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+
+ __mlxsw_sp2_afk_block_value_set(output, block_index, block_value);
+}
+
+static void mlxsw_sp2_afk_clear_block(char *output, int block_index)
+{
+ __mlxsw_sp2_afk_block_value_set(output, block_index, 0);
+}
+
const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.blocks = mlxsw_sp2_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
.encode_block = mlxsw_sp2_afk_encode_block,
+ .clear_block = mlxsw_sp2_afk_clear_block,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index e171513bb32a..fe230acf92a9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -95,8 +95,9 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
return -EIO;
- max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
- if (rulei->priority > max_priority)
+ /* Priority range is 1..cap_kvd_size-1. */
+ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
+ if (rulei->priority >= max_priority)
return -EINVAL;
/* Unlike in TC, in HW, higher number means higher priority. */
@@ -779,6 +780,20 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_acl_tcam_entry *entry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
+ struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+ return ops->entry_action_replace(mlxsw_sp, region->priv, chunk->priv,
+ entry->priv, rulei);
+}
+
+static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry,
bool *activity)
@@ -848,6 +863,15 @@ struct mlxsw_sp_acl_tcam_flower_rule {
struct mlxsw_sp_acl_tcam_entry entry;
};
+struct mlxsw_sp_acl_tcam_mr_ruleset {
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+ struct mlxsw_sp_acl_tcam_group group;
+};
+
+struct mlxsw_sp_acl_tcam_mr_rule {
+ struct mlxsw_sp_acl_tcam_entry entry;
+};
+
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
@@ -930,6 +954,15 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
}
static int
+mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv,
+ void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
void *rule_priv, bool *activity)
{
@@ -949,12 +982,146 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
+ .rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
+static int
+mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
+ mlxsw_sp_acl_tcam_patterns,
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+ tmplt_elusage);
+ if (err)
+ return err;
+
+ /* For most TCAM clients it would make sense to take a TCAM chunk
+ * only when the first rule is written. This is not the case for the
+ * multicast router, which must be bound to a specific ACL group ID
+ * that exists in HW before the multicast router is initialized.
+ */
+ ruleset->chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, &ruleset->group,
+ 1, tmplt_elusage);
+ if (IS_ERR(ruleset->chunk)) {
+ err = PTR_ERR(ruleset->chunk);
+ goto err_chunk_get;
+ }
+
+ return 0;
+
+err_chunk_get:
+ mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+ return err;
+}
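The eager chunk allocation above keeps the ACL group populated so that its ID
is valid before the multicast router initializes. Presumably the router then
retrieves it through the .ruleset_group_id callback added further down; the
actual binding is outside this diff, so the line below is only a hypothetical
consumer:

	group_id = ops->ruleset_group_id(ruleset_priv);	/* bound to the MR ACL group in HW */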
+
+static void
+mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+
+ mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, ruleset->chunk);
+ mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ /* Binding is done when initializing the multicast router */
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+}
+
+static u16
+mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+
+ return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
+}
+
+static size_t mlxsw_sp_acl_tcam_mr_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ return sizeof(struct mlxsw_sp_acl_tcam_mr_rule) +
+ mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
+ &rule->entry, rulei);
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
+{
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &ruleset->group,
+ &rule->entry, rulei);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *rule_priv, bool *activity)
+{
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
+ activity);
+}
+
+static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
+ .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
+ .ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add,
+ .ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del,
+ .ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
+ .ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
+ .ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
+ .rule_priv_size = mlxsw_sp_acl_tcam_mr_rule_priv_size,
+ .rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
+ .rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
+ .rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
+ .rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get,
+};
+
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
+ [MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};
const struct mlxsw_sp_acl_profile_ops *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 219a4e26c332..0f1a9dee63de 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -48,6 +48,9 @@ struct mlxsw_sp_acl_profile_ops {
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
bool *activity);
};
@@ -121,6 +124,11 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_chunk *cchunk,
struct mlxsw_sp_acl_ctcam_entry *centry);
+int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei);
static inline unsigned int
mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
{
@@ -144,6 +152,7 @@ struct mlxsw_sp_acl_atcam {
struct mlxsw_sp_acl_atcam_region {
struct rhashtable entries_ht; /* A-TCAM only */
+ struct list_head entries_list; /* A-TCAM only */
struct mlxsw_sp_acl_ctcam_region cregion;
const struct mlxsw_sp_acl_atcam_region_ops *ops;
struct mlxsw_sp_acl_tcam_region *region;
@@ -154,7 +163,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
+ * minus delta bits.
+ */
u8 erp_id;
};
@@ -164,10 +175,17 @@ struct mlxsw_sp_acl_atcam_chunk {
struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
+ struct list_head list; /* Member in entries_list */
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+ char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ struct {
+ u16 start;
+ u8 mask;
+ u8 value;
+ } delta_info;
struct mlxsw_sp_acl_ctcam_entry centry;
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
- struct mlxsw_sp_acl_erp *erp;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
};
static inline struct mlxsw_sp_acl_atcam_region *
@@ -204,20 +222,45 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry);
+int mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
-struct mlxsw_sp_acl_erp;
+struct mlxsw_sp_acl_erp_delta;
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
- const char *mask, bool ctcam);
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_erp *erp);
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+
+struct mlxsw_sp_acl_erp_mask;
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask);
+int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
@@ -225,4 +268,22 @@ int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
+struct mlxsw_sp_acl_bf;
+
+int
+mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+void
+mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+struct mlxsw_sp_acl_bf *
+mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks);
+void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index a3db033d7399..055cc6943b34 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -15,6 +15,7 @@
struct mlxsw_sp_fid_family;
struct mlxsw_sp_fid_core {
+ struct rhashtable fid_ht;
struct rhashtable vni_ht;
struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
unsigned int *port_fid_mappings;
@@ -26,10 +27,13 @@ struct mlxsw_sp_fid {
unsigned int ref_count;
u16 fid_index;
struct mlxsw_sp_fid_family *fid_family;
+ struct rhash_head ht_node;
struct rhash_head vni_ht_node;
+ enum mlxsw_sp_nve_type nve_type;
__be32 vni;
u32 nve_flood_index;
+ int nve_ifindex;
u8 vni_valid:1,
nve_flood_index_valid:1;
};
@@ -44,6 +48,12 @@ struct mlxsw_sp_fid_8021d {
int br_ifindex;
};
+static const struct rhashtable_params mlxsw_sp_fid_ht_params = {
+ .key_len = sizeof_field(struct mlxsw_sp_fid, fid_index),
+ .key_offset = offsetof(struct mlxsw_sp_fid, fid_index),
+ .head_offset = offsetof(struct mlxsw_sp_fid, ht_node),
+};
+
static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
.key_len = sizeof_field(struct mlxsw_sp_fid, vni),
.key_offset = offsetof(struct mlxsw_sp_fid, vni),
@@ -75,6 +85,8 @@ struct mlxsw_sp_fid_ops {
int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid,
u32 nve_flood_index);
void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
+ void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev);
};
struct mlxsw_sp_fid_family {
@@ -89,6 +101,7 @@ struct mlxsw_sp_fid_family {
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
+ u8 lag_vid_valid:1;
};
static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
@@ -113,6 +126,45 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
+bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->lag_vid_valid;
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ struct mlxsw_sp_fid *fid;
+
+ fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->fid_ht, &fid_index,
+ mlxsw_sp_fid_ht_params);
+ if (fid)
+ fid->ref_count++;
+
+ return fid;
+}
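The lookup takes a FID reference on success, so a caller is expected to balance
it with mlxsw_sp_fid_put() (reworked later in this patch). A minimal usage
sketch, with hypothetical error handling:

	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
	if (!fid)
		return -ENOENT;
	/* ... use the FID ... */
	mlxsw_sp_fid_put(fid);	/* drops the reference taken by the lookup */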
+
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *nve_ifindex = fid->nve_ifindex;
+
+ return 0;
+}
+
+int mlxsw_sp_fid_nve_type(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_nve_type *p_type)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *p_type = fid->nve_type;
+
+ return 0;
+}
+
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
__be32 vni)
{
@@ -173,7 +225,8 @@ bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid)
return fid->nve_flood_index_valid;
}
-int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
+ __be32 vni, int nve_ifindex)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
@@ -183,6 +236,8 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
if (WARN_ON(!ops->vni_set || fid->vni_valid))
return -EINVAL;
+ fid->nve_type = type;
+ fid->nve_ifindex = nve_ifindex;
fid->vni = vni;
err = rhashtable_lookup_insert_fast(&mlxsw_sp->fid_core->vni_ht,
&fid->vni_ht_node,
@@ -224,6 +279,16 @@ bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid)
return fid->vni_valid;
}
+void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+
+ if (ops->fdb_clear_offload)
+ ops->fdb_clear_offload(fid, nve_dev);
+}
+
static const struct mlxsw_sp_flood_table *
mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type)
@@ -284,11 +349,6 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
fid->fid_family->ops->port_vid_unmap(fid, mlxsw_sp_port, vid);
}
-enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_family->rif_type;
-}
-
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid)
{
return fid->fid_index;
@@ -304,6 +364,11 @@ void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
fid->rif = rif;
}
+struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid)
+{
+ return fid->rif;
+}
+
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_fid_type type)
@@ -568,7 +633,7 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
{
- return fid->fid_index - fid->fid_family->start_index;
+ return fid->fid_index - VLAN_N_VID;
}
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -713,6 +778,13 @@ static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
fid->vni_valid, 0, false);
}
+static void
+mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev)
+{
+ br_fdb_clear_offload(nve_dev, 0);
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
@@ -726,6 +798,7 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.vni_clear = mlxsw_sp_fid_8021d_vni_clear,
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
};
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
@@ -759,6 +832,48 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
.nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
.rif_type = MLXSW_SP_RIF_TYPE_FID,
.ops = &mlxsw_sp_fid_8021d_ops,
+ .lag_vid_valid = 1,
+};
+
+static void
+mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev)
+{
+ br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021d_configure,
+ .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .flood_index = mlxsw_sp_fid_8021d_flood_index,
+ .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+};
+
+/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */
+#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX)
+#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \
+ VLAN_VID_MASK - 2)
+
+/* Range and flood configuration must match mlxsw_config_profile */
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_EMU_START,
+ .end_index = MLXSW_SP_FID_8021Q_EMU_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_emu_ops,
+ .lag_vid_valid = 1,
};
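The range defines above line up with the "4K-2" comment, assuming
VLAN_N_VID = 4096, VLAN_VID_MASK = 0xfff and MLXSW_SP_FID_8021D_MAX = 1024
(the last value is defined elsewhere in the driver and is an assumption here):

	/* START = 4096 + 1024          = 5120                 */
	/* END   = 5120 + 4095 - 2      = 9213                 */
	/* count = END - START + 1      = 4094 = 4K - 2 FIDs   */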
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
@@ -888,7 +1003,7 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
};
static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
[MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
@@ -944,10 +1059,17 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_configure;
+ err = rhashtable_insert_fast(&mlxsw_sp->fid_core->fid_ht, &fid->ht_node,
+ mlxsw_sp_fid_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
list_add(&fid->list, &fid_family->fids_list);
fid->ref_count++;
return fid;
+err_rhashtable_insert:
+ fid->fid_family->ops->deconfigure(fid);
err_configure:
__clear_bit(fid_index - fid_family->start_index,
fid_family->fids_bitmap);
@@ -959,19 +1081,18 @@ err_index_alloc:
void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (--fid->ref_count == 1 && fid->rif) {
- /* Destroy the associated RIF and let it drop the last
- * reference on the FID.
- */
- return mlxsw_sp_rif_destroy(fid->rif);
- } else if (fid->ref_count == 0) {
- list_del(&fid->list);
- fid->fid_family->ops->deconfigure(fid);
- __clear_bit(fid->fid_index - fid_family->start_index,
- fid_family->fids_bitmap);
- kfree(fid);
- }
+ if (--fid->ref_count != 0)
+ return;
+
+ list_del(&fid->list);
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->fid_ht,
+ &fid->ht_node, mlxsw_sp_fid_ht_params);
+ fid->fid_family->ops->deconfigure(fid);
+ __clear_bit(fid->fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+ kfree(fid);
}
struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid)
@@ -985,6 +1106,12 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex);
}
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_lookup(struct mlxsw_sp *mlxsw_sp,
+ u16 vid)
+{
+ return mlxsw_sp_fid_lookup(mlxsw_sp, MLXSW_SP_FID_TYPE_8021Q, &vid);
+}
+
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
int br_ifindex)
{
@@ -1126,9 +1253,13 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
mlxsw_sp->fid_core = fid_core;
+ err = rhashtable_init(&fid_core->fid_ht, &mlxsw_sp_fid_ht_params);
+ if (err)
+ goto err_rhashtable_fid_init;
+
err = rhashtable_init(&fid_core->vni_ht, &mlxsw_sp_fid_vni_ht_params);
if (err)
- goto err_rhashtable_init;
+ goto err_rhashtable_vni_init;
fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
GFP_KERNEL);
@@ -1157,7 +1288,9 @@ err_fid_ops_register:
kfree(fid_core->port_fid_mappings);
err_alloc_port_fid_mappings:
rhashtable_destroy(&fid_core->vni_ht);
-err_rhashtable_init:
+err_rhashtable_vni_init:
+ rhashtable_destroy(&fid_core->fid_ht);
+err_rhashtable_fid_init:
kfree(fid_core);
return err;
}
@@ -1172,5 +1305,6 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
fid_core->fid_family_arr[i]);
kfree(fid_core->port_fid_mappings);
rhashtable_destroy(&fid_core->vni_ht);
+ rhashtable_destroy(&fid_core->fid_ht);
kfree(fid_core);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 8d211972c5e9..ff072358d950 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -406,7 +406,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
- rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
f->common.extack);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index b5b54b41349a..0a31fff2516e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -174,6 +174,20 @@ mlxsw_sp_nve_mc_record_ops_arr[] = {
[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
};
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ addr->addr4 = cpu_to_be32(uip);
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
static struct mlxsw_sp_nve_mc_list *
mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nve_mc_list_key *key)
@@ -775,6 +789,21 @@ static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
+static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev,
+ __be32 vni)
+{
+ const struct mlxsw_sp_nve_ops *ops;
+ enum mlxsw_sp_nve_type type;
+
+ if (WARN_ON(mlxsw_sp_fid_nve_type(fid, &type)))
+ return;
+
+ ops = mlxsw_sp->nve->nve_ops_arr[type];
+ ops->fdb_clear_offload(nve_dev, vni);
+}
+
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack)
@@ -803,7 +832,8 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
return err;
}
- err = mlxsw_sp_fid_vni_set(fid, params->vni);
+ err = mlxsw_sp_fid_vni_set(fid, params->type, params->vni,
+ params->dev->ifindex);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
goto err_fid_vni_set;
@@ -811,8 +841,16 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
nve->config = config;
+ err = ops->fdb_replay(params->dev, params->vni);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to offload the FDB");
+ goto err_fdb_replay;
+ }
+
return 0;
+err_fdb_replay:
+ mlxsw_sp_fid_vni_clear(fid);
err_fid_vni_set:
mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
return err;
@@ -822,9 +860,27 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid)
{
u16 fid_index = mlxsw_sp_fid_index(fid);
+ struct net_device *nve_dev;
+ int nve_ifindex;
+ __be32 vni;
mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
+
+ if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
+ mlxsw_sp_fid_vni(fid, &vni)))
+ goto out;
+
+ nve_dev = dev_get_by_index(&init_net, nve_ifindex);
+ if (!nve_dev)
+ goto out;
+
+ mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni);
+ mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev);
+
+ dev_put(nve_dev);
+
+out:
mlxsw_sp_fid_vni_clear(fid);
mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 4cc3297e13d6..02937ea95bc3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -41,6 +41,8 @@ struct mlxsw_sp_nve_ops {
int (*init)(struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_config *config);
void (*fini)(struct mlxsw_sp_nve *nve);
+ int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni);
+ void (*fdb_clear_offload)(const struct net_device *nve_dev, __be32 vni);
};
extern const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index d21c7be5b1c9..74e564c4ac19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -17,7 +17,8 @@
#define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128
#define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96
-#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS VXLAN_F_UDP_ZERO_CSUM_TX
+#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
+ VXLAN_F_LEARN)
static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
const struct net_device *dev,
@@ -61,11 +62,6 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return false;
}
- if (cfg->flags & VXLAN_F_LEARN) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: Learning is not supported");
- return false;
- }
-
if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
return false;
@@ -215,12 +211,30 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
config->udp_dport);
}
+static int
+mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni)
+{
+ if (WARN_ON(!netif_is_vxlan(nve_dev)))
+ return -EINVAL;
+ return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier);
+}
+
+static void
+mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
+{
+ if (WARN_ON(!netif_is_vxlan(nve_dev)))
+ return;
+ vxlan_fdb_clear_offload(nve_dev, vni);
+}
+
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.type = MLXSW_SP_NVE_TYPE_VXLAN,
.can_offload = mlxsw_sp1_nve_vxlan_can_offload,
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp1_nve_vxlan_init,
.fini = mlxsw_sp1_nve_vxlan_fini,
+ .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay,
+ .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
@@ -246,4 +260,6 @@ const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp2_nve_vxlan_init,
.fini = mlxsw_sp2_nve_vxlan_fini,
+ .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay,
+ .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 6ebf99cc3154..98e5ffd71b91 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -15,6 +15,7 @@
#include <linux/gcd.h>
#include <linux/random.h>
#include <linux/if_macvlan.h>
+#include <linux/refcount.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -70,6 +71,8 @@ struct mlxsw_sp_router {
bool aborted;
struct notifier_block fib_nb;
struct notifier_block netevent_nb;
+ struct notifier_block inetaddr_nb;
+ struct notifier_block inet6addr_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};
@@ -104,6 +107,7 @@ struct mlxsw_sp_rif_params {
struct mlxsw_sp_rif_subport {
struct mlxsw_sp_rif common;
+ refcount_t ref_count;
union {
u16 system_port;
u16 lag_id;
@@ -136,6 +140,7 @@ struct mlxsw_sp_rif_ops {
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
+static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree);
@@ -6297,6 +6302,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
err = -ENOMEM;
goto err_rif_alloc;
}
+ dev_hold(rif->dev);
rif->mlxsw_sp = mlxsw_sp;
rif->ops = ops;
@@ -6335,6 +6341,7 @@ err_configure:
if (fid)
mlxsw_sp_fid_put(fid);
err_fid_get:
+ dev_put(rif->dev);
kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
@@ -6343,7 +6350,7 @@ err_rif_index_alloc:
return ERR_PTR(err);
}
-void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
+static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
const struct mlxsw_sp_rif_ops *ops = rif->ops;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
@@ -6362,6 +6369,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
if (fid)
/* Loopback RIFs are not associated with a FID. */
mlxsw_sp_fid_put(fid);
+ dev_put(rif->dev);
kfree(rif);
vr->rif_count--;
mlxsw_sp_vr_put(mlxsw_sp, vr);
@@ -6392,6 +6400,40 @@ mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
params->system_port = mlxsw_sp_port->local_port;
}
+static struct mlxsw_sp_rif_subport *
+mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
+{
+ return container_of(rif, struct mlxsw_sp_rif_subport, common);
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
+ if (!rif)
+ return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ refcount_inc(&rif_subport->ref_count);
+ return rif;
+}
+
+static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ if (!refcount_dec_and_test(&rif_subport->ref_count))
+ return;
+
+ mlxsw_sp_rif_destroy(rif);
+}
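Together these helpers make the sub-port RIF refcounted: get() either creates
the RIF (the initial ref_count is presumably set in the sub-port RIF setup,
which is not part of this hunk) or bumps the count, and put() destroys it on
the last reference. The VLAN join and leave paths below use them symmetrically:

	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);	/* router join  */
	/* ... */
	mlxsw_sp_rif_subport_put(rif);					/* router leave */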
+
static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
struct net_device *l3_dev,
@@ -6399,22 +6441,18 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_fid *fid;
int err;
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
- if (!rif) {
- struct mlxsw_sp_rif_params params = {
- .dev = l3_dev,
- };
-
- mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
- rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
- if (IS_ERR(rif))
- return PTR_ERR(rif);
- }
+ mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
+ rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
+ if (IS_ERR(rif))
+ return PTR_ERR(rif);
/* FID was already created, just take a reference */
fid = rif->ops->fid_get(rif, extack);
@@ -6441,6 +6479,7 @@ err_port_vid_learning_set:
mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
mlxsw_sp_fid_put(fid);
+ mlxsw_sp_rif_subport_put(rif);
return err;
}
@@ -6449,6 +6488,7 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
u16 vid = mlxsw_sp_port_vlan->vid;
if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
@@ -6458,10 +6498,8 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
- /* If router port holds the last reference on the rFID, then the
- * associated Sub-port RIF will be destroyed.
- */
mlxsw_sp_fid_put(fid);
+ mlxsw_sp_rif_subport_put(rif);
}
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
@@ -6497,8 +6535,8 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
netif_is_ovs_port(port_dev))
return 0;
- return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
- extack);
+ return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
+ MLXSW_SP_DEFAULT_VID, extack);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
@@ -6531,15 +6569,15 @@ static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
if (netif_is_bridge_port(lag_dev))
return 0;
- return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
- extack);
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
+ MLXSW_SP_DEFAULT_VID, extack);
}
-static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
struct mlxsw_sp_rif_params params = {
.dev = l3_dev,
};
@@ -6560,7 +6598,8 @@ static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
return 0;
}
-static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
@@ -6577,7 +6616,8 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid, extack);
else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
+ extack);
return 0;
}
@@ -6678,16 +6718,11 @@ void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_index(rif->fid), false);
}
-static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
+static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *macvlan_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp *mlxsw_sp;
-
- mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
- if (!mlxsw_sp)
- return 0;
-
switch (event) {
case NETDEV_UP:
return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
@@ -6699,7 +6734,35 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
return 0;
}
-static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
+static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ const unsigned char *dev_addr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif *rif;
+ int i;
+
+ /* A RIF is not created for macvlan netdevs. Their MAC is used to
+ * populate the FDB
+ */
+ if (netif_is_macvlan(dev))
+ return 0;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ rif = mlxsw_sp->router->rifs[i];
+ if (rif && rif->dev != dev &&
+ !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
+ mlxsw_sp->mac_mask)) {
+ NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
@@ -6708,21 +6771,24 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
else if (netif_is_lag_master(dev))
return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
else if (netif_is_bridge_master(dev))
- return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
+ extack);
else if (is_vlan_dev(dev))
- return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
+ extack);
else if (netif_is_macvlan(dev))
- return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
+ extack);
else
return 0;
}
-int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
struct net_device *dev = ifa->ifa_dev->dev;
- struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_router *router;
struct mlxsw_sp_rif *rif;
int err = 0;
@@ -6730,15 +6796,12 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
if (event == NETDEV_UP)
goto out;
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- goto out;
-
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
+ rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
+ err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
out:
return notifier_from_errno(err);
}
@@ -6760,13 +6823,19 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
+ err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
+ ivi->extack);
+ if (err)
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
return notifier_from_errno(err);
}
struct mlxsw_sp_inet6addr_event_work {
struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
struct net_device *dev;
unsigned long event;
};
@@ -6775,21 +6844,18 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
struct net_device *dev = inet6addr_work->dev;
unsigned long event = inet6addr_work->event;
- struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
rtnl_lock();
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- goto out;
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- __mlxsw_sp_inetaddr_event(dev, event, NULL);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
out:
rtnl_unlock();
dev_put(dev);
@@ -6797,25 +6863,25 @@ out:
}
/* Called with rcu_read_lock() */
-int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
struct net_device *dev = if6->idev->dev;
+ struct mlxsw_sp_router *router;
/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
if (event == NETDEV_UP)
return NOTIFY_DONE;
- if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
- return NOTIFY_DONE;
-
inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
if (!inet6addr_work)
return NOTIFY_BAD;
+ router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
+ inet6addr_work->mlxsw_sp = router->mlxsw_sp;
inet6addr_work->dev = dev;
inet6addr_work->event = event;
dev_hold(dev);
@@ -6841,7 +6907,12 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
+ err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
+ i6vi->extack);
+ if (err)
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
return notifier_from_errno(err);
}
@@ -6863,20 +6934,14 @@ static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+static int
+mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
{
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif *rif;
+ struct net_device *dev = rif->dev;
u16 fid_index;
int err;
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- return 0;
-
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- return 0;
fid_index = mlxsw_sp_fid_index(rif->fid);
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
@@ -6920,6 +6985,41 @@ err_rif_edit:
return err;
}
+static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_pre_changeaddr_info *info)
+{
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
+ info->dev_addr, extack);
+}
+
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return 0;
+
+ switch (event) {
+ case NETDEV_CHANGEMTU: /* fall through */
+ case NETDEV_CHANGEADDR:
+ return mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
+ case NETDEV_PRE_CHANGEADDR:
+ return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
struct netlink_ext_ack *extack)
@@ -6931,9 +7031,10 @@ static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
*/
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (rif)
- __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
+ extack);
- return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
+ return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
}
static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
@@ -6944,7 +7045,7 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!rif)
return;
- __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
}
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
@@ -6998,18 +7099,13 @@ static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
__mlxsw_sp_rif_macvlan_flush, rif);
}
-static struct mlxsw_sp_rif_subport *
-mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
-{
- return container_of(rif, struct mlxsw_sp_rif_subport, common);
-}
-
static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
const struct mlxsw_sp_rif_params *params)
{
struct mlxsw_sp_rif_subport *rif_subport;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ refcount_set(&rif_subport->ref_count, 1);
rif_subport->vid = params->vid;
rif_subport->lag = params->lag;
if (params->lag)
@@ -7164,11 +7260,15 @@ static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
+ struct net_device *br_dev = rif->dev;
u16 vid;
int err;
if (is_vlan_dev(rif->dev)) {
vid = vlan_dev_vlan_id(rif->dev);
+ br_dev = vlan_dev_real_dev(rif->dev);
+ if (WARN_ON(!netif_is_bridge_master(br_dev)))
+ return ERR_PTR(-EINVAL);
} else {
err = br_vlan_get_pvid(rif->dev, &vid);
if (err < 0 || !vid) {
@@ -7177,7 +7277,7 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
}
}
- return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
+ return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack);
}
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -7267,7 +7367,7 @@ static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
- return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
+ return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack);
}
static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -7293,6 +7393,15 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
.fdb_del = mlxsw_sp_rif_fid_fdb_del,
};
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_fid_configure,
+ .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
@@ -7361,7 +7470,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
};
@@ -7552,6 +7661,16 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
+ router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+ err = register_inet6addr_notifier(&router->inet6addr_nb);
+ if (err)
+ goto err_register_inet6addr_notifier;
+
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -7637,6 +7756,10 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
kfree(mlxsw_sp->router);
return err;
}
@@ -7654,5 +7777,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index d965fd275c90..ad5a9b9e1466 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -383,7 +383,7 @@ mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
}
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
- .can_handle = is_gretap_dev,
+ .can_handle = netif_is_gretap,
.parms = mlxsw_sp_span_entry_gretap4_parms,
.configure = mlxsw_sp_span_entry_gretap4_configure,
.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
@@ -484,7 +484,7 @@ mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
- .can_handle = is_ip6gretap_dev,
+ .can_handle = netif_is_ip6gretap,
.parms = mlxsw_sp_span_entry_gretap6_parms,
.configure = mlxsw_sp_span_entry_gretap6_configure,
.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 50080c60a279..1bd2c6e15f8d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -85,13 +85,11 @@ struct mlxsw_sp_bridge_ops {
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port);
int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
- void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev);
struct mlxsw_sp_fid *
(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
- u16 vid);
+ u16 vid, struct netlink_ext_ack *extack);
struct mlxsw_sp_fid *
(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid);
@@ -292,30 +290,6 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
kfree(bridge_port);
}
-static bool
-mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
- bridge_port)
-{
- struct net_device *dev = bridge_port->dev;
- struct mlxsw_sp *mlxsw_sp;
-
- if (is_vlan_dev(dev))
- mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
- else
- mlxsw_sp = mlxsw_sp_lower_get(dev);
-
- /* In case ports were pulled from out of a bridged LAG, then
- * it's possible the reference count isn't zero, yet the bridge
- * port should be destroyed, as it's no longer an upper of ours.
- */
- if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
- return true;
- else if (bridge_port->ref_count == 0)
- return true;
- else
- return false;
-}
-
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
struct net_device *brport_dev)
@@ -353,8 +327,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
{
struct mlxsw_sp_bridge_device *bridge_device;
- bridge_port->ref_count--;
- if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
+ if (--bridge_port->ref_count != 0)
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);
@@ -935,7 +908,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct mlxsw_sp_bridge_port *bridge_port)
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_bridge_device *bridge_device;
@@ -945,7 +919,7 @@ mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
int err;
bridge_device = bridge_port->bridge_device;
- fid = bridge_device->ops->fid_get(bridge_device, vid);
+ fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
if (IS_ERR(fid))
return PTR_ERR(fid);
@@ -1013,7 +987,8 @@ mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct mlxsw_sp_bridge_port *bridge_port)
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
@@ -1021,12 +996,11 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
int err;
/* No need to continue if only VLAN flags were changed */
- if (mlxsw_sp_port_vlan->bridge_port) {
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ if (mlxsw_sp_port_vlan->bridge_port)
return 0;
- }
- err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
+ err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
if (err)
return err;
@@ -1103,16 +1077,33 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
- u16 vid, bool is_untagged, bool is_pvid)
+ u16 vid, bool is_untagged, bool is_pvid,
+ struct netlink_ext_ack *extack,
+ struct switchdev_trans *trans)
{
u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
u16 old_pvid = mlxsw_sp_port->pvid;
int err;
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
- if (IS_ERR(mlxsw_sp_port_vlan))
- return PTR_ERR(mlxsw_sp_port_vlan);
+ /* The only valid scenario in which a port-vlan already exists is if
+ * the VLAN flags were changed and the port-vlan is associated with the
+ * correct bridge port
+ */
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (mlxsw_sp_port_vlan &&
+ mlxsw_sp_port_vlan->bridge_port != bridge_port)
+ return -EEXIST;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (!mlxsw_sp_port_vlan) {
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
+ vid);
+ if (IS_ERR(mlxsw_sp_port_vlan))
+ return PTR_ERR(mlxsw_sp_port_vlan);
+ }
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
is_untagged);
@@ -1123,7 +1114,8 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
goto err_port_pvid_set;
- err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
+ err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
if (err)
goto err_port_vlan_bridge_join;
@@ -1134,7 +1126,7 @@ err_port_vlan_bridge_join:
err_port_pvid_set:
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
return err;
}
@@ -1173,7 +1165,8 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
@@ -1195,9 +1188,6 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
@@ -1210,7 +1200,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
vid, flag_untagged,
- flag_pvid);
+ flag_pvid, extack, trans);
if (err)
return err;
}
@@ -1779,7 +1769,8 @@ static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
const struct switchdev_obj_port_vlan *vlan;
@@ -1788,7 +1779,8 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
+ extack);
if (switchdev_trans_ph_prepare(trans)) {
/* The event is emitted before the changes are actually
@@ -1826,7 +1818,7 @@ mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1974,8 +1966,6 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_attr_get = mlxsw_sp_port_attr_get,
.switchdev_port_attr_set = mlxsw_sp_port_attr_set,
- .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
- .switchdev_port_obj_del = mlxsw_sp_port_obj_del,
};
static int
@@ -1984,19 +1974,14 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port *mlxsw_sp_port,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
-
if (is_vlan_dev(bridge_port->dev)) {
NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
return -EINVAL;
}
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
- if (WARN_ON(!mlxsw_sp_port_vlan))
- return -EINVAL;
-
- /* Let VLAN-aware bridge take care of its own VLANs */
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ /* Port is no longer usable as a router interface */
+ if (mlxsw_sp_port->default_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
return 0;
}
@@ -2006,41 +1991,133 @@ mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
- mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
/* Make sure untagged frames are allowed to ingress */
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack)
{
- WARN_ON(1);
- return -EINVAL;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ struct mlxsw_sp_nve_params params = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .vni = vxlan->cfg.vni,
+ .dev = vxlan_dev,
+ };
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ /* If the VLAN is 0, we need to find the VLAN that is configured as
+ * PVID and egress untagged on the bridge port of the VxLAN device.
+ * It is possible no such VLAN exists
+ */
+ if (!vid) {
+ err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
+ if (err || !vid)
+ return err;
+ }
+
+ /* If no other port is a member of the VLAN, then the FID does not exist.
+ * NVE will be enabled on the FID once a port joins the VLAN
+ */
+ fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
+ if (!fid)
+ return 0;
+
+ if (mlxsw_sp_fid_vni_is_set(fid)) {
+ err = -EINVAL;
+ goto err_vni_exists;
+ }
+
+ err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
+ if (err)
+ goto err_nve_fid_enable;
+
+ /* The tunnel port does not hold a reference on the FID. Only local
+ * ports and the router port hold a reference on it
+ */
+ mlxsw_sp_fid_put(fid);
+
+ return 0;
+
+err_nve_fid_enable:
+err_vni_exists:
+ mlxsw_sp_fid_put(fid);
+ return err;
}
-static void
-mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev)
+static struct net_device *
+mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ u16 pvid;
+ int err;
+
+ if (!netif_is_vxlan(dev))
+ continue;
+
+ err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
+ if (err || pvid != vid)
+ continue;
+
+ return dev;
+ }
+
+ return NULL;
}
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
- u16 vid)
+ u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct net_device *vxlan_dev;
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+ if (IS_ERR(fid))
+ return fid;
+
+ if (mlxsw_sp_fid_vni_is_set(fid))
+ return fid;
+
+ /* Find the VxLAN device that has the specified VLAN configured as
+ * PVID and egress untagged. There can be at most one such device
+ */
+ vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
+ vid);
+ if (!vxlan_dev)
+ return fid;
+
+ if (!netif_running(vxlan_dev))
+ return fid;
+
+ err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ goto err_vxlan_join;
+
+ return fid;
- return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+err_vxlan_join:
+ mlxsw_sp_fid_put(fid);
+ return ERR_PTR(err);
}
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid)
{
- WARN_ON(1);
- return NULL;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
}
static u16
@@ -2054,7 +2131,6 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
.port_join = mlxsw_sp_bridge_8021q_port_join,
.port_leave = mlxsw_sp_bridge_8021q_port_leave,
.vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join,
- .vxlan_leave = mlxsw_sp_bridge_8021q_vxlan_leave,
.fid_get = mlxsw_sp_bridge_8021q_fid_get,
.fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
.fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
@@ -2087,7 +2163,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct net_device *dev = bridge_port->dev;
u16 vid;
- vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
@@ -2101,7 +2177,8 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
if (mlxsw_sp_port_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
- return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
+ return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
}
static void
@@ -2113,9 +2190,9 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct net_device *dev = bridge_port->dev;
u16 vid;
- vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
- if (!mlxsw_sp_port_vlan)
+ if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
return;
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
@@ -2123,7 +2200,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
static int
mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
@@ -2162,29 +2239,9 @@ err_vni_exists:
return err;
}
-static void
-mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
- struct mlxsw_sp_fid *fid;
-
- fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
- if (WARN_ON(!fid))
- return;
-
- /* If the VxLAN device is down, then the FID does not have a VNI */
- if (!mlxsw_sp_fid_vni_is_set(fid))
- goto out;
-
- mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
-out:
- mlxsw_sp_fid_put(fid);
-}
-
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
- u16 vid)
+ u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
struct net_device *vxlan_dev;
@@ -2205,7 +2262,8 @@ mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
if (!netif_running(vxlan_dev))
return fid;
- err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL);
+ err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
+ extack);
if (err)
goto err_vxlan_join;
@@ -2240,7 +2298,6 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
.port_join = mlxsw_sp_bridge_8021d_port_join,
.port_leave = mlxsw_sp_bridge_8021d_port_leave,
.vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join,
- .vxlan_leave = mlxsw_sp_bridge_8021d_vxlan_leave,
.fid_get = mlxsw_sp_bridge_8021d_fid_get,
.fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup,
.fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
@@ -2295,7 +2352,7 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_bridge_device *bridge_device;
@@ -2304,20 +2361,102 @@ int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!bridge_device))
return -EINVAL;
- return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack);
+ return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
}
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
const struct net_device *vxlan_dev)
{
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ struct mlxsw_sp_fid *fid;
+
+ /* If the VxLAN device is down, then the FID does not have a VNI */
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
+ if (!fid)
+ return;
+
+ mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
+ mlxsw_sp_fid_put(fid);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ u16 vid,
+ struct netlink_ext_ack *extack)
+{
struct mlxsw_sp_bridge_device *bridge_device;
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (WARN_ON(!bridge_device))
- return;
+ return ERR_PTR(-EINVAL);
+
+ return bridge_device->ops->fid_get(bridge_device, vid, extack);
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
+ enum mlxsw_sp_l3proto *proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ if (vxlan_addr->sa.sa_family == AF_INET) {
+ addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV4;
+ } else {
+ addr->addr6 = vxlan_addr->sin6.sin6_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV6;
+ }
+}
- bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev);
+static void
+mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ union vxlan_addr *vxlan_addr)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ vxlan_addr->sa.sa_family = AF_INET;
+ vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ vxlan_addr->sa.sa_family = AF_INET6;
+ vxlan_addr->sin6.sin6_addr = addr->addr6;
+ break;
+ }
+}
+
+static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
+ const char *mac,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ __be32 vni, bool adding)
+{
+ struct switchdev_notifier_vxlan_fdb_info info;
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ enum switchdev_notifier_type type;
+
+ type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
+ SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
+ info.remote_port = vxlan->cfg.dst_port;
+ info.remote_vni = vni;
+ info.remote_ifindex = 0;
+ ether_addr_copy(info.eth_addr, mac);
+ info.vni = vni;
+ info.offloaded = adding;
+ call_switchdev_notifiers(type, dev, &info.info);
+}
+
+static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
+ const char *mac,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ __be32 vni,
+ bool adding)
+{
+ if (netif_is_vxlan(dev))
+ mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
+ adding);
}
static void
@@ -2428,7 +2567,8 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
- lag_vid = mlxsw_sp_port_vlan->vid;
+ lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
+ mlxsw_sp_port_vlan->vid : 0;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
@@ -2451,6 +2591,122 @@ just_remove:
goto do_fdb_op;
}
+static int
+__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid *fid,
+ bool adding,
+ struct net_device **nve_dev,
+ u16 *p_vid, __be32 *p_vni)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *br_dev, *dev;
+ int nve_ifindex;
+ int err;
+
+ err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni(fid, p_vni);
+ if (err)
+ return err;
+
+ dev = __dev_get_by_index(&init_net, nve_ifindex);
+ if (!dev)
+ return -EINVAL;
+ *nve_dev = dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
+ return -EINVAL;
+
+ if (adding && netif_is_vxlan(dev)) {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
+ return -EINVAL;
+ }
+
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ return -EINVAL;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return -EINVAL;
+
+ *p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl,
+ int rec_index,
+ bool adding)
+{
+ enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
+ enum switchdev_notifier_type type;
+ struct net_device *nve_dev;
+ union mlxsw_sp_l3addr addr;
+ struct mlxsw_sp_fid *fid;
+ char mac[ETH_ALEN];
+ u16 fid_index, vid;
+ __be32 vni;
+ u32 uip;
+ int err;
+
+ mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
+ &uip, &sfn_proto);
+
+ fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
+ if (!fid)
+ goto err_fid_lookup;
+
+ err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr);
+ if (err)
+ goto err_ip_resolve;
+
+ err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
+ &nve_dev, &vid, &vni);
+ if (err)
+ goto err_fdb_process;
+
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr, adding, true);
+ if (err)
+ goto err_fdb_op;
+
+ mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr, vni, adding);
+
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
+ SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
+
+ mlxsw_sp_fid_put(fid);
+
+ return;
+
+err_fdb_op:
+err_fdb_process:
+err_ip_resolve:
+ mlxsw_sp_fid_put(fid);
+err_fid_lookup:
+ /* Remove an FDB entry in case we cannot process it. Otherwise the
+ * device will keep sending the same notification over and over again.
+ */
+ mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+ (enum mlxsw_sp_l3proto) sfn_proto, &addr,
+ false, true);
+}
+
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index)
{
@@ -2471,6 +2727,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
rec_index, false);
break;
+ case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
+ mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+ rec_index, true);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
+ mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+ rec_index, false);
+ break;
}
}
@@ -2526,20 +2790,6 @@ struct mlxsw_sp_switchdev_event_work {
};
static void
-mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
- enum mlxsw_sp_l3proto *proto,
- union mlxsw_sp_l3addr *addr)
-{
- if (vxlan_addr->sa.sa_family == AF_INET) {
- addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
- *proto = MLXSW_SP_L3_PROTO_IPV4;
- } else {
- addr->addr6 = vxlan_addr->sin6.sin6_addr;
- *proto = MLXSW_SP_L3_PROTO_IPV6;
- }
-}
-
-static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_switchdev_event_work *
switchdev_work,
@@ -2604,7 +2854,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
return;
- if (!switchdev_work->fdb_info.added_by_user)
+ if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
+ !switchdev_work->fdb_info.added_by_user)
return;
if (!netif_running(dev))
@@ -2947,10 +3198,274 @@ err_addr_alloc:
return NOTIFY_BAD;
}
-static struct notifier_block mlxsw_sp_switchdev_notifier = {
+struct notifier_block mlxsw_sp_switchdev_notifier = {
.notifier_call = mlxsw_sp_switchdev_event,
};
+static int
+mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ bool flag_untagged, bool flag_pvid,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ __be32 vni = vxlan->cfg.vni;
+ struct mlxsw_sp_fid *fid;
+ u16 old_vid;
+ int err;
+
+ /* We cannot have the same VLAN as PVID and egress untagged on multiple
+ * VxLAN devices. Note that we get this notification before the VLAN is
+ * actually added to the bridge's database, so it is not possible for
+ * the lookup function to return 'vxlan_dev'
+ */
+ if (flag_untagged && flag_pvid &&
+ mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
+ return -EINVAL;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (!netif_running(vxlan_dev))
+ return 0;
+
+ /* First case: FID is not associated with this VNI, but the new VLAN
+ * is both PVID and egress untagged. Need to enable NVE on the FID, if
+ * it exists
+ */
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
+ if (!fid) {
+ if (!flag_untagged || !flag_pvid)
+ return 0;
+ return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
+ vxlan_dev, vid, extack);
+ }
+
+ /* Second case: FID is associated with the VNI and the VLAN associated
+ * with the FID is the same as the notified VLAN. This means the flags
+ * (PVID / egress untagged) were toggled and that NVE should be
+ * disabled on the FID
+ */
+ old_vid = mlxsw_sp_fid_8021q_vid(fid);
+ if (vid == old_vid) {
+ if (WARN_ON(flag_untagged && flag_pvid)) {
+ mlxsw_sp_fid_put(fid);
+ return -EINVAL;
+ }
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ mlxsw_sp_fid_put(fid);
+ return 0;
+ }
+
+ /* Third case: A new VLAN was configured on the VxLAN device, but this
+ * VLAN is not PVID, so there is nothing to do.
+ */
+ if (!flag_pvid) {
+ mlxsw_sp_fid_put(fid);
+ return 0;
+ }
+
+ /* Fourth case: The new VLAN is PVID, which means the VLAN currently
+ * mapped to the VNI should be unmapped
+ */
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ mlxsw_sp_fid_put(fid);
+
+ /* Fifth case: The new VLAN is also egress untagged, which means the
+ * VLAN needs to be mapped to the VNI
+ */
+ if (!flag_untagged)
+ return 0;
+
+ err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ goto err_vxlan_join;
+
+ return 0;
+
+err_vxlan_join:
+ mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
+ NULL);
+ return err;
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid)
+{
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ __be32 vni = vxlan->cfg.vni;
+ struct mlxsw_sp_fid *fid;
+
+ if (!netif_running(vxlan_dev))
+ return;
+
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
+ if (!fid)
+ return;
+
+ /* A different VLAN than the one mapped to the VNI is deleted */
+ if (mlxsw_sp_fid_8021q_vid(fid) != vid)
+ goto out;
+
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+
+out:
+ mlxsw_sp_fid_put(fid);
+}
+
+static int
+mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ struct switchdev_obj_port_vlan *vlan =
+ SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct switchdev_trans *trans = port_obj_info->trans;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct netlink_ext_ack *extack;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *br_dev;
+ u16 vid;
+
+ extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+ br_dev = netdev_master_upper_dev_get(vxlan_dev);
+ if (!br_dev)
+ return 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ port_obj_info->handled = true;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return -EINVAL;
+
+ if (!bridge_device->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
+ vxlan_dev, vid,
+ flag_untagged,
+ flag_pvid, trans,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ struct switchdev_obj_port_vlan *vlan =
+ SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *br_dev;
+ u16 vid;
+
+ br_dev = netdev_master_upper_dev_get(vxlan_dev);
+ if (!br_dev)
+ return;
+
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return;
+
+ port_obj_info->handled = true;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ if (!bridge_device->vlan_enabled)
+ return;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
+ vxlan_dev, vid);
+}
+
+static int
+mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ int err = 0;
+
+ switch (port_obj_info->obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
+ port_obj_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static void
+mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ switch (port_obj_info->obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
+ break;
+ default:
+ break;
+ }
+}
+
+static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ if (netif_is_vxlan(dev))
+ err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
+ else
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ if (netif_is_vxlan(dev))
+ mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
+ else
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_obj_del);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
+ .notifier_call = mlxsw_sp_switchdev_blocking_event,
+};
+
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
@@ -2960,6 +3475,7 @@ mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+ struct notifier_block *nb;
int err;
err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
@@ -2974,17 +3490,33 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
return err;
}
+ nb = &mlxsw_sp_switchdev_blocking_notifier;
+ err = register_switchdev_blocking_notifier(nb);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
+ goto err_register_switchdev_blocking_notifier;
+ }
+
INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
return 0;
+
+err_register_switchdev_blocking_notifier:
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ return err;
}
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct notifier_block *nb;
+
cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
- unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ nb = &mlxsw_sp_switchdev_blocking_notifier;
+ unregister_switchdev_blocking_notifier(nb);
+
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index c84074fa4c95..0dca2fa51dc3 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -15,6 +15,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/skbuff.h>
+#include <linux/iopoll.h>
#include <net/arp.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
@@ -22,6 +23,9 @@
#include "ocelot.h"
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -41,23 +45,20 @@ struct ocelot_mact_entry {
enum macaccess_entry_type type;
};
-static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
+static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
{
- unsigned int val, timeout = 10;
-
- /* Wait for the issued mac table command to be completed, or timeout.
- * When the command read from ANA_TABLES_MACACCESS is
- * MACACCESS_CMD_IDLE, the issued command completed successfully.
- */
- do {
- val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
- val &= ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M;
- } while (val != MACACCESS_CMD_IDLE && timeout--);
+ return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+}
- if (!timeout)
- return -ETIMEDOUT;
+static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
+{
+ u32 val;
- return 0;
+ return readx_poll_timeout(ocelot_mact_read_macaccess,
+ ocelot, val,
+ (val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
static void ocelot_mact_select(struct ocelot *ocelot,
@@ -129,23 +130,21 @@ static void ocelot_mact_init(struct ocelot *ocelot)
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
-static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
+static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
- unsigned int val, timeout = 10;
-
- /* Wait for the issued vlan table command to be completed, or timeout.
- * When the command read from ANA_TABLES_VLANACCESS is
- * VLANACCESS_CMD_IDLE, the issued command completed successfully.
- */
- do {
- val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
- val &= ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M;
- } while (val != ANA_TABLES_VLANACCESS_CMD_IDLE && timeout--);
+ return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
+}
- if (!timeout)
- return -ETIMEDOUT;
+static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
+{
+ u32 val;
- return 0;
+ return readx_poll_timeout(ocelot_vlant_read_vlanaccess,
+ ocelot,
+ val,
+ (val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) ==
+ ANA_TABLES_VLANACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
@@ -1293,7 +1292,8 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
static int ocelot_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
{
int ret = 0;
@@ -1337,8 +1337,6 @@ static int ocelot_port_obj_del(struct net_device *dev,
static const struct switchdev_ops ocelot_port_switchdev_ops = {
.switchdev_port_attr_get = ocelot_port_attr_get,
.switchdev_port_attr_set = ocelot_port_attr_set,
- .switchdev_port_obj_add = ocelot_port_obj_add,
- .switchdev_port_obj_del = ocelot_port_obj_del,
};
static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
@@ -1595,6 +1593,34 @@ struct notifier_block ocelot_netdevice_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_netdevice_nb);
+static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ /* Blocking events. */
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_obj_del);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = ocelot_switchdev_blocking_event,
+};
+EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
+
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy)
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 62c7c8eb00d9..086775f7b52f 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -499,5 +499,6 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
struct phy_device *phy);
extern struct notifier_block ocelot_netdevice_nb;
+extern struct notifier_block ocelot_switchdev_blocking_nb;
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 4c23d18bbf44..ca3ea2fbfcd0 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -12,6 +12,7 @@
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
#include <linux/skbuff.h>
+#include <net/switchdev.h>
#include "ocelot.h"
@@ -328,6 +329,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
register_netdevice_notifier(&ocelot_netdevice_nb);
+ register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
dev_info(&pdev->dev, "Ocelot switch probed\n");
@@ -342,6 +344,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit(ocelot);
+ unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
return 0;
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index c26e0f70c494..7df20561e3fa 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -26,7 +26,7 @@ config S2IO
on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/s2io.txt>.
+ <file:Documentation/networking/device_drivers/neterion/s2io.txt>.
To compile this driver as a module, choose M here. The module
will be called s2io.
@@ -41,7 +41,7 @@ config VXGE
labeled as either one, depending on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/vxge.txt>.
+ <file:Documentation/networking/device_drivers/neterion/vxge.txt>.
To compile this driver as a module, choose M here. The module
will be called vxge.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index f7a0d1d5885e..59e77e3086bb 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1695,17 +1695,10 @@ exit:
*/
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- u32 max_frags;
struct __vxge_hw_channel *channel;
channel = &fifo->channel;
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
- (struct vxge_hw_fifo_txd *)txdlh);
-
- max_frags = fifo->config->max_frags;
-
vxge_hw_channel_dtr_free(channel, txdlh);
}
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 4afb10375397..47c708f08ade 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -56,7 +56,9 @@ endif
ifeq ($(CONFIG_NFP_APP_ABM_NIC),y)
nfp-objs += \
+ abm/cls.o \
abm/ctrl.o \
+ abm/qdisc.o \
abm/main.o
endif
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
new file mode 100644
index 000000000000..9852080cf454
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <net/pkt_cls.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_net_repr.h"
+#include "main.h"
+
+struct nfp_abm_u32_match {
+ u32 handle;
+ u32 band;
+ u8 mask;
+ u8 val;
+ struct list_head list;
+};
+
+static bool
+nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
+ __be16 proto, struct netlink_ext_ack *extack)
+{
+ struct tc_u32_key *k;
+ unsigned int tos_off;
+
+ if (knode->exts && tcf_exts_has_actions(knode->exts)) {
+ NL_SET_ERR_MSG_MOD(extack, "action offload not supported");
+ return false;
+ }
+ if (knode->link_handle) {
+ NL_SET_ERR_MSG_MOD(extack, "linking not supported");
+ return false;
+ }
+ if (knode->sel->flags != TC_U32_TERMINAL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "flags must be equal to TC_U32_TERMINAL");
+ return false;
+ }
+ if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
+ knode->sel->offoff || knode->fshift) {
+ NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+ return false;
+ }
+ if (knode->sel->hoff || knode->sel->hmask) {
+ NL_SET_ERR_MSG_MOD(extack, "hashing not supported");
+ return false;
+ }
+ if (knode->val || knode->mask) {
+ NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported");
+ return false;
+ }
+ if (knode->res && knode->res->class) {
+ NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported");
+ return false;
+ }
+ if (knode->res && knode->res->classid >= abm->num_bands) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "classid higher than number of bands");
+ return false;
+ }
+ if (knode->sel->nkeys != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "exactly one key required");
+ return false;
+ }
+
+ switch (proto) {
+ case htons(ETH_P_IP):
+ tos_off = 16;
+ break;
+ case htons(ETH_P_IPV6):
+ tos_off = 20;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol");
+ return false;
+ }
+
+ k = &knode->sel->keys[0];
+ if (k->offmask) {
+ NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+ return false;
+ }
+ if (k->off) {
+ NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched");
+ return false;
+ }
+ if (k->val & ~k->mask) {
+ NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key");
+ return false;
+ }
+ if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) {
+ NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used");
+ nfp_err(abm->app->cpp,
+ "u32 offload: requested mask %x FW can support only %x\n",
+ be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask);
+ return false;
+ }
+
+ return true;
+}
+
+/* This filter list -> map conversion is O(n * m); we expect a single-digit or
+ * low double-digit number of prios, and likewise for the filters. Also, u32
+ * doesn't report stats, so it's really only a setup-time cost.
+ */
+static unsigned int
+nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio)
+{
+ struct nfp_abm_u32_match *iter;
+
+ list_for_each_entry(iter, &alink->dscp_map, list)
+ if ((prio & iter->mask) == iter->val)
+ return iter->band;
+
+ return alink->def_band;
+}
+
+static int nfp_abm_update_band_map(struct nfp_abm_link *alink)
+{
+ unsigned int i, bits_per_prio, prios_per_word, base_shift;
+ struct nfp_abm *abm = alink->abm;
+ u32 field_mask;
+
+ alink->has_prio = !list_empty(&alink->dscp_map);
+
+ bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands));
+ field_mask = (1 << bits_per_prio) - 1;
+ prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio;
+
+ /* FW mask applies from top bits */
+ base_shift = 8 - order_base_2(abm->num_prios);
+
+ for (i = 0; i < abm->num_prios; i++) {
+ unsigned int offset;
+ u32 *word;
+ u8 band;
+
+ word = &alink->prio_map[i / prios_per_word];
+ offset = (i % prios_per_word) * bits_per_prio;
+
+ band = nfp_abm_find_band_for_prio(alink, i << base_shift);
+
+ *word &= ~(field_mask << offset);
+ *word |= band << offset;
+ }
+
+ /* Qdisc offload status may change if has_prio changed */
+ nfp_abm_qdisc_offload_update(alink);
+
+ return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+}
+
+static void
+nfp_abm_u32_knode_delete(struct nfp_abm_link *alink,
+ struct tc_cls_u32_knode *knode)
+{
+ struct nfp_abm_u32_match *iter;
+
+ list_for_each_entry(iter, &alink->dscp_map, list)
+ if (iter->handle == knode->handle) {
+ list_del(&iter->list);
+ kfree(iter);
+ nfp_abm_update_band_map(alink);
+ return;
+ }
+}
+
+static int
+nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
+ struct tc_cls_u32_knode *knode,
+ __be16 proto, struct netlink_ext_ack *extack)
+{
+ struct nfp_abm_u32_match *match = NULL, *iter;
+ unsigned int tos_off;
+ u8 mask, val;
+ int err;
+
+ if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
+ goto err_delete;
+
+ tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
+
+ /* Extract the DSCP Class Selector bits */
+ val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff;
+ mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff;
+
+ /* Check that there is no conflicting mapping and find the match by handle */
+ list_for_each_entry(iter, &alink->dscp_map, list) {
+ u32 cmask;
+
+ if (iter->handle == knode->handle) {
+ match = iter;
+ continue;
+ }
+
+ cmask = iter->mask & mask;
+ if ((iter->val & cmask) == (val & cmask) &&
+ iter->band != knode->res->classid) {
+ NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
+ goto err_delete;
+ }
+ }
+
+ if (!match) {
+ match = kzalloc(sizeof(*match), GFP_KERNEL);
+ if (!match)
+ return -ENOMEM;
+ list_add(&match->list, &alink->dscp_map);
+ }
+ match->handle = knode->handle;
+ match->band = knode->res->classid;
+ match->mask = mask;
+ match->val = val;
+
+ err = nfp_abm_update_band_map(alink);
+ if (err)
+ goto err_delete;
+
+ return 0;
+
+err_delete:
+ nfp_abm_u32_knode_delete(alink, knode);
+ return -EOPNOTSUPP;
+}
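The tos_off values follow from the u32 key matching the first 32-bit word of the IP header: in IPv4 the ToS byte occupies bits 23:16 of that word, in IPv6 the Traffic Class occupies bits 27:20. A standalone sketch of the extraction, using an assumed IPv4 key carrying ToS 0xa0 (DSCP CS5):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* First IPv4 header word: version/IHL, ToS, total length */
	uint32_t key = htonl(0x00a00000);	/* ToS 0xa0 (DSCP CS5) */
	unsigned int tos_off = 16;		/* would be 20 for IPv6 */
	uint8_t tos = ntohl(key) >> tos_off & 0xff;

	printf("extracted ToS byte: %#x\n", tos);	/* prints 0xa0 */
	return 0;
}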
+
+static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct tc_cls_u32_offload *cls_u32 = type_data;
+ struct nfp_repr *repr = cb_priv;
+ struct nfp_abm_link *alink;
+
+ alink = repr->app_priv;
+
+ if (type != TC_SETUP_CLSU32) {
+ NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+ "only offload of u32 classifier supported");
+ return -EOPNOTSUPP;
+ }
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common))
+ return -EOPNOTSUPP;
+
+ if (cls_u32->common.protocol != htons(ETH_P_IP) &&
+ cls_u32->common.protocol != htons(ETH_P_IPV6)) {
+ NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+ "only IP and IPv6 supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
+
+ switch (cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return nfp_abm_u32_knode_replace(alink, &cls_u32->knode,
+ cls_u32->common.protocol,
+ cls_u32->common.extack);
+ case TC_CLSU32_DELETE_KNODE:
+ nfp_abm_u32_knode_delete(alink, &cls_u32->knode);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ nfp_abm_setup_tc_block_cb,
+ repr, repr, f->extack);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
+ repr);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 3c661f422688..9584f03f3efa 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
+#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
@@ -11,38 +13,58 @@
#include "../nfp_net.h"
#include "main.h"
-#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u"
+#define NFP_NUM_PRIOS_SYM_NAME "_abi_pci_dscp_num_prio_%u"
+#define NFP_NUM_BANDS_SYM_NAME "_abi_pci_dscp_num_band_%u"
+#define NFP_ACT_MASK_SYM_NAME "_abi_nfd_out_q_actions_%u"
+
+#define NFP_RED_SUPPORT_SYM_NAME "_abi_nfd_out_red_offload_%u"
+
+#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u%s"
#define NFP_QLVL_STRIDE 16
#define NFP_QLVL_BLOG_BYTES 0
#define NFP_QLVL_BLOG_PKTS 4
#define NFP_QLVL_THRS 8
+#define NFP_QLVL_ACT 12
-#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats"
+#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats%s"
#define NFP_QMSTAT_STRIDE 32
#define NFP_QMSTAT_NON_STO 0
#define NFP_QMSTAT_STO 8
#define NFP_QMSTAT_DROP 16
#define NFP_QMSTAT_ECN 24
+#define NFP_Q_STAT_SYM_NAME "_abi_nfd_rxq_stats%u%s"
+#define NFP_Q_STAT_STRIDE 16
+#define NFP_Q_STAT_PKTS 0
+#define NFP_Q_STAT_BYTES 8
+
+#define NFP_NET_ABM_MBOX_CMD NFP_NET_CFG_MBOX_SIMPLE_CMD
+#define NFP_NET_ABM_MBOX_RET NFP_NET_CFG_MBOX_SIMPLE_RET
+#define NFP_NET_ABM_MBOX_DATALEN NFP_NET_CFG_MBOX_SIMPLE_VAL
+#define NFP_NET_ABM_MBOX_RESERVED (NFP_NET_CFG_MBOX_SIMPLE_VAL + 4)
+#define NFP_NET_ABM_MBOX_DATA (NFP_NET_CFG_MBOX_SIMPLE_VAL + 8)
+
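The NFP_QLVL_* offsets above describe a 16-byte per-subqueue record in the queue-levels symbol. A hypothetical struct view of that layout (the driver accesses the symbol by raw offset; this is only for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct qlvl_record {			/* illustrative view only */
	uint32_t blog_bytes;		/* NFP_QLVL_BLOG_BYTES (0) */
	uint32_t blog_pkts;		/* NFP_QLVL_BLOG_PKTS  (4) */
	uint32_t thrs;			/* NFP_QLVL_THRS       (8) */
	uint32_t act;			/* NFP_QLVL_ACT       (12) */
};					/* NFP_QLVL_STRIDE = 16   */

int main(void)
{
	printf("stride %zu, thrs at %zu, act at %zu\n",
	       sizeof(struct qlvl_record),
	       offsetof(struct qlvl_record, thrs),
	       offsetof(struct qlvl_record, act));
	return 0;
}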
static int
nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, unsigned int i,
- bool is_u64, u64 *res)
+ unsigned int stride, unsigned int offset, unsigned int band,
+ unsigned int queue, bool is_u64, u64 *res)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
u64 val, sym_offset;
+ unsigned int qid;
u32 val32;
int err;
- sym_offset = (alink->queue_base + i) * stride + offset;
+ qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ sym_offset = qid * stride + offset;
if (is_u64)
err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val);
else
err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32);
if (err) {
- nfp_err(cpp,
- "RED offload reading stat failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n",
+ alink->id, band, queue, alink->queue_base);
return err;
}
@@ -50,175 +72,179 @@ nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
return 0;
}
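With the per-band layout, statistics and configuration are addressed by a flat subqueue id of band * NFP_NET_MAX_RX_RINGS + queue_base + queue. A small sketch of the resulting symbol offset, assuming NFP_NET_MAX_RX_RINGS is 64 and reusing the QMSTAT offsets defined earlier (values illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_RX_RINGS	64	/* assumed NFP_NET_MAX_RX_RINGS */
#define QMSTAT_STRIDE	32
#define QMSTAT_DROP	16

int main(void)
{
	unsigned int band = 1, queue_base = 8, queue = 3;
	unsigned int qid = band * MAX_RX_RINGS + queue_base + queue;
	uint64_t sym_offset = (uint64_t)qid * QMSTAT_STRIDE + QMSTAT_DROP;

	/* qid 75, drop counter at symbol offset 2416 */
	printf("qid %u, offset %llu\n", qid, (unsigned long long)sym_offset);
	return 0;
}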
-static int
-nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, bool is_u64,
- u64 *res)
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
{
- u64 val, sum = 0;
- unsigned int i;
+ struct nfp_cpp *cpp = abm->app->cpp;
+ u64 sym_offset;
int err;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i,
- is_u64, &val);
- if (err)
- return err;
- sum += val;
+ __clear_bit(id, abm->threshold_undef);
+ if (abm->thresholds[id] == val)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val);
+ if (err) {
+ nfp_err(cpp,
+ "RED offload setting level failed on subqueue %d\n",
+ id);
+ return err;
}
- *res = sum;
+ abm->thresholds[id] = val;
return 0;
}
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, u32 val)
{
- struct nfp_cpp *cpp = alink->abm->app->cpp;
+ unsigned int threshold;
+
+ threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
+}
+
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+ enum nfp_abm_q_action act)
+{
+ struct nfp_cpp *cpp = abm->app->cpp;
u64 sym_offset;
int err;
- sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
- err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0,
- sym_offset, val);
+ if (abm->actions[id] == act)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_ACT;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, act);
if (err) {
- nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp,
+ "RED offload setting action failed on subqueue %d\n",
+ id);
return err;
}
+ abm->actions[id] = act;
return 0;
}
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val)
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, enum nfp_abm_q_action act)
+{
+ unsigned int qid;
+
+ qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act);
+}
+
+u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue)
{
- int i, err;
+ unsigned int band;
+ u64 val, sum = 0;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_set_q_lvl(alink, i, val);
- if (err)
- return err;
+ for (band = 0; band < alink->abm->num_bands; band++) {
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_NON_STO,
+ band, queue, true, &val))
+ return 0;
+ sum += val;
}
- return 0;
+ return sum;
}
-u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i)
+u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue)
{
- u64 val;
+ unsigned int band;
+ u64 val, sum = 0;
- if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
- NFP_QMSTAT_NON_STO, i, true, &val))
- return 0;
- return val;
+ for (band = 0; band < alink->abm->num_bands; band++) {
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_STO,
+ band, queue, true, &val))
+ return 0;
+ sum += val;
+ }
+
+ return sum;
}
-u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i)
+static int
+nfp_abm_ctrl_stat_basic(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, unsigned int off, u64 *val)
{
- u64 val;
+ if (!nfp_abm_has_prio(alink->abm)) {
+ if (!band) {
+ unsigned int id = alink->queue_base + queue;
+
+ *val = nn_readq(alink->vnic,
+ NFP_NET_CFG_RXR_STATS(id) + off);
+ } else {
+ *val = 0;
+ }
- if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
- NFP_QMSTAT_STO, i, true, &val))
return 0;
- return val;
+ } else {
+ return nfp_abm_ctrl_stat(alink, alink->abm->q_stats,
+ NFP_Q_STAT_STRIDE, off, band, queue,
+ true, val);
+ }
}
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
- struct nfp_alink_stats *stats)
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, struct nfp_alink_stats *stats)
{
int err;
- stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
+ err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS,
+ &stats->tx_pkts);
+ if (err)
+ return err;
- err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- i, false, &stats->backlog_bytes);
+ err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES,
+ &stats->tx_bytes);
+ if (err)
+ return err;
+
+ err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE,
+ NFP_QLVL_BLOG_BYTES, band, queue, false,
+ &stats->backlog_bytes);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- i, false, &stats->backlog_pkts);
+ band, queue, false, &stats->backlog_pkts);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- i, true, &stats->drops);
+ band, queue, true, &stats->drops);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- i, true, &stats->overlimits);
+ band, queue, true, &stats->overlimits);
}
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats)
-{
- u64 pkts = 0, bytes = 0;
- int i, err;
-
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
- }
- stats->tx_pkts = pkts;
- stats->tx_bytes = bytes;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- false, &stats->backlog_bytes);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- false, &stats->backlog_pkts);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &stats->drops);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &stats->overlimits);
-}
-
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats)
{
int err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- i, true, &xstats->pdrop);
+ band, queue, true, &xstats->pdrop);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- i, true, &xstats->ecn_marked);
-}
-
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats)
-{
- int err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &xstats->pdrop);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &xstats->ecn_marked);
+ band, queue, true, &xstats->ecn_marked);
}
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
@@ -233,10 +259,64 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
NULL, 0, NULL, 0);
}
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
+{
+ struct nfp_net *nn = alink->vnic;
+ unsigned int i;
+ int err;
+
+ /* Write data_len and wipe reserved */
+ nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
+ alink->abm->prio_map_len);
+
+ for (i = 0; i < alink->abm->prio_map_len; i += sizeof(u32))
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
+ packed[i / sizeof(u32)]);
+
+ err = nfp_net_reconfig_mbox(nn,
+ NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+ if (err)
+ nfp_err(alink->abm->app->cpp,
+ "setting DSCP -> VQ map failed with error %d\n", err);
+ return err;
+}
+
+static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct nfp_net *nn = alink->vnic;
+ unsigned int min_mbox_sz;
+
+ if (!nfp_abm_has_prio(alink->abm))
+ return 0;
+
+ min_mbox_sz = NFP_NET_ABM_MBOX_DATA + alink->abm->prio_map_len;
+ if (nn->tlv_caps.mbox_len < min_mbox_sz) {
+ nfp_err(abm->app->pf->cpp, "vNIC mailbox too small for prio offload: %u, need: %u\n",
+ nn->tlv_caps.mbox_len, min_mbox_sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
{
alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
alink->queue_base /= alink->vnic->stride_rx;
+
+ return nfp_abm_ctrl_prio_check_params(alink);
+}
+
+static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm *abm)
+{
+ unsigned int size;
+
+ size = roundup_pow_of_two(order_base_2(abm->num_bands));
+ size = DIV_ROUND_UP(size * abm->num_prios, BITS_PER_BYTE);
+ size = round_up(size, sizeof(u32));
+
+ return size;
}
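A worked example of the sizing above, assuming num_bands = 4 and num_prios = 64 (illustrative values):

#include <stdio.h>

int main(void)
{
	unsigned int bits = 2;			/* roundup_pow_of_two(order_base_2(4)) */
	unsigned int bytes = (bits * 64 + 7) / 8;	/* DIV_ROUND_UP -> 16 */
	unsigned int len = (bytes + 3) & ~3u;		/* round_up to u32 -> 16 */

	printf("prio_map_len = %u bytes\n", len);
	return 0;
}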
static const struct nfp_rtsym *
@@ -260,33 +340,86 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
}
static const struct nfp_rtsym *
-nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name,
- unsigned int size)
+nfp_abm_ctrl_find_q_rtsym(struct nfp_abm *abm, const char *name_fmt,
+ size_t size)
{
- return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS);
+ char pf_symbol[64];
+
+ size = array3_size(size, abm->num_bands, NFP_NET_MAX_RX_RINGS);
+ snprintf(pf_symbol, sizeof(pf_symbol), name_fmt,
+ abm->pf_id, nfp_abm_has_prio(abm) ? "_per_band" : "");
+
+ return nfp_abm_ctrl_find_rtsym(abm->app->pf, pf_symbol, size);
}
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
{
struct nfp_pf *pf = abm->app->pf;
const struct nfp_rtsym *sym;
- unsigned int pf_id;
- char pf_symbol[64];
+ int res;
+
+ abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp);
+
+ /* Check if Qdisc offloads are supported */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->red_support = res;
+
+ /* Read count of prios and prio bands */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_BANDS_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->num_bands = res;
+
+ res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_PRIOS_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->num_prios = res;
+
+ /* Read available actions */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_ACT_MASK_SYM_NAME,
+ BIT(NFP_ABM_ACT_MARK_DROP));
+ if (res < 0)
+ return res;
+ abm->action_mask = res;
+
+ abm->prio_map_len = nfp_abm_ctrl_prio_map_size(abm);
+ abm->dscp_mask = GENMASK(7, 8 - order_base_2(abm->num_prios));
+
+ /* Check values are sane, U16_MAX is arbitrarily chosen as max */
+ if (!is_power_of_2(abm->num_bands) || !is_power_of_2(abm->num_prios) ||
+ abm->num_bands > U16_MAX || abm->num_prios > U16_MAX ||
+ (abm->num_bands == 1) != (abm->num_prios == 1)) {
+ nfp_err(pf->cpp,
+ "invalid priomap description num bands: %u and num prios: %u\n",
+ abm->num_bands, abm->num_prios);
+ return -EINVAL;
+ }
- pf_id = nfp_cppcore_pcie_unit(pf->cpp);
- abm->pf_id = pf_id;
+ /* Find level and stat symbols */
+ if (!abm->red_support)
+ return 0;
- snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id);
- sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE);
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QLVL_SYM_NAME,
+ NFP_QLVL_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->q_lvls = sym;
- snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id);
- sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE);
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QMSTAT_SYM_NAME,
+ NFP_QMSTAT_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->qm_stats = sym;
+ if (nfp_abm_has_prio(abm)) {
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_Q_STAT_SYM_NAME,
+ NFP_Q_STAT_STRIDE);
+ if (IS_ERR(sym))
+ return PTR_ERR(sym);
+ abm->q_stats = sym;
+ }
+
return 0;
}
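The dscp_mask computed in nfp_abm_ctrl_find_addrs() keeps the top order_base_2(num_prios) bits of the ToS byte, which is what the u32 classifier check uses. A worked example for an assumed num_prios of 64:

#include <stdio.h>

int main(void)
{
	unsigned int order = 6;				/* order_base_2(64) */
	unsigned int lo = 8 - order;			/* 2 */
	unsigned int mask = (0xffu << lo) & 0xffu;	/* GENMASK(7, 2) */

	printf("dscp_mask = %#x\n", mask);		/* prints 0xfc */
	return 0;
}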
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index c0830c0c2c3f..4d4ff5844c47 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -2,14 +2,13 @@
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
#include <linux/slab.h>
-#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
-#include <net/red.h>
#include "../nfpcore/nfp.h"
#include "../nfpcore/nfp_cpp.h"
@@ -28,269 +27,6 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
}
static int
-__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs, u32 init_val)
-{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
- int ret;
-
- ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
- memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
-
- alink->parent = handle;
- alink->num_qdiscs = qs;
- port->tc_offload_cnt = qs;
-
- return ret;
-}
-
-static void
-nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs)
-{
- __nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
-}
-
-static int
-nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- unsigned int i = TC_H_MIN(opt->parent) - 1;
-
- if (opt->parent == TC_H_ROOT)
- i = 0;
- else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
- i = TC_H_MIN(opt->parent) - 1;
- else
- return -EOPNOTSUPP;
-
- if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
- return -EOPNOTSUPP;
-
- return i;
-}
-
-static void
-nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle)
-{
- unsigned int i;
-
- for (i = 0; i < alink->num_qdiscs; i++)
- if (handle == alink->qdiscs[i].handle)
- break;
- if (i == alink->num_qdiscs)
- return;
-
- if (alink->parent == TC_H_ROOT) {
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- } else {
- nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
- memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
- }
-}
-
-static int
-nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- bool existing;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- existing = i >= 0;
-
- if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
- nfp_warn(alink->abm->app->cpp,
- "RED offload failed - unsupported parameters\n");
- err = -EINVAL;
- goto err_destroy;
- }
-
- if (existing) {
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
- else
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- if (err)
- goto err_destroy;
- return 0;
- }
-
- if (opt->parent == TC_H_ROOT) {
- i = 0;
- err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
- opt->set.min);
- } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
- i = TC_H_MIN(opt->parent) - 1;
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- } else {
- return -EINVAL;
- }
- /* Set the handle to try full clean up, in case IO failed */
- alink->qdiscs[i].handle = opt->handle;
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i,
- &alink->qdiscs[i].stats);
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink,
- &alink->qdiscs[i].xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i,
- &alink->qdiscs[i].xstats);
- if (err)
- goto err_destroy;
-
- alink->qdiscs[i].stats.backlog_pkts = 0;
- alink->qdiscs[i].stats.backlog_bytes = 0;
-
- return 0;
-err_destroy:
- /* If the qdisc keeps on living, but we can't offload undo changes */
- if (existing) {
- opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
- opt->set.qstats->backlog -=
- alink->qdiscs[i].stats.backlog_bytes;
- }
- nfp_abm_red_destroy(netdev, alink, opt->handle);
-
- return err;
-}
-
-static void
-nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
- struct tc_qopt_offload_stats *stats)
-{
- _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
- new->tx_pkts - old->tx_pkts);
- stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
- stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
- stats->qstats->overlimits += new->overlimits - old->overlimits;
- stats->qstats->drops += new->drops - old->drops;
-}
-
-static int
-nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_stats *prev_stats;
- struct nfp_alink_stats stats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_stats = &alink->qdiscs[i].stats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
-
- *prev_stats = stats;
-
- return 0;
-}
-
-static int
-nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_xstats *prev_xstats;
- struct nfp_alink_xstats xstats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_xstats = &alink->qdiscs[i].xstats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink, &xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
- if (err)
- return err;
-
- opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
- opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
-
- *prev_xstats = xstats;
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_RED_REPLACE:
- return nfp_abm_red_replace(netdev, alink, opt);
- case TC_RED_DESTROY:
- nfp_abm_red_destroy(netdev, alink, opt->handle);
- return 0;
- case TC_RED_STATS:
- return nfp_abm_red_stats(alink, opt);
- case TC_RED_XSTATS:
- return nfp_abm_red_xstats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
-nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
-{
- struct nfp_alink_stats stats;
- unsigned int i;
- int err;
-
- for (i = 0; i < alink->num_qdiscs; i++) {
- if (alink->qdiscs[i].handle == TC_H_UNSPEC)
- continue;
-
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
- &opt->stats);
- }
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_mq_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_MQ_CREATE:
- nfp_abm_reset_root(netdev, alink, opt->handle,
- alink->total_queues);
- return 0;
- case TC_MQ_DESTROY:
- if (opt->handle == alink->parent)
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- return 0;
- case TC_MQ_STATS:
- return nfp_abm_mq_stats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data)
{
@@ -302,10 +38,16 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
switch (type) {
+ case TC_SETUP_ROOT_QDISC:
+ return nfp_abm_setup_root(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_MQ:
return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_RED:
return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
+ case TC_SETUP_QDISC_GRED:
+ return nfp_abm_setup_tc_gred(netdev, repr->app_priv, type_data);
+ case TC_SETUP_BLOCK:
+ return nfp_abm_setup_cls_block(netdev, repr, type_data);
default:
return -EOPNOTSUPP;
}
@@ -384,7 +126,9 @@ nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink,
reprs = nfp_reprs_get_locked(app, rtype);
WARN(nfp_repr_get_locked(app, reprs, alink->id), "duplicate repr");
+ rtnl_lock();
rcu_assign_pointer(reprs->reprs[alink->id], netdev);
+ rtnl_unlock();
nfp_info(app->cpp, "%s Port %d Representor(%s) created\n",
ptype == NFP_PORT_PF_PORT ? "PCIe" : "Phys",
@@ -410,7 +154,9 @@ nfp_abm_kill_repr(struct nfp_app *app, struct nfp_abm_link *alink,
netdev = nfp_repr_get_locked(app, reprs, alink->id);
if (!netdev)
return;
+ rtnl_lock();
rcu_assign_pointer(reprs->reprs[alink->id], NULL);
+ rtnl_unlock();
synchronize_rcu();
/* Cast to make sure nfp_repr_clean_and_free() takes a nfp_repr */
nfp_repr_clean_and_free((struct nfp_repr *)netdev_priv(netdev));
@@ -461,6 +207,9 @@ static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm)
struct nfp_net *nn;
int err;
+ if (!abm->red_support)
+ return -EOPNOTSUPP;
+
err = nfp_abm_ctrl_qm_enable(abm);
if (err)
return err;
@@ -573,31 +322,34 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
alink->abm = abm;
alink->vnic = nn;
alink->id = id;
- alink->parent = TC_H_ROOT;
alink->total_queues = alink->vnic->max_rx_rings;
- alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
- GFP_KERNEL);
- if (!alink->qdiscs) {
- err = -ENOMEM;
+
+ INIT_LIST_HEAD(&alink->dscp_map);
+
+ err = nfp_abm_ctrl_read_params(alink);
+ if (err)
+ goto err_free_alink;
+
+ alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
+ if (!alink->prio_map)
goto err_free_alink;
- }
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.
*/
err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
if (err < 0)
- goto err_free_qdiscs;
+ goto err_free_priomap;
netif_keep_dst(nn->dp.netdev);
nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
- nfp_abm_ctrl_read_params(alink);
+ INIT_RADIX_TREE(&alink->qdiscs, GFP_KERNEL);
return 0;
-err_free_qdiscs:
- kvfree(alink->qdiscs);
+err_free_priomap:
+ kfree(alink->prio_map);
err_free_alink:
kfree(alink);
return err;
@@ -608,10 +360,20 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
struct nfp_abm_link *alink = nn->app_priv;
nfp_abm_kill_reprs(alink->abm, alink);
- kvfree(alink->qdiscs);
+ WARN(!radix_tree_empty(&alink->qdiscs), "left over qdiscs\n");
+ kfree(alink->prio_map);
kfree(alink);
}
+static int nfp_abm_vnic_init(struct nfp_app *app, struct nfp_net *nn)
+{
+ struct nfp_abm_link *alink = nn->app_priv;
+
+ if (nfp_abm_has_prio(alink->abm))
+ return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+ return 0;
+}
+
static u64 *
nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data)
{
@@ -659,6 +421,21 @@ nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port,
return data;
}
+static int nfp_abm_fw_init_reset(struct nfp_abm *abm)
+{
+ unsigned int i;
+
+ if (!abm->red_support)
+ return 0;
+
+ for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++) {
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+ __nfp_abm_ctrl_set_q_act(abm, i, NFP_ABM_ACT_DROP);
+ }
+
+ return nfp_abm_ctrl_qm_disable(abm);
+}
+
static int nfp_abm_init(struct nfp_app *app)
{
struct nfp_pf *pf = app->pf;
@@ -690,15 +467,31 @@ static int nfp_abm_init(struct nfp_app *app)
if (err)
goto err_free_abm;
+ err = -ENOMEM;
+ abm->num_thresholds = array_size(abm->num_bands, NFP_NET_MAX_RX_RINGS);
+ abm->threshold_undef = bitmap_zalloc(abm->num_thresholds, GFP_KERNEL);
+ if (!abm->threshold_undef)
+ goto err_free_abm;
+
+ abm->thresholds = kvcalloc(abm->num_thresholds,
+ sizeof(*abm->thresholds), GFP_KERNEL);
+ if (!abm->thresholds)
+ goto err_free_thresh_umap;
+
+ abm->actions = kvcalloc(abm->num_thresholds, sizeof(*abm->actions),
+ GFP_KERNEL);
+ if (!abm->actions)
+ goto err_free_thresh;
+
/* We start in legacy mode, make sure advanced queuing is disabled */
- err = nfp_abm_ctrl_qm_disable(abm);
+ err = nfp_abm_fw_init_reset(abm);
if (err)
- goto err_free_abm;
+ goto err_free_act;
err = -ENOMEM;
reprs = nfp_reprs_alloc(pf->max_data_vnics);
if (!reprs)
- goto err_free_abm;
+ goto err_free_act;
RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);
reprs = nfp_reprs_alloc(pf->max_data_vnics);
@@ -710,6 +503,12 @@ static int nfp_abm_init(struct nfp_app *app)
err_free_phys:
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+err_free_act:
+ kvfree(abm->actions);
+err_free_thresh:
+ kvfree(abm->thresholds);
+err_free_thresh_umap:
+ bitmap_free(abm->threshold_undef);
err_free_abm:
kfree(abm);
app->priv = NULL;
@@ -723,6 +522,9 @@ static void nfp_abm_clean(struct nfp_app *app)
nfp_abm_eswitch_clean_up(abm);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+ bitmap_free(abm->threshold_undef);
+ kvfree(abm->actions);
+ kvfree(abm->thresholds);
kfree(abm);
app->priv = NULL;
}
@@ -736,6 +538,7 @@ const struct nfp_app_type app_abm = {
.vnic_alloc = nfp_abm_vnic_alloc,
.vnic_free = nfp_abm_vnic_free,
+ .vnic_init = nfp_abm_vnic_init,
.port_get_stats = nfp_abm_port_get_stats,
.port_get_stats_count = nfp_abm_port_get_stats_count,
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index f907b7d98917..49749c60885e 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -4,7 +4,19 @@
#ifndef __NFP_ABM_H__
#define __NFP_ABM_H__ 1
+#include <linux/bits.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
#include <net/devlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+/* Dump of 64 PRIOs and 256 REDs seems to take 850us on Xeon v4 @ 2.20GHz;
+ * 2.5ms / 400Hz seems more than sufficient for stats resolution.
+ */
+#define NFP_ABM_STATS_REFRESH_IVAL (2500 * 1000) /* ns */
+
+#define NFP_ABM_LVL_INFINITY S32_MAX
struct nfp_app;
struct nfp_net;
@@ -12,21 +24,64 @@ struct nfp_net;
#define NFP_ABM_PORTID_TYPE GENMASK(23, 16)
#define NFP_ABM_PORTID_ID GENMASK(7, 0)
+/* The possible actions if thresholds are exceeded */
+enum nfp_abm_q_action {
+ /* mark if ECN capable, otherwise drop */
+ NFP_ABM_ACT_MARK_DROP = 0,
+ /* mark if ECN capable, otherwise goto QM */
+ NFP_ABM_ACT_MARK_QUEUE = 1,
+ NFP_ABM_ACT_DROP = 2,
+ NFP_ABM_ACT_QUEUE = 3,
+ NFP_ABM_ACT_NOQUEUE = 4,
+};
+
/**
* struct nfp_abm - ABM NIC app structure
* @app: back pointer to nfp_app
* @pf_id: ID of our PF link
+ *
+ * @red_support: is RED offload supported
+ * @num_prios: number of supported DSCP priorities
+ * @num_bands: number of supported DSCP priority bands
+ * @action_mask: bitmask of supported actions
+ *
+ * @thresholds: current threshold configuration
+ * @threshold_undef: bitmap of thresholds which have not been set
+ * @actions: current FW action configuration
+ * @num_thresholds: number of @thresholds and bits in @threshold_undef
+ *
+ * @prio_map_len: computed length of FW priority map (in bytes)
+ * @dscp_mask: mask FW will apply on DSCP field
+ *
* @eswitch_mode: devlink eswitch mode, advanced functions only visible
* in switchdev mode
+ *
* @q_lvls: queue level control area
* @qm_stats: queue statistics symbol
+ * @q_stats: basic queue statistics (only in per-band case)
*/
struct nfp_abm {
struct nfp_app *app;
unsigned int pf_id;
+
+ unsigned int red_support;
+ unsigned int num_prios;
+ unsigned int num_bands;
+ unsigned int action_mask;
+
+ u32 *thresholds;
+ unsigned long *threshold_undef;
+ u8 *actions;
+ size_t num_thresholds;
+
+ unsigned int prio_map_len;
+ u8 dscp_mask;
+
enum devlink_eswitch_mode eswitch_mode;
+
const struct nfp_rtsym *q_lvls;
const struct nfp_rtsym *qm_stats;
+ const struct nfp_rtsym *q_stats;
};
/**
@@ -57,16 +112,76 @@ struct nfp_alink_xstats {
u64 pdrop;
};
+enum nfp_qdisc_type {
+ NFP_QDISC_NONE = 0,
+ NFP_QDISC_MQ,
+ NFP_QDISC_RED,
+ NFP_QDISC_GRED,
+};
+
+#define NFP_QDISC_UNTRACKED ((struct nfp_qdisc *)1UL)
+
/**
- * struct nfp_red_qdisc - representation of single RED Qdisc
- * @handle: handle of currently offloaded RED Qdisc
- * @stats: statistics from last refresh
- * @xstats: base of extended statistics
+ * struct nfp_qdisc - tracked TC Qdisc
+ * @netdev: netdev on which Qdisc was created
+ * @type: Qdisc type
+ * @handle: handle of this Qdisc
+ * @parent_handle: handle of the parent (unreliable if Qdisc was grafted)
+ * @use_cnt: number of attachment points in the hierarchy
+ * @num_children: current size of the @children array
+ * @children: pointers to children
+ *
+ * @params_ok: parameters of this Qdisc are OK for offload
+ * @offload_mark: offload refresh state - selected for offload
+ * @offloaded: Qdisc is currently offloaded to the HW
+ *
+ * @mq: MQ Qdisc specific parameters and state
+ * @mq.stats: current stats of the MQ Qdisc
+ * @mq.prev_stats: previously reported @mq.stats
+ *
+ * @red: RED Qdisc specific parameters and state
+ * @red.num_bands: Number of valid entries in the @red.band table
+ * @red.band: Per-band array of RED instances
+ * @red.band.ecn: ECN marking is enabled (rather than drop)
+ * @red.band.threshold: ECN marking threshold
+ * @red.band.stats: current stats of the RED Qdisc
+ * @red.band.prev_stats: previously reported @red.band.stats
+ * @red.band.xstats: extended stats for RED - current
+ * @red.band.prev_xstats: extended stats for RED - previously reported
*/
-struct nfp_red_qdisc {
+struct nfp_qdisc {
+ struct net_device *netdev;
+ enum nfp_qdisc_type type;
u32 handle;
- struct nfp_alink_stats stats;
- struct nfp_alink_xstats xstats;
+ u32 parent_handle;
+ unsigned int use_cnt;
+ unsigned int num_children;
+ struct nfp_qdisc **children;
+
+ bool params_ok;
+ bool offload_mark;
+ bool offloaded;
+
+ union {
+ /* NFP_QDISC_MQ */
+ struct {
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ } mq;
+ /* TC_SETUP_QDISC_RED, TC_SETUP_QDISC_GRED */
+ struct {
+ unsigned int num_bands;
+
+ struct {
+ bool ecn;
+ u32 threshold;
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ struct nfp_alink_xstats xstats;
+ struct nfp_alink_xstats prev_xstats;
+ } band[MAX_DPs];
+ } red;
+ };
};
/**
@@ -76,9 +191,17 @@ struct nfp_red_qdisc {
* @id: id of the data vNIC
* @queue_base: id of base to host queue within PCIe (not QC idx)
* @total_queues: number of PF queues
- * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT
- * @num_qdiscs: number of currently used qdiscs
- * @qdiscs: array of qdiscs
+ *
+ * @last_stats_update: ktime of last stats update
+ *
+ * @prio_map: current map of priorities
+ * @has_prio: @prio_map is valid
+ *
+ * @def_band: default band to use
+ * @dscp_map: list of DSCP to band mappings
+ *
+ * @root_qdisc: pointer to the current root of the Qdisc hierarchy
+ * @qdiscs: all qdiscs recorded by major part of the handle
*/
struct nfp_abm_link {
struct nfp_abm *abm;
@@ -86,26 +209,65 @@ struct nfp_abm_link {
unsigned int id;
unsigned int queue_base;
unsigned int total_queues;
- u32 parent;
- unsigned int num_qdiscs;
- struct nfp_red_qdisc *qdiscs;
+
+ u64 last_stats_update;
+
+ u32 *prio_map;
+ bool has_prio;
+
+ u8 def_band;
+ struct list_head dscp_map;
+
+ struct nfp_qdisc *root_qdisc;
+ struct radix_tree_root qdiscs;
};
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
+static inline bool nfp_abm_has_prio(struct nfp_abm *abm)
+{
+ return abm->num_bands > 1;
+}
+
+static inline bool nfp_abm_has_drop(struct nfp_abm *abm)
+{
+ return abm->action_mask & BIT(NFP_ABM_ACT_DROP);
+}
+
+static inline bool nfp_abm_has_mark(struct nfp_abm *abm)
+{
+ return abm->action_mask & BIT(NFP_ABM_ACT_MARK_DROP);
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink);
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt);
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt);
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt);
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt);
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+ struct tc_block_offload *opt);
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i,
- u32 val);
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val);
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, u32 val);
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+ enum nfp_abm_q_action act);
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, enum nfp_abm_q_action act);
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats);
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats);
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i);
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm);
int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm);
+void nfp_abm_prio_map_update(struct nfp_abm *abm);
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
new file mode 100644
index 000000000000..2473fb5f75e5
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/red.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+#include "main.h"
+
+static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
+{
+ return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
+}
+
+static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
+{
+ return qdisc->children[id] &&
+ qdisc->children[id] != NFP_QDISC_UNTRACKED;
+}
+
+static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
+{
+ return rtnl_dereference(*slot);
+}
+
+static void
+nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
+ struct nfp_alink_stats *child)
+{
+ parent->tx_pkts += child->tx_pkts;
+ parent->tx_bytes += child->tx_bytes;
+ parent->backlog_pkts += child->backlog_pkts;
+ parent->backlog_bytes += child->backlog_bytes;
+ parent->overlimits += child->overlimits;
+ parent->drops += child->drops;
+}
+
+static void
+nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ unsigned int i;
+ int err;
+
+ if (!qdisc->offloaded)
+ return;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
+ &qdisc->red.band[i].stats);
+ if (err)
+ nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
+ i, queue, err);
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
+ &qdisc->red.band[i].xstats);
+ if (err)
+ nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
+ i, queue, err);
+ }
+}
+
+static void
+nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ if (qdisc->type != NFP_QDISC_MQ)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i))
+ nfp_abm_stats_update_red(alink, qdisc->children[i], i);
+}
+
+static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
+{
+ alink->last_stats_update = time_now;
+ if (alink->root_qdisc)
+ nfp_abm_stats_update_mq(alink, alink->root_qdisc);
+}
+
+static void nfp_abm_stats_update(struct nfp_abm_link *alink)
+{
+ u64 now;
+
+ /* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
+ * of all their leaves, so we would read the same stat multiple times
+ * for every dump.
+ */
+ now = ktime_get();
+ if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
+ return;
+
+ __nfp_abm_stats_update(alink, now);
+}
+
+static void
+nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
+ unsigned int start, unsigned int end)
+{
+ unsigned int i;
+
+ for (i = start; i < end; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i)) {
+ qdisc->children[i]->use_cnt--;
+ qdisc->children[i] = NULL;
+ }
+}
+
+static void
+nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ /* Don't complain when qdisc is getting unlinked */
+ if (qdisc->use_cnt)
+ nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
+ qdisc->handle);
+
+ if (!nfp_abm_qdisc_is_red(qdisc))
+ return;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ qdisc->red.band[i].stats.backlog_pkts = 0;
+ qdisc->red.band[i].stats.backlog_bytes = 0;
+ }
+}
+
+static int
+__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, struct nfp_alink_stats *prev_stats,
+ struct nfp_alink_xstats *prev_xstats)
+{
+ u64 backlog_pkts, backlog_bytes;
+ int err;
+
+ /* Don't touch the backlog; it can only be reset after it has
+ * been reported back to the tc qdisc stats.
+ */
+ backlog_pkts = prev_stats->backlog_pkts;
+ backlog_bytes = prev_stats->backlog_bytes;
+
+ err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED stats init (%d, %d) failed with error %d\n",
+ band, queue, err);
+ return err;
+ }
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED xstats init (%d, %d) failed with error %d\n",
+ band, queue, err);
+ return err;
+ }
+
+ prev_stats->backlog_pkts = backlog_pkts;
+ prev_stats->backlog_bytes = backlog_bytes;
+ return 0;
+}
+
+static int
+nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ err = __nfp_abm_stats_init(alink, i, queue,
+ &qdisc->red.band[i].prev_stats,
+ &qdisc->red.band[i].prev_xstats);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ bool good_red, good_gred;
+ unsigned int i;
+
+ good_red = qdisc->type == NFP_QDISC_RED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1 &&
+ !alink->has_prio &&
+ !qdisc->children[0];
+ good_gred = qdisc->type == NFP_QDISC_GRED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1;
+ qdisc->offload_mark = good_red || good_gred;
+
+ /* If we are starting offload, init prev_stats */
+ if (qdisc->offload_mark && !qdisc->offloaded)
+ if (nfp_abm_stats_init(alink, qdisc, queue))
+ qdisc->offload_mark = false;
+
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->abm->num_bands; i++) {
+ enum nfp_abm_q_action act;
+
+ nfp_abm_ctrl_set_q_lvl(alink, i, queue,
+ qdisc->red.band[i].threshold);
+ act = qdisc->red.band[i].ecn ?
+ NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP;
+ nfp_abm_ctrl_set_q_act(alink, i, queue, act);
+ }
+}
+
+static void
+nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++) {
+ struct nfp_qdisc *child = qdisc->children[i];
+
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ nfp_abm_offload_compile_red(alink, child, i);
+ }
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct radix_tree_iter iter;
+ struct nfp_qdisc *qdisc;
+ void __rcu **slot;
+ size_t i;
+
+ /* Mark all thresholds as unconfigured */
+ for (i = 0; i < abm->num_bands; i++)
+ __bitmap_set(abm->threshold_undef,
+ i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
+ alink->total_queues);
+
+ /* Clear offload marks */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ qdisc->offload_mark = false;
+ }
+
+ if (alink->root_qdisc)
+ nfp_abm_offload_compile_mq(alink, alink->root_qdisc);
+
+ /* Refresh offload status */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ if (!qdisc->offload_mark && qdisc->offloaded)
+ nfp_abm_qdisc_offload_stop(alink, qdisc);
+ qdisc->offloaded = qdisc->offload_mark;
+ }
+
+ /* Reset the unconfigured thresholds */
+ for (i = 0; i < abm->num_thresholds; i++)
+ if (test_bit(i, abm->threshold_undef))
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
+ __nfp_abm_stats_update(alink, ktime_get());
+}
+
+static void
+nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct radix_tree_iter iter;
+ unsigned int mq_refs = 0;
+ void __rcu **slot;
+
+ if (!qdisc->use_cnt)
+ return;
+ /* MQ doesn't notify well on destruction; we need special handling of
+ * its children.
+ */
+ if (qdisc->type == NFP_QDISC_MQ &&
+ qdisc == alink->root_qdisc &&
+ netdev->reg_state == NETREG_UNREGISTERING)
+ return;
+
+ /* Count refs held by MQ instances and clear pointers */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
+ unsigned int i;
+
+ if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
+ continue;
+ for (i = 0; i < mq->num_children; i++)
+ if (mq->children[i] == qdisc) {
+ mq->children[i] = NULL;
+ mq_refs++;
+ }
+ }
+
+ WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
+ qdisc->use_cnt, mq_refs);
+}
+
+static void
+nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+
+ if (!qdisc)
+ return;
+ nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
+ WARN_ON(radix_tree_delete(&alink->qdiscs,
+ TC_H_MAJ(qdisc->handle)) != qdisc);
+
+ kfree(qdisc->children);
+ kfree(qdisc);
+
+ port->tc_offload_cnt--;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+ struct nfp_qdisc *qdisc;
+ int err;
+
+ qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
+ if (!qdisc)
+ return NULL;
+
+ if (children) {
+ qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
+ if (!qdisc->children)
+ goto err_free_qdisc;
+ }
+
+ qdisc->netdev = netdev;
+ qdisc->type = type;
+ qdisc->parent_handle = parent_handle;
+ qdisc->handle = handle;
+ qdisc->num_children = children;
+
+ err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "Qdisc insertion into radix tree failed: %d\n", err);
+ goto err_free_child_tbl;
+ }
+
+ port->tc_offload_cnt++;
+ return qdisc;
+
+err_free_child_tbl:
+ kfree(qdisc->children);
+err_free_qdisc:
+ kfree(qdisc);
+ return NULL;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
+{
+ return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
+}
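Qdiscs are keyed in the radix tree by the major part of their TC handle, so a lookup by the qdisc's own handle or by one of its class handles resolves to the same entry. A small sketch of that keying (TC_H_MAJ here mirrors the uapi pkt_sched.h definition):

#include <stdint.h>
#include <stdio.h>

#define TC_H_MAJ(h)	((h) & 0xffff0000u)

int main(void)
{
	uint32_t qdisc_handle = 0x80010000;	/* 8001: */
	uint32_t class_handle = 0x80010003;	/* 8001:3 */

	printf("same radix tree key: %s\n",
	       TC_H_MAJ(qdisc_handle) == TC_H_MAJ(class_handle) ? "yes" : "no");
	return 0;
}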
+
+static int
+nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children, struct nfp_qdisc **qdisc)
+{
+ *qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (*qdisc) {
+ if (WARN_ON((*qdisc)->type != type))
+ return -EINVAL;
+ return 1;
+ }
+
+ *qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
+ children);
+ return *qdisc ? 0 : -ENOMEM;
+}
+
+static void
+nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
+ u32 handle)
+{
+ struct nfp_qdisc *qdisc;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return;
+
+ /* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
+ if (alink->root_qdisc == qdisc)
+ qdisc->use_cnt--;
+
+ nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
+ nfp_abm_qdisc_free(netdev, alink, qdisc);
+
+ if (alink->root_qdisc == qdisc) {
+ alink->root_qdisc = NULL;
+ /* Only root change matters, other changes are acted upon on
+ * the graft notification.
+ */
+ nfp_abm_qdisc_offload_update(alink);
+ }
+}
+
+static int
+nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
+ unsigned int id)
+{
+ struct nfp_qdisc *parent, *child;
+
+ parent = nfp_abm_qdisc_find(alink, handle);
+ if (!parent)
+ return 0;
+
+ if (WARN(id >= parent->num_children,
+ "graft child out of bound %d >= %d\n",
+ id, parent->num_children))
+ return -EINVAL;
+
+ nfp_abm_qdisc_unlink_children(parent, id, id + 1);
+
+ child = nfp_abm_qdisc_find(alink, child_handle);
+ if (child)
+ child->use_cnt++;
+ else
+ child = NFP_QDISC_UNTRACKED;
+ parent->children[id] = child;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+static void
+nfp_abm_stats_calculate(struct nfp_alink_stats *new,
+ struct nfp_alink_stats *old,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_queue *qstats)
+{
+ _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
+ new->tx_pkts - old->tx_pkts);
+ qstats->qlen += new->backlog_pkts - old->backlog_pkts;
+ qstats->backlog += new->backlog_bytes - old->backlog_bytes;
+ qstats->overlimits += new->overlimits - old->overlimits;
+ qstats->drops += new->drops - old->drops;
+}
+
+static void
+nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
+ struct nfp_alink_xstats *old,
+ struct red_stats *stats)
+{
+ stats->forced_mark += new->ecn_marked - old->ecn_marked;
+ stats->pdrop += new->pdrop - old->pdrop;
+}
+
+static int
+nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_gred_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
+ unsigned int i;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+ /* If the qdisc offload has stopped, we may need to adjust the backlog
+ * counters back, so carry on even if the qdisc is not currently offloaded.
+ */
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ if (!stats->xstats[i])
+ continue;
+
+ nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
+ &qdisc->red.band[i].prev_stats,
+ &stats->bstats[i], &stats->qstats[i]);
+ qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;
+
+ nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
+ &qdisc->red.band[i].prev_xstats,
+ stats->xstats[i]);
+ qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
+ }
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_gred_check_params(struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_abm *abm = alink->abm;
+ unsigned int i;
+
+ if (opt->set.grio_on || opt->set.wred_on) {
+ nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.dp_def != alink->def_band) {
+ nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
+ alink->def_band, opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.dp_cnt != abm->num_bands) {
+ nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
+ abm->num_bands, opt->parent, opt->handle);
+ return false;
+ }
+
+ for (i = 0; i < abm->num_bands; i++) {
+ struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];
+
+ if (!band->present)
+ return false;
+ if (!band->is_ecn && !nfp_abm_has_drop(abm)) {
+ nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->is_ecn && !nfp_abm_has_mark(abm)) {
+ nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->is_harddrop) {
+ nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->min != band->max) {
+ nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->min > S32_MAX) {
+ nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
+ band->min, S32_MAX, opt->parent, opt->handle,
+ i);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int
+nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ unsigned int i;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
+ opt->handle, 0, &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
+ if (qdisc->params_ok) {
+ qdisc->red.num_bands = opt->set.dp_cnt;
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn;
+ qdisc->red.band[i].threshold = opt->set.tab[i].min;
+ }
+ }
+
+ if (qdisc->use_cnt)
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_GRED_REPLACE:
+ return nfp_abm_gred_replace(netdev, alink, opt);
+ case TC_GRED_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_GRED_STATS:
+ return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (!qdisc || !qdisc->offloaded)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
+ &qdisc->red.band[0].prev_xstats,
+ opt->xstats);
+ qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
+ return 0;
+}
+
+static int
+nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+ /* If the qdisc offload has stopped we may need to adjust the backlog
+ * counters back, so carry on even if the qdisc is not currently offloaded.
+ */
+
+ nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
+ &qdisc->red.band[0].prev_stats,
+ stats->bstats, stats->qstats);
+ qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_red_check_params(struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_abm *abm = alink->abm;
+
+ if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) {
+ nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) {
+ nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_harddrop) {
+ nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min != opt->set.max) {
+ nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min > NFP_ABM_LVL_INFINITY) {
+ nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
+ opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
+ opt->handle);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
+ opt->handle, 1, &qdisc);
+ if (ret < 0)
+ return ret;
+
+ /* If limit != 0 child gets reset */
+ if (opt->set.limit) {
+ if (nfp_abm_qdisc_child_valid(qdisc, 0))
+ qdisc->children[0]->use_cnt--;
+ qdisc->children[0] = NULL;
+ } else {
+ /* A qdisc that was just allocated without a limit will use noop_qdisc,
+ * i.e. a black hole.
+ */
+ if (!ret)
+ qdisc->children[0] = NFP_QDISC_UNTRACKED;
+ }
+
+ qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
+ if (qdisc->params_ok) {
+ qdisc->red.num_bands = 1;
+ qdisc->red.band[0].ecn = opt->set.is_ecn;
+ qdisc->red.band[0].threshold = opt->set.min;
+ }
+
+ if (qdisc->use_cnt == 1)
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_RED_REPLACE:
+ return nfp_abm_red_replace(netdev, alink, opt);
+ case TC_RED_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_RED_STATS:
+ return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
+ case TC_RED_XSTATS:
+ return nfp_abm_red_xstats(alink, opt);
+ case TC_RED_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->child_handle, 0);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
+ TC_H_ROOT, opt->handle, alink->total_queues,
+ &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = true;
+ qdisc->offloaded = true;
+ nfp_abm_qdisc_offload_update(alink);
+ return 0;
+}
+
+static int
+nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc, *red;
+ unsigned int i, j;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_update(alink);
+
+ /* MQ stats are summed over the children in the core, so we need
+ * to add up the unreported child values.
+ */
+ memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
+ memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
+
+ for (i = 0; i < qdisc->num_children; i++) {
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
+ continue;
+ red = qdisc->children[i];
+
+ for (j = 0; j < red->red.num_bands; j++) {
+ nfp_abm_stats_propagate(&qdisc->mq.stats,
+ &red->red.band[j].stats);
+ nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
+ &red->red.band[j].prev_stats);
+ }
+ }
+
+ nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
+ stats->bstats, stats->qstats);
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_MQ_CREATE:
+ return nfp_abm_mq_create(netdev, alink, opt);
+ case TC_MQ_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_MQ_STATS:
+ return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
+ case TC_MQ_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->graft_params.child_handle,
+ opt->graft_params.queue);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt)
+{
+ if (opt->ingress)
+ return -EOPNOTSUPP;
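+ /* Drop the reference held on the previous root qdisc and take one
+ * on the new root, if it is tracked.
+ */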
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt--;
+ alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt++;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 97d33bb4d84d..e23ca90289f7 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -2382,6 +2382,49 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+{
+ /* Set signedness bit (MSB of result). */
+ emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst),
+ SHF_SC_R_SHF, shift_amt);
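+ /* 32-bit ALU ops zero-extend the result, so clear the high word. */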
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+
+ return 0;
+}
+
+static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 umin, umax;
+ u8 dst, src;
+
+ dst = insn->dst_reg * 2;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
+ if (umin == umax)
+ return __ashr_imm(nfp_prog, dst, umin);
+
+ src = insn->src_reg * 2;
+ /* NOTE: the first insn will set both indirect shift amount (source A)
+ * and signedness bit (MSB of result).
+ */
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst));
+ emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
+ reg_b(dst), SHF_SC_R_SHF);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+
+ return 0;
+}
+
+static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
+
+ return __ashr_imm(nfp_prog, dst, insn->imm);
+}
+
static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -3009,26 +3052,19 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
+ u8 dst_gpr = insn->dst_reg * 2;
swreg tmp_reg;
- if (!imm) {
- meta->skip = true;
- return 0;
- }
-
- if (imm & ~0U) {
- tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
- emit_alu(nfp_prog, reg_none(),
- reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
- emit_br(nfp_prog, BR_BNE, insn->off, 0);
- }
-
- if (imm >> 32) {
- tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(dst_gpr), ALU_OP_AND, tmp_reg);
+ /* Upper word of the mask can only be 0 or ~0 from sign extension,
+ * so either ignore it or OR the whole thing in.
+ */
+ if (imm >> 32)
emit_alu(nfp_prog, reg_none(),
- reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
- emit_br(nfp_prog, BR_BNE, insn->off, 0);
- }
+ reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog));
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
return 0;
}
@@ -3286,6 +3322,8 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_DIV | BPF_K] = div_imm,
[BPF_ALU | BPF_NEG] = neg_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg,
+ [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm,
[BPF_ALU | BPF_END | BPF_X] = end_reg32,
[BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
[BPF_LD | BPF_ABS | BPF_B] = data_ld1,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 6243af0ab025..dccae0319204 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app)
app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
}
- bpf->bpf_dev = bpf_offload_dev_create();
+ bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops);
err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
if (err)
goto err_free_neutral_maps;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 7f591d71ab28..941277936475 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -509,7 +509,11 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
-extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
+int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
+ int prev_insn_idx);
+int nfp_bpf_finalize(struct bpf_verifier_env *env);
+
+extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;
struct netdev_bpf;
struct nfp_app;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index ba8ceedcf6a2..f0283854fade 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -33,9 +33,6 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
struct nfp_bpf_neutral_map *record;
int err;
- /* Map record paths are entered via ndo, update side is protected. */
- ASSERT_RTNL();
-
/* Reuse path - other offloaded program is already tracking this map. */
record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
nfp_bpf_maps_neutral_params);
@@ -84,8 +81,6 @@ nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
bool freed = false;
int i;
- ASSERT_RTNL();
-
for (i = 0; i < nfp_prog->map_records_cnt; i++) {
if (--nfp_prog->map_records[i]->count) {
nfp_prog->map_records[i] = NULL;
@@ -187,11 +182,10 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog);
}
-static int
-nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
- struct bpf_prog *prog = bpf->verifier.prog;
+ struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
+ struct nfp_app *app = nn->app;
struct nfp_prog *nfp_prog;
int ret;
@@ -209,7 +203,6 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
goto err_free;
nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
- bpf->verifier.ops = &nfp_bpf_analyzer_ops;
return 0;
@@ -219,8 +212,9 @@ err_free:
return ret;
}
-static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
+static int nfp_bpf_translate(struct bpf_prog *prog)
{
+ struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_instr;
int err;
@@ -242,15 +236,13 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}
-static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
+static void nfp_bpf_destroy(struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kvfree(nfp_prog->prog);
nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
nfp_prog_free(nfp_prog);
-
- return 0;
}
/* Atomic engine requires values to be in big endian, we need to byte swap
@@ -422,12 +414,6 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
switch (bpf->command) {
- case BPF_OFFLOAD_VERIFIER_PREP:
- return nfp_bpf_verifier_prep(app, nn, bpf);
- case BPF_OFFLOAD_TRANSLATE:
- return nfp_bpf_translate(nn, bpf->offload.prog);
- case BPF_OFFLOAD_DESTROY:
- return nfp_bpf_destroy(nn, bpf->offload.prog);
case BPF_OFFLOAD_MAP_ALLOC:
return nfp_bpf_map_alloc(app->priv, bpf->offmap);
case BPF_OFFLOAD_MAP_FREE:
@@ -489,14 +475,15 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int max_mtu, max_stack, max_prog_len;
+ unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
dma_addr_t dma_addr;
void *img;
int err;
- max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- if (max_mtu < nn->dp.netdev->mtu) {
- NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
+ fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
+ if (fw_mtu < pkt_off) {
+ NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
return -EOPNOTSUPP;
}
@@ -600,3 +587,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
return 0;
}
+
+const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
+ .insn_hook = nfp_verify_insn,
+ .finalize = nfp_bpf_finalize,
+ .prepare = nfp_bpf_verifier_prep,
+ .translate = nfp_bpf_translate,
+ .destroy = nfp_bpf_destroy,
+};
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 99f977bfd8cc..337bb862ec1d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -623,8 +623,8 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
-static int
-nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
+ int prev_insn_idx)
{
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
@@ -745,7 +745,7 @@ continue_subprog:
goto continue_subprog;
}
-static int nfp_bpf_finalize(struct bpf_verifier_env *env)
+int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *info;
struct nfp_prog *nfp_prog;
@@ -788,8 +788,3 @@ static int nfp_bpf_finalize(struct bpf_verifier_env *env)
return 0;
}
-
-const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
- .insn_hook = nfp_verify_insn,
- .finalize = nfp_bpf_finalize,
-};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 244dc261006e..8d54b36afee8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,7 +2,6 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
-#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
@@ -91,21 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
return act_size;
}
-static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
- enum nfp_flower_tun_type tun_type)
-{
- if (!out_dev->rtnl_link_ops)
- return false;
-
- if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
- return tun_type == NFP_FL_TUNNEL_VXLAN;
-
- if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
- return tun_type == NFP_FL_TUNNEL_GENEVE;
-
- return false;
-}
-
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
@@ -151,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
- /* Only offload if egress ports are on the same device as the
- * ingress port.
- */
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
- return -EOPNOTSUPP;
+ if (nfp_netdev_is_nfp_repr(in_dev)) {
+ /* Confirm ingress and egress are on the same device. */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+ }
+
if (!nfp_netdev_is_nfp_repr(out_dev))
return -EOPNOTSUPP;
@@ -384,10 +369,21 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
return 0;
}
+struct ipv4_ttl_word {
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+};
+
static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
- struct nfp_fl_set_ip4_addrs *set_ip_addr)
+ struct nfp_fl_set_ip4_addrs *set_ip_addr,
+ struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
+ struct ipv4_ttl_word *ttl_word_mask;
+ struct ipv4_ttl_word *ttl_word;
+ struct iphdr *tos_word_mask;
+ struct iphdr *tos_word;
__be32 exact, mask;
/* We are expecting tcf_pedit to return a big endian value */
@@ -402,20 +398,53 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
set_ip_addr->ipv4_dst_mask |= mask;
set_ip_addr->ipv4_dst &= ~mask;
set_ip_addr->ipv4_dst |= exact & mask;
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+ NFP_FL_LW_SIZ;
break;
case offsetof(struct iphdr, saddr):
set_ip_addr->ipv4_src_mask |= mask;
set_ip_addr->ipv4_src &= ~mask;
set_ip_addr->ipv4_src |= exact & mask;
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+ NFP_FL_LW_SIZ;
+ break;
+ case offsetof(struct iphdr, ttl):
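+ /* The pedit word at this offset also covers the protocol and
+ * checksum fields; only TTL rewrites are supported.
+ */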
+ ttl_word_mask = (struct ipv4_ttl_word *)&mask;
+ ttl_word = (struct ipv4_ttl_word *)&exact;
+
+ if (ttl_word_mask->protocol || ttl_word_mask->check)
+ return -EOPNOTSUPP;
+
+ set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
+ set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
+ set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
+ set_ip_ttl_tos->head.jump_id =
+ NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+ set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+ NFP_FL_LW_SIZ;
+ break;
+ case round_down(offsetof(struct iphdr, tos), 4):
+ tos_word_mask = (struct iphdr *)&mask;
+ tos_word = (struct iphdr *)&exact;
+
+ if (tos_word_mask->version || tos_word_mask->ihl ||
+ tos_word_mask->tot_len)
+ return -EOPNOTSUPP;
+
+ set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
+ set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
+ set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
+ set_ip_ttl_tos->head.jump_id =
+ NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+ set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+ NFP_FL_LW_SIZ;
break;
default:
return -EOPNOTSUPP;
}
- set_ip_addr->reserved = cpu_to_be16(0);
- set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
- set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
-
return 0;
}
@@ -432,12 +461,57 @@ nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}
+struct ipv6_hop_limit_word {
+ __be16 payload_len;
+ u8 nexthdr;
+ u8 hop_limit;
+};
+
+static int
+nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+{
+ struct ipv6_hop_limit_word *fl_hl_mask;
+ struct ipv6_hop_limit_word *fl_hl;
+
+ switch (off) {
+ case offsetof(struct ipv6hdr, payload_len):
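+ /* This word also covers payload_len and nexthdr; only hop limit
+ * rewrites are supported.
+ */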
+ fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
+ fl_hl = (struct ipv6_hop_limit_word *)&exact;
+
+ if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
+ return -EOPNOTSUPP;
+
+ ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
+ ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
+ ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
+ fl_hl_mask->hop_limit;
+ break;
+ case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
+ if (mask & ~IPV6_FLOW_LABEL_MASK ||
+ exact & ~IPV6_FLOW_LABEL_MASK)
+ return -EOPNOTSUPP;
+
+ ip_hl_fl->ipv6_label_mask |= mask;
+ ip_hl_fl->ipv6_label &= ~mask;
+ ip_hl_fl->ipv6_label |= exact & mask;
+ break;
+ }
+
+ ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
+ ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
struct nfp_fl_set_ipv6_addr *ip_dst,
- struct nfp_fl_set_ipv6_addr *ip_src)
+ struct nfp_fl_set_ipv6_addr *ip_src,
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
__be32 exact, mask;
+ int err = 0;
u8 word;
/* We are expecting tcf_pedit to return a big endian value */
@@ -448,7 +522,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
return -EOPNOTSUPP;
if (off < offsetof(struct ipv6hdr, saddr)) {
- return -EOPNOTSUPP;
+ err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
+ ip_hl_fl);
} else if (off < offsetof(struct ipv6hdr, daddr)) {
word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
@@ -462,7 +537,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
return -EOPNOTSUPP;
}
- return 0;
+ return err;
}
static int
@@ -513,6 +588,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
char *nfp_action, int *a_len, u32 *csum_updated)
{
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+ struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
+ struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
@@ -522,6 +599,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
u32 offset, cmd;
u8 ip_proto = 0;
+ memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
+ memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
memset(&set_ip6_src, 0, sizeof(set_ip6_src));
memset(&set_ip_addr, 0, sizeof(set_ip_addr));
@@ -542,11 +621,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
err = nfp_fl_set_eth(action, idx, offset, &set_eth);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+ err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr,
+ &set_ip_ttl_tos);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
- &set_ip6_src);
+ &set_ip6_src, &set_ip6_tc_hl_fl);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
err = nfp_fl_set_tport(action, idx, offset, &set_tport,
@@ -577,6 +657,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
memcpy(nfp_action, &set_eth, act_size);
*a_len += act_size;
}
+ if (set_ip_ttl_tos.head.len_lw) {
+ nfp_action += act_size;
+ act_size = sizeof(set_ip_ttl_tos);
+ memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+ *a_len += act_size;
+
+ /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+ nfp_fl_csum_l4_to_flag(ip_proto);
+ }
if (set_ip_addr.head.len_lw) {
nfp_action += act_size;
act_size = sizeof(set_ip_addr);
@@ -587,6 +677,15 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
+ if (set_ip6_tc_hl_fl.head.len_lw) {
+ nfp_action += act_size;
+ act_size = sizeof(set_ip6_tc_hl_fl);
+ memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+ *a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
+ }
if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
@@ -728,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
*a_len += sizeof(struct nfp_fl_push_vlan);
} else if (is_tcf_tunnel_set(a)) {
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
- struct nfp_repr *repr = netdev_priv(netdev);
- *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 29d673aa5277..15f41cfef9f1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
+#include <net/vxlan.h>
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
@@ -65,8 +66,10 @@
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10
#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL 13
#define NFP_FL_ACTION_OPCODE_SET_UDP 14
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
@@ -82,6 +85,8 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
+
/* LAG ports */
#define NFP_FL_LAG_OUT 0xC0DE0000
@@ -125,6 +130,26 @@ struct nfp_fl_set_ip4_addrs {
__be32 ipv4_dst;
};
+struct nfp_fl_set_ip4_ttl_tos {
+ struct nfp_fl_act_head head;
+ u8 ipv4_ttl_mask;
+ u8 ipv4_tos_mask;
+ u8 ipv4_ttl;
+ u8 ipv4_tos;
+ __be16 reserved;
+};
+
+struct nfp_fl_set_ipv6_tc_hl_fl {
+ struct nfp_fl_act_head head;
+ u8 ipv6_tc_mask;
+ u8 ipv6_hop_limit_mask;
+ __be16 reserved;
+ u8 ipv6_tc;
+ u8 ipv6_hop_limit;
+ __be32 ipv6_label_mask;
+ __be32 ipv6_label;
+};
+
struct nfp_fl_set_ipv6_addr {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -475,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
return skb->len - NFP_FLOWER_CMSG_HLEN;
}
+static inline bool
+nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
+ enum nfp_flower_tun_type tun_type)
+{
+ if (netif_is_vxlan(netdev))
+ return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (netif_is_geneve(netdev))
+ return tun_type == NFP_FL_TUNNEL_GENEVE;
+
+ return false;
+}
+
+static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
+{
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+ if (netif_is_vxlan(netdev))
+ return true;
+ if (netif_is_geneve(netdev))
+ return true;
+
+ return false;
+}
+
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 81dcf5b318ba..5db838f45694 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -472,17 +472,25 @@ nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
-static int
+static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
struct net_device *master)
{
struct nfp_fl_lag_group *group;
+ struct nfp_flower_priv *priv;
+
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+ if (!netif_is_bond_master(master))
+ return;
mutex_lock(&lag->lock);
group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
if (!group) {
mutex_unlock(&lag->lock);
- return -ENOENT;
+ nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
+ netdev_name(master));
+ return;
}
group->to_remove = true;
@@ -490,7 +498,6 @@ nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
- return 0;
}
static int
@@ -575,7 +582,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
return 0;
}
-static int
+static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
struct netdev_notifier_changelowerstate_info *info)
{
@@ -586,18 +593,18 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
unsigned long *flags;
if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
- return 0;
+ return;
lag_lower_info = info->lower_state_info;
if (!lag_lower_info)
- return 0;
+ return;
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
repr = netdev_priv(netdev);
/* Verify that the repr is associated with this app. */
if (repr->app != priv->app)
- return 0;
+ return;
repr_priv = repr->app_priv;
flags = &repr_priv->lag_port_flags;
@@ -617,20 +624,15 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
- return 0;
}
-static int
-nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
- void *ptr)
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ unsigned long event, void *ptr)
{
- struct net_device *netdev;
- struct nfp_fl_lag *lag;
+ struct nfp_fl_lag *lag = &priv->nfp_lag;
int err;
- netdev = netdev_notifier_info_to_dev(ptr);
- lag = container_of(nb, struct nfp_fl_lag, lag_nb);
-
switch (event) {
case NETDEV_CHANGEUPPER:
err = nfp_fl_lag_changeupper_event(lag, ptr);
@@ -638,17 +640,11 @@ nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
return NOTIFY_BAD;
return NOTIFY_OK;
case NETDEV_CHANGELOWERSTATE:
- err = nfp_fl_lag_changels_event(lag, netdev, ptr);
- if (err)
- return NOTIFY_BAD;
+ nfp_fl_lag_changels_event(lag, netdev, ptr);
return NOTIFY_OK;
case NETDEV_UNREGISTER:
- if (netif_is_bond_master(netdev)) {
- err = nfp_fl_lag_schedule_group_delete(lag, netdev);
- if (err)
- return NOTIFY_BAD;
- return NOTIFY_OK;
- }
+ nfp_fl_lag_schedule_group_delete(lag, netdev);
+ return NOTIFY_OK;
}
return NOTIFY_DONE;
@@ -673,8 +669,6 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag)
/* 0 is a reserved batch version so increment to first valid value. */
nfp_fl_increment_version(lag);
-
- lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event;
}
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 3a54728d2ea6..5059110a1768 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}
-static int
-nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
-{
- return tc_setup_cb_egdev_register(netdev,
- nfp_flower_setup_tc_egress_cb,
- netdev_priv(netdev));
-}
-
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
kfree(repr->app_priv);
-
- tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
- netdev_priv(netdev));
}
static void
@@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
+ INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
+
return 0;
err_cleanup_metadata:
@@ -661,10 +652,6 @@ static int nfp_flower_start(struct nfp_app *app)
err = nfp_flower_lag_reset(&app_priv->nfp_lag);
if (err)
return err;
-
- err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
- if (err)
- return err;
}
return nfp_tunnel_config_start(app);
@@ -672,12 +659,27 @@ static int nfp_flower_start(struct nfp_app *app)
static void nfp_flower_stop(struct nfp_app *app)
{
+ nfp_tunnel_config_stop(app);
+}
+
+static int
+nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
+ unsigned long event, void *ptr)
+{
struct nfp_flower_priv *app_priv = app->priv;
+ int ret;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
- unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+ }
- nfp_tunnel_config_stop(app);
+ ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+
+ return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
const struct nfp_app_type app_flower = {
@@ -698,7 +700,6 @@ const struct nfp_app_type app_flower = {
.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,
- .repr_init = nfp_flower_repr_netdev_init,
.repr_preclean = nfp_flower_repr_netdev_preclean,
.repr_clean = nfp_flower_repr_netdev_clean,
@@ -708,6 +709,8 @@ const struct nfp_app_type app_flower = {
.start = nfp_flower_start,
.stop = nfp_flower_stop,
+ .netdev_event = nfp_flower_netdev_event,
+
.ctrl_msg_rx = nfp_flower_cmsg_rx,
.sriov_enable = nfp_flower_sriov_enable,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 90045bab95bf..b858bac47621 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -20,7 +20,6 @@ struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;
-#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
@@ -72,7 +71,6 @@ struct nfp_mtu_conf {
/**
* struct nfp_fl_lag - Flower APP priv data for link aggregation
- * @lag_nb: Notifier to track master/slave events
* @work: Work queue for writing configs to the HW
* @lock: Lock to protect lag_group_list
* @group_list: List of all master/slave groups offloaded
@@ -85,7 +83,6 @@ struct nfp_mtu_conf {
* retransmission
*/
struct nfp_fl_lag {
- struct notifier_block lag_nb;
struct delayed_work work;
struct mutex lock;
struct list_head group_list;
@@ -126,13 +123,13 @@ struct nfp_fl_lag {
* @nfp_neigh_off_lock: Lock for the neighbour address list
* @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs
* @nfp_mac_off_count: Number of MACs in address list
- * @nfp_tun_mac_nb: Notifier to monitor link state
* @nfp_tun_neigh_nb: Notifier to monitor neighbour state
* @reify_replies: atomically stores the number of replies received
* from firmware for repr reify
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
* @nfp_lag: Link aggregation data block
+ * @indr_block_cb_priv: List of priv data passed to indirect block cbs
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -160,12 +157,12 @@ struct nfp_flower_priv {
spinlock_t nfp_neigh_off_lock;
struct ida nfp_mac_off_ids;
int nfp_mac_off_count;
- struct notifier_block nfp_tun_mac_nb;
struct notifier_block nfp_tun_neigh_nb;
atomic_t reify_replies;
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
struct nfp_fl_lag nfp_lag;
+ struct list_head indr_block_cb_priv;
};
/**
@@ -209,7 +206,6 @@ struct nfp_fl_payload {
char *unmasked_data;
char *mask_data;
char *action_data;
- bool ingress_offload;
};
extern const struct rhashtable_params nfp_flower_table_params;
@@ -226,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
@@ -244,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
- struct net_device *netdev, __be32 host_ctx);
+ struct net_device *netdev);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
@@ -252,21 +249,28 @@ void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
int nfp_tunnel_config_start(struct nfp_app *app);
void nfp_tunnel_config_stop(struct nfp_app *app);
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event, void *ptr);
void nfp_tunnel_write_macs(struct nfp_app *app);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ unsigned long event, void *ptr);
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e54fb6034326..cdf75595f627 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
- if (tun_type)
+ if (tun_type) {
frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
- else
+ } else {
+ if (!cmsg_port)
+ return -EOPNOTSUPP;
frame->in_port = cpu_to_be32(cmsg_port);
+ }
return 0;
}
@@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
}
}
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type)
{
- struct nfp_repr *netdev_repr;
+ u32 cmsg_port = 0;
int err;
u8 *ext;
u8 *msk;
+ if (nfp_netdev_is_nfp_repr(netdev))
+ cmsg_port = nfp_repr_get_port_id(netdev);
+
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- nfp_repr_get_port_id(netdev),
- false, tun_type);
+ cmsg_port, false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- nfp_repr_get_port_id(netdev),
- true, tun_type);
+ cmsg_port, true, tun_type);
if (err)
return err;
@@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
- if (nfp_netdev_is_nfp_repr(netdev)) {
- netdev_repr = netdev_priv(netdev);
- nfp_tunnel_write_macs(netdev_repr->app);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
- }
+ nfp_tunnel_write_macs(app);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(app, tun_dst);
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
err = nfp_flower_compile_geneve_opt(ext, flow, false);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 48729bf171e0..573a4400a26c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -21,7 +21,6 @@ struct nfp_mask_id_table {
struct nfp_fl_flow_table_cmp_arg {
struct net_device *netdev;
unsigned long cookie;
- __be32 host_ctx;
};
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
@@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
- struct net_device *netdev, __be32 host_ctx)
+ struct net_device *netdev)
{
struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
struct nfp_flower_priv *priv = app->priv;
flower_cmp_arg.netdev = netdev;
flower_cmp_arg.cookie = tc_flower_cookie;
- flower_cmp_arg.host_ctx = host_ctx;
return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
nfp_flower_table_params);
@@ -287,6 +285,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+ nfp_flow->ingress_dev = netdev;
new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
@@ -306,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
priv->stats[stats_cxt].bytes = 0;
priv->stats[stats_cxt].used = jiffies;
- check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
if (nfp_release_stats_entry(app, stats_cxt))
return -EINVAL;
@@ -352,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
const struct nfp_fl_payload *flow_entry = obj;
- if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
- (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
- flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
+ if (flow_entry->ingress_dev == cmp_arg->netdev)
return flow_entry->tc_flower_cookie != cmp_arg->cookie;
return 1;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 67e576fe7fc0..2cdbf29ecbe7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -56,11 +56,10 @@
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
static int
-nfp_flower_xmit_flow(struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow, u8 mtype)
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+ u8 mtype)
{
u32 meta_len, key_len, mask_len, act_len, tot_len;
- struct nfp_repr *priv = netdev_priv(netdev);
struct sk_buff *skb;
unsigned char *msg;
@@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
- skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
+ skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
- nfp_ctrl_tx(priv->app->ctrl, skb);
+ nfp_ctrl_tx(app->ctrl, skb);
return 0;
}
@@ -129,9 +128,9 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
- bool egress,
enum nfp_flower_tun_type *tun_type)
{
struct flow_dissector_key_basic *mask_basic = NULL;
@@ -187,8 +186,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->key);
- if (!egress)
- return -EOPNOTSUPP;
if (mask_enc_ctl->addr_type != 0xffff ||
enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -251,9 +248,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
default:
return -EOPNOTSUPP;
}
- } else if (egress) {
- /* Reject non tunnel matches offloaded to egress repr. */
- return -EOPNOTSUPP;
+
+ /* Ensure the ingress netdev matches the expected tun type. */
+ if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
+ return -EOPNOTSUPP;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -390,7 +388,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
struct nfp_fl_payload *flow_pay;
@@ -414,7 +412,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
- flow_pay->ingress_offload = !egress;
return flow_pay;
@@ -432,7 +429,6 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
- * @egress: NFP netdev is the egress.
*
* Adds a new flow to the repeated hash structure and action payload.
*
@@ -440,46 +436,35 @@ err_free_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
- struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
- struct net_device *ingr_dev;
+ struct nfp_port *port = NULL;
int err;
- ingr_dev = egress ? NULL : netdev;
- flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
- if (flow_pay) {
- /* Ignore as duplicate if it has been added by different cb. */
- if (flow_pay->ingress_offload && egress)
- return 0;
- else
- return -EOPNOTSUPP;
- }
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+ err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
&tun_type);
if (err)
goto err_free_key_ls;
- flow_pay = nfp_flower_allocate_new(key_layer, egress);
+ flow_pay = nfp_flower_allocate_new(key_layer);
if (!flow_pay) {
err = -ENOMEM;
goto err_free_key_ls;
}
- flow_pay->ingress_dev = egress ? NULL : netdev;
-
- err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
- tun_type);
+ err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+ flow_pay, tun_type);
if (err)
goto err_destroy_flow;
@@ -487,8 +472,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay,
- flow_pay->ingress_dev);
+ err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
if (err)
goto err_destroy_flow;
@@ -498,12 +482,13 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_release_metadata;
- err = nfp_flower_xmit_flow(netdev, flow_pay,
+ err = nfp_flower_xmit_flow(app, flow_pay,
NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
if (err)
goto err_remove_rhash;
- port->tc_offload_cnt++;
+ if (port)
+ port->tc_offload_cnt++;
/* Deallocate flow payload when flower rule has been destroyed. */
kfree(key_layer);
@@ -531,7 +516,6 @@ err_free_key_ls:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure
- * @egress: Netdev is the egress dev.
*
* Removes a flow from the repeated hash structure and clears the
* action payload.
@@ -540,19 +524,19 @@ err_free_key_ls:
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
- struct net_device *ingr_dev;
+ struct nfp_port *port = NULL;
int err;
- ingr_dev = egress ? NULL : netdev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
+
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow)
- return egress ? 0 : -ENOENT;
+ return -ENOENT;
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -561,13 +545,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
- err = nfp_flower_xmit_flow(netdev, nfp_flow,
+ err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (err)
goto err_free_flow;
err_free_flow:
- port->tc_offload_cnt--;
+ if (port)
+ port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
kfree(nfp_flow->mask_data);
kfree(nfp_flow->unmasked_data);
@@ -583,7 +568,6 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: Netdev structure.
* @flow: TC flower classifier offload structure
- * @egress: Netdev is the egress dev.
*
* Populates a flow statistics structure which corresponds to a
* specific flow.
@@ -592,22 +576,16 @@ err_free_flow:
*/
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
- struct net_device *ingr_dev;
u32 ctx_id;
- ingr_dev = egress ? NULL : netdev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow)
return -EINVAL;
- if (nfp_flow->ingress_offload && egress)
- return 0;
-
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
spin_lock_bh(&priv->stats_lock);
@@ -624,35 +602,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower, bool egress)
+ struct tc_cls_flower_offload *flower)
{
if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
case TC_CLSFLOWER_REPLACE:
- return nfp_flower_add_offload(app, netdev, flower, egress);
+ return nfp_flower_add_offload(app, netdev, flower);
case TC_CLSFLOWER_DESTROY:
- return nfp_flower_del_offload(app, netdev, flower, egress);
+ return nfp_flower_del_offload(app, netdev, flower);
case TC_CLSFLOWER_STATS:
- return nfp_flower_get_stats(app, netdev, flower, egress);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- struct nfp_repr *repr = cb_priv;
-
- if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
- return -EOPNOTSUPP;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return nfp_flower_repr_offload(repr->app, repr->netdev,
- type_data, true);
+ return nfp_flower_get_stats(app, netdev, flower);
default:
return -EOPNOTSUPP;
}
@@ -669,7 +630,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
- type_data, false);
+ type_data);
default:
return -EOPNOTSUPP;
}
@@ -708,3 +669,130 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
}
+
+struct nfp_flower_indr_block_cb_priv {
+ struct net_device *netdev;
+ struct nfp_app *app;
+ struct list_head list;
+};
+
+static struct nfp_flower_indr_block_cb_priv *
+nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ struct nfp_flower_indr_block_cb_priv *cb_priv;
+ struct nfp_flower_priv *priv = app->priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+ struct tc_cls_flower_offload *flower = type_data;
+
+ if (flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(priv->app, priv->netdev,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+ struct tc_block_offload *f)
+{
+ struct nfp_flower_indr_block_cb_priv *cb_priv;
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+ if (!cb_priv)
+ return -ENOMEM;
+
+ cb_priv->netdev = netdev;
+ cb_priv->app = app;
+ list_add(&cb_priv->list, &priv->indr_block_cb_priv);
+
+ err = tcf_block_cb_register(f->block,
+ nfp_flower_setup_indr_block_cb,
+ cb_priv, cb_priv, f->extack);
+ if (err) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ }
+
+ return err;
+ case TC_BLOCK_UNBIND:
+ cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+ if (!cb_priv)
+ return -ENOENT;
+
+ tcf_block_cb_unregister(f->block,
+ nfp_flower_setup_indr_block_cb,
+ cb_priv);
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ int err;
+
+ if (!nfp_fl_is_netdev_to_offload(netdev))
+ return NOTIFY_OK;
+
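+ /* Track TC blocks bound to tunnel and OvS netdevs so classifier
+ * rules installed there can be offloaded through this app.
+ */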
+ if (event == NETDEV_REGISTER) {
+ err = __tc_indr_block_cb_register(netdev, app,
+ nfp_flower_indr_setup_tc_cb,
+ app);
+ if (err)
+ nfp_flower_cmsg_warn(app,
+ "Indirect block reg failed - %s\n",
+ netdev->name);
+ } else if (event == NETDEV_UNREGISTER) {
+ __tc_indr_block_cb_unregister(netdev,
+ nfp_flower_indr_setup_tc_cb, app);
+ }
+
+ return NOTIFY_OK;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 8e5bec04d1f9..2d9f26a725c2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -4,7 +4,6 @@
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
-#include <net/vxlan.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>
@@ -182,18 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
}
}
-static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
-{
- if (!netdev->rtnl_link_ops)
- return false;
- if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
- return true;
- if (netif_is_vxlan(netdev))
- return true;
-
- return false;
-}
-
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
@@ -615,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_repr_get_port_id(netdev);
- else if (!nfp_tun_is_netdev_to_offload(netdev))
+ else if (!nfp_fl_is_netdev_to_offload(netdev))
return;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -652,29 +639,16 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
mutex_unlock(&priv->nfp_mac_off_lock);
}
-static int nfp_tun_mac_event_handler(struct notifier_block *nb,
- unsigned long event, void *ptr)
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event, void *ptr)
{
- struct nfp_flower_priv *app_priv;
- struct net_device *netdev;
- struct nfp_app *app;
-
if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
- app_priv = container_of(nb, struct nfp_flower_priv,
- nfp_tun_mac_nb);
- app = app_priv->app;
- netdev = netdev_notifier_info_to_dev(ptr);
-
/* If non-nfp netdev then free its offload index. */
- if (nfp_tun_is_netdev_to_offload(netdev))
+ if (nfp_fl_is_netdev_to_offload(netdev))
nfp_tun_del_mac_idx(app, netdev->ifindex);
} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
event == NETDEV_REGISTER) {
- app_priv = container_of(nb, struct nfp_flower_priv,
- nfp_tun_mac_nb);
- app = app_priv->app;
- netdev = netdev_notifier_info_to_dev(ptr);
-
nfp_tun_add_to_mac_offload_list(netdev, app);
/* Force a list write to keep NFP up to date. */
@@ -686,14 +660,11 @@ static int nfp_tun_mac_event_handler(struct notifier_block *nb,
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
- struct net_device *netdev;
- int err;
/* Initialise priv data for MAC offloading. */
priv->nfp_mac_off_count = 0;
mutex_init(&priv->nfp_mac_off_lock);
INIT_LIST_HEAD(&priv->nfp_mac_off_list);
- priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
mutex_init(&priv->nfp_mac_index_lock);
INIT_LIST_HEAD(&priv->nfp_mac_index_list);
ida_init(&priv->nfp_mac_off_ids);
@@ -707,27 +678,7 @@ int nfp_tunnel_config_start(struct nfp_app *app)
INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
- err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
- if (err)
- goto err_free_mac_ida;
-
- err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
- if (err)
- goto err_unreg_mac_nb;
-
- /* Parse netdevs already registered for MACs that need offloaded. */
- rtnl_lock();
- for_each_netdev(&init_net, netdev)
- nfp_tun_add_to_mac_offload_list(netdev, app);
- rtnl_unlock();
-
- return 0;
-
-err_unreg_mac_nb:
- unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
-err_free_mac_ida:
- ida_destroy(&priv->nfp_mac_off_ids);
- return err;
+ return register_netevent_notifier(&priv->nfp_tun_neigh_nb);
}
void nfp_tunnel_config_stop(struct nfp_app *app)
@@ -739,7 +690,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
struct nfp_ipv4_addr_entry *ip_entry;
struct list_head *ptr, *storage;
- unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
/* Free any memory that may be occupied by MAC list. */
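
The two call sites converted above now use nfp_fl_is_netdev_to_offload(), a helper shared with the new indirect-block code. Its body is not part of this hunk, but it presumably carries the same checks as the nfp_tun_is_netdev_to_offload() removed earlier in this file, roughly:

/* Sketch based on the removed local helper; the shared version may
 * cover additional netdev types.
 */
static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
{
	if (!netdev->rtnl_link_ops)
		return false;
	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
		return true;
	if (netif_is_vxlan(netdev))
		return true;

	return false;
}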
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 68a0991aac22..3a973282b2bb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -131,11 +131,100 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *old;
old = nfp_reprs_get_locked(app, type);
+ rtnl_lock();
rcu_assign_pointer(app->reprs[type], reprs);
+ rtnl_unlock();
return old;
}
+static void
+nfp_app_netdev_feat_change(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_net *nn;
+ unsigned int type;
+
+ if (!nfp_netdev_is_nfp_net(netdev))
+ return;
+ nn = netdev_priv(netdev);
+ if (nn->app != app)
+ return;
+
+ for (type = 0; type < __NFP_REPR_TYPE_MAX; type++) {
+ struct nfp_reprs *reprs;
+ unsigned int i;
+
+ reprs = rtnl_dereference(app->reprs[type]);
+ if (!reprs)
+ continue;
+
+ for (i = 0; i < reprs->num_reprs; i++) {
+ struct net_device *repr;
+
+ repr = rtnl_dereference(reprs->reprs[i]);
+ if (!repr)
+ continue;
+
+ nfp_repr_transfer_features(repr, netdev);
+ }
+ }
+}
+
+static int
+nfp_app_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct net_device *netdev;
+ struct nfp_app *app;
+
+ netdev = netdev_notifier_info_to_dev(ptr);
+ app = container_of(nb, struct nfp_app, netdev_nb);
+
+ /* Handle events the common code is interested in */
+ switch (event) {
+ case NETDEV_FEAT_CHANGE:
+ nfp_app_netdev_feat_change(app, netdev);
+ break;
+ }
+
+ /* Call offload specific handlers */
+ if (app->type->netdev_event)
+ return app->type->netdev_event(app, netdev, event, ptr);
+ return NOTIFY_DONE;
+}
+
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
+{
+ int err;
+
+ app->ctrl = ctrl;
+
+ if (app->type->start) {
+ err = app->type->start(app);
+ if (err)
+ return err;
+ }
+
+ app->netdev_nb.notifier_call = nfp_app_netdev_event;
+ err = register_netdevice_notifier(&app->netdev_nb);
+ if (err)
+ goto err_app_stop;
+
+ return 0;
+
+err_app_stop:
+ if (app->type->stop)
+ app->type->stop(app);
+ return err;
+}
+
+void nfp_app_stop(struct nfp_app *app)
+{
+ unregister_netdevice_notifier(&app->netdev_nb);
+
+ if (app->type->stop)
+ app->type->stop(app);
+}
+
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
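
With nfp_app_start() now owning a single netdev notifier per app, offload-specific handling is opted into via the new netdev_event member of struct nfp_app_type (see the nfp_app.h hunk below). A hedged sketch of that wiring; the initializer is illustrative, shows only the relevant members, and all example_* names are placeholders:

const struct nfp_app_type app_example = {
	/* ... id, name, and other callbacks ... */
	.start		= example_app_start,
	.stop		= example_app_stop,

	/* Invoked from nfp_app_netdev_event() after the common
	 * NETDEV_FEAT_CHANGE handling above.
	 */
	.netdev_event	= example_netdev_event,
};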
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 4d6ecf99b1cc..d578d856a009 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -69,6 +69,7 @@ extern const struct nfp_app_type app_abm;
* @port_get_stats_strings: get strings for extra statistics
* @start: start application logic
* @stop: stop application logic
+ * @netdev_event: Netdevice notifier event
* @ctrl_msg_rx: control message handler
* @ctrl_msg_rx_raw: handler for control messages from data queues
* @setup_tc: setup TC ndo
@@ -122,6 +123,9 @@ struct nfp_app_type {
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
+ int (*netdev_event)(struct nfp_app *app, struct net_device *netdev,
+ unsigned long event, void *ptr);
+
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data,
unsigned int len);
@@ -151,6 +155,7 @@ struct nfp_app_type {
* @reprs: array of pointers to representors
* @type: pointer to const application ops and info
* @ctrl_mtu: MTU to set on the control vNIC (set in .init())
+ * @netdev_nb: Netdevice notifier block
* @priv: app-specific priv data
*/
struct nfp_app {
@@ -163,6 +168,9 @@ struct nfp_app {
const struct nfp_app_type *type;
unsigned int ctrl_mtu;
+
+ struct notifier_block netdev_nb;
+
void *priv;
};
@@ -264,21 +272,6 @@ nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
return app->type->repr_change_mtu(app, netdev, new_mtu);
}
-static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
-{
- app->ctrl = ctrl;
- if (!app->type->start)
- return 0;
- return app->type->start(app);
-}
-
-static inline void nfp_app_stop(struct nfp_app *app)
-{
- if (!app->type->stop)
- return;
- app->type->stop(app);
-}
-
static inline const char *nfp_app_name(struct nfp_app *app)
{
if (!app)
@@ -430,6 +423,8 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority);
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
void nfp_app_free(struct nfp_app *app);
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl);
+void nfp_app_stop(struct nfp_app *app);
/* Callbacks shared between apps */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 6f0c37d09256..be37c2d6151c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -158,6 +158,7 @@ struct nfp_net_tx_desc {
__le16 data_len; /* Length of frame + meta data */
} __packed;
__le32 vals[4];
+ __le64 vals8[2];
};
};
@@ -543,6 +544,7 @@ struct nfp_net_dp {
* @reconfig_timer_active: Timer for reading reconfiguration results is pending
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
+ * @reconfig_in_progress_update: Update the FW is currently processing (debug only)
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -611,6 +613,7 @@ struct nfp_net {
bool reconfig_timer_active;
bool reconfig_sync_present;
struct timer_list reconfig_timer;
+ u32 reconfig_in_progress_update;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
@@ -851,7 +854,7 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);
@@ -868,6 +871,7 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
+int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6bddfcfdec34..e97636d2e6ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -101,6 +101,7 @@ static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
/* ensure update is written before pinging HW */
nn_pci_flush(nn);
nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+ nn->reconfig_in_progress_update = update;
}
/* Pass 0 as update to run posted reconfigs. */
@@ -123,10 +124,14 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
if (reg == 0)
return true;
if (reg & NFP_NET_CFG_UPDATE_ERR) {
- nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+ nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
+ reg, nn->reconfig_in_progress_update,
+ nn_readl(nn, NFP_NET_CFG_CTRL));
return true;
} else if (last_check) {
- nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+ nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
+ reg, nn->reconfig_in_progress_update,
+ nn_readl(nn, NFP_NET_CFG_CTRL));
return true;
}
@@ -279,7 +284,7 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*
* Return: Negative errno on error, 0 on success
*/
-static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
@@ -647,27 +652,29 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
* @txbuf: Pointer to driver soft TX descriptor
* @txd: Pointer to HW TX descriptor
* @skb: Pointer to SKB
+ * @md_bytes: Prepend length
*
* Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
* Return error on packet header greater than maximum supported LSO header size.
*/
static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_buf *txbuf,
- struct nfp_net_tx_desc *txd, struct sk_buff *skb)
+ struct nfp_net_tx_desc *txd, struct sk_buff *skb,
+ u32 md_bytes)
{
- u32 hdrlen;
+ u32 l3_offset, l4_offset, hdrlen;
u16 mss;
if (!skb_is_gso(skb))
return;
if (!skb->encapsulation) {
- txd->l3_offset = skb_network_offset(skb);
- txd->l4_offset = skb_transport_offset(skb);
+ l3_offset = skb_network_offset(skb);
+ l4_offset = skb_transport_offset(skb);
hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
} else {
- txd->l3_offset = skb_inner_network_offset(skb);
- txd->l4_offset = skb_inner_transport_offset(skb);
+ l3_offset = skb_inner_network_offset(skb);
+ l4_offset = skb_inner_transport_offset(skb);
hdrlen = skb_inner_transport_header(skb) - skb->data +
inner_tcp_hdrlen(skb);
}
@@ -676,7 +683,9 @@ static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
- txd->lso_hdrlen = hdrlen;
+ txd->l3_offset = l3_offset - md_bytes;
+ txd->l4_offset = l4_offset - md_bytes;
+ txd->lso_hdrlen = hdrlen - md_bytes;
txd->mss = cpu_to_le16(mss);
txd->flags |= PCIE_DESC_TX_LSO;
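
The md_bytes subtraction above accounts for the fact that by this point the driver may already have prepended TX metadata (e.g. the 8-byte prepend mentioned in the nfp_net_features_check() hunk below) with an skb_push(), so the skb-relative header offsets include the prepend. A worked example under that assumption, for a plain IPv4/TCP frame with an 8-byte prepend (illustrative numbers only):

	/* skb_network_offset(skb)   == 14 + 8 == 22
	 * skb_transport_offset(skb) == 34 + 8 == 42
	 * so the descriptor is programmed with frame-relative offsets:
	 *   txd->l3_offset == 22 - md_bytes == 14
	 *   txd->l4_offset == 42 - md_bytes == 34
	 * and lso_hdrlen is reduced by the same 8 bytes.
	 */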
@@ -786,11 +795,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
const struct skb_frag_struct *frag;
- struct nfp_net_tx_desc *txd, txdg;
int f, nr_frags, wr_idx, md_bytes;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_r_vector *r_vec;
struct nfp_net_tx_buf *txbuf;
+ struct nfp_net_tx_desc *txd;
struct netdev_queue *nd_q;
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
@@ -801,13 +810,13 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
qidx = skb_get_queue_mapping(skb);
tx_ring = &dp->tx_rings[qidx];
r_vec = tx_ring->r_vec;
- nd_q = netdev_get_tx_queue(dp->netdev, qidx);
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
qidx, tx_ring->wr_p, tx_ring->rd_p);
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
netif_tx_stop_queue(nd_q);
nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
@@ -851,7 +860,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
txd->lso_hdrlen = 0;
/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
- nfp_net_tx_tso(r_vec, txbuf, txd, skb);
+ nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= PCIE_DESC_TX_VLAN;
@@ -860,8 +869,10 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
/* Gather DMA */
if (nr_frags > 0) {
+ __le64 second_half;
+
/* all descs must match except for in addr, length and eop */
- txdg = *txd;
+ second_half = txd->vals8[1];
for (f = 0; f < nr_frags; f++) {
frag = &skb_shinfo(skb)->frags[f];
@@ -878,11 +889,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
tx_ring->txbufs[wr_idx].fidx = f;
txd = &tx_ring->txds[wr_idx];
- *txd = txdg;
txd->dma_len = cpu_to_le16(fsize);
nfp_desc_set_dma_addr(txd, dma_addr);
- txd->offset_eop |=
- (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
+ txd->offset_eop = md_bytes |
+ ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
+ txd->vals8[1] = second_half;
}
u64_stats_update_begin(&r_vec->tx_sync);
@@ -890,16 +901,16 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_end(&r_vec->tx_sync);
}
- netdev_tx_sent_queue(nd_q, txbuf->real_len);
-
skb_tx_timestamp(skb);
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+
tx_ring->wr_p += nr_frags + 1;
if (nfp_net_tx_ring_should_stop(tx_ring))
nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1;
- if (!skb->xmit_more || netif_xmit_stopped(nd_q))
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
nfp_net_tx_xmit_more_flush(tx_ring);
return NETDEV_TX_OK;
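
The old open-coded "!skb->xmit_more || netif_xmit_stopped()" test is replaced by __netdev_tx_sent_queue(), which also takes over the BQL accounting removed from earlier in this function. Its behaviour is roughly equivalent to the following sketch (an approximation, not the in-tree implementation, which defers part of the accounting while xmit_more is set):

static inline bool example_tx_sent_queue(struct netdev_queue *nd_q,
					 unsigned int bytes, bool xmit_more)
{
	/* Account the bytes against BQL, then tell the caller whether
	 * the doorbell must be rung now: either this is the last frame
	 * of the batch, or BQL just stopped the queue.
	 */
	netdev_tx_sent_queue(nd_q, bytes);
	return !xmit_more || netif_xmit_stopped(nd_q);
}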
@@ -940,14 +951,10 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- const struct skb_frag_struct *frag;
struct netdev_queue *nd_q;
u32 done_pkts = 0, done_bytes = 0;
- struct sk_buff *skb;
- int todo, nr_frags;
u32 qcp_rd_p;
- int fidx;
- int idx;
+ int todo;
if (tx_ring->wr_p == tx_ring->rd_p)
return;
@@ -961,26 +968,33 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
while (todo--) {
+ const struct skb_frag_struct *frag;
+ struct nfp_net_tx_buf *tx_buf;
+ struct sk_buff *skb;
+ int fidx, nr_frags;
+ int idx;
+
idx = D_IDX(tx_ring, tx_ring->rd_p++);
+ tx_buf = &tx_ring->txbufs[idx];
- skb = tx_ring->txbufs[idx].skb;
+ skb = tx_buf->skb;
if (!skb)
continue;
nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_ring->txbufs[idx].fidx;
+ fidx = tx_buf->fidx;
if (fidx == -1) {
/* unmap head */
- dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
- done_pkts += tx_ring->txbufs[idx].pkt_cnt;
- done_bytes += tx_ring->txbufs[idx].real_len;
+ done_pkts += tx_buf->pkt_cnt;
+ done_bytes += tx_buf->real_len;
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
}
@@ -988,9 +1002,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
if (fidx == nr_frags - 1)
napi_consume_skb(skb, budget);
- tx_ring->txbufs[idx].dma_addr = 0;
- tx_ring->txbufs[idx].skb = NULL;
- tx_ring->txbufs[idx].fidx = -2;
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;
}
tx_ring->qcp_rd_p = qcp_rd_p;
@@ -3275,7 +3289,10 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
hdrlen = skb_inner_transport_header(skb) - skb->data +
inner_tcp_hdrlen(skb);
- if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
+ /* Assume the worst-case scenario of the longest possible
+ * metadata prepend - 8B
+ */
+ if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
features &= ~NETIF_F_GSO_MASK;
}
@@ -3560,6 +3577,7 @@ void nfp_net_info(struct nfp_net *nn)
/**
* nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
+ * @ctrl_bar: PCI IOMEM with vNIC config memory
* @needs_netdev: Whether to allocate a netdev for this vNIC
* @max_tx_rings: Maximum number of TX rings supported by device
* @max_rx_rings: Maximum number of RX rings supported by device
@@ -3570,11 +3588,12 @@ void nfp_net_info(struct nfp_net *nn)
*
* Return: NFP Net device structure, or ERR_PTR on error.
*/
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
- unsigned int max_tx_rings,
- unsigned int max_rx_rings)
+struct nfp_net *
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+ unsigned int max_tx_rings, unsigned int max_rx_rings)
{
struct nfp_net *nn;
+ int err;
if (needs_netdev) {
struct net_device *netdev;
@@ -3594,6 +3613,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
}
nn->dp.dev = &pdev->dev;
+ nn->dp.ctrl_bar = ctrl_bar;
nn->pdev = pdev;
nn->max_tx_rings = max_tx_rings;
@@ -3616,7 +3636,19 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
+ err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+ &nn->tlv_caps);
+ if (err)
+ goto err_free_nn;
+
return nn;
+
+err_free_nn:
+ if (nn->dp.netdev)
+ free_netdev(nn->dp.netdev);
+ else
+ vfree(nn);
+ return ERR_PTR(err);
}
/**
@@ -3889,11 +3921,6 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
- err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
- &nn->tlv_caps);
- if (err)
- return err;
-
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index f2aaef976c7d..6d5213b5bcb0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -41,8 +41,8 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
data += 4;
if (length % NFP_NET_CFG_TLV_LENGTH_INC) {
- dev_err(dev, "TLV size not multiple of %u len:%u\n",
- NFP_NET_CFG_TLV_LENGTH_INC, length);
+ dev_err(dev, "TLV size not multiple of %u offset:%u len:%u\n",
+ NFP_NET_CFG_TLV_LENGTH_INC, offset, length);
return -EINVAL;
}
if (data + length > end) {
@@ -61,14 +61,14 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
if (!length)
return 0;
- dev_err(dev, "END TLV should be empty, has len:%d\n",
- length);
+ dev_err(dev, "END TLV should be empty, has offset:%u len:%d\n",
+ offset, length);
return -EINVAL;
case NFP_NET_CFG_TLV_TYPE_ME_FREQ:
if (length != 4) {
dev_err(dev,
- "ME FREQ TLV should be 4B, is %dB\n",
- length);
+ "ME FREQ TLV should be 4B, is %dB offset:%u\n",
+ length, offset);
return -EINVAL;
}
@@ -90,6 +90,15 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr),
offset, length);
break;
+ case NFP_NET_CFG_TLV_TYPE_REPR_CAP:
+ if (length < 4) {
+ dev_err(dev, "REPR CAP TLV short %dB < 4B offset:%u\n",
+ length, offset);
+ return -EINVAL;
+ }
+
+ caps->repr_cap = readl(data);
+ break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
break;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index d7c8518ac952..166d7f71442e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -397,6 +397,8 @@
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
+#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
+
/**
* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
@@ -464,6 +466,10 @@
* Variable, experimental IDs. IDs designated for internal development and
* experiments before a stable TLV ID has been allocated to a feature. Should
* never be present in production firmware.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_REPR_CAP:
+ * Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which
+ * can be used on representors.
*/
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -472,6 +478,7 @@
#define NFP_NET_CFG_TLV_TYPE_MBOX 4
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6
+#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
struct device;
@@ -480,11 +487,13 @@ struct device;
* @me_freq_mhz: ME clock_freq (MHz)
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
+ * @repr_cap: capabilities for representors
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
unsigned int mbox_off;
unsigned int mbox_len;
+ u32 repr_cap;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 69b1c9b62e3d..ab7f2498e1c4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -8,7 +8,7 @@
static struct dentry *nfp_dir;
-static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
+static int nfp_rx_q_show(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_rx_ring *rx_ring;
@@ -65,31 +65,12 @@ out:
rtnl_unlock();
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(nfp_rx_q);
-static int nfp_net_debugfs_rx_q_open(struct inode *inode, struct file *f)
-{
- return single_open(f, nfp_net_debugfs_rx_q_read, inode->i_private);
-}
+static int nfp_tx_q_show(struct seq_file *file, void *data);
+DEFINE_SHOW_ATTRIBUTE(nfp_tx_q);
-static const struct file_operations nfp_rx_q_fops = {
- .owner = THIS_MODULE,
- .open = nfp_net_debugfs_rx_q_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
-
-static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f);
-
-static const struct file_operations nfp_tx_q_fops = {
- .owner = THIS_MODULE,
- .open = nfp_net_debugfs_tx_q_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
-
-static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
+static int nfp_tx_q_show(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_tx_ring *tx_ring;
@@ -158,18 +139,11 @@ out:
return 0;
}
-static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f)
+static int nfp_xdp_q_show(struct seq_file *file, void *data)
{
- return single_open(f, nfp_net_debugfs_tx_q_read, inode->i_private);
+ return nfp_tx_q_show(file, data);
}
-
-static const struct file_operations nfp_xdp_q_fops = {
- .owner = THIS_MODULE,
- .open = nfp_net_debugfs_tx_q_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
+DEFINE_SHOW_ATTRIBUTE(nfp_xdp_q);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
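
The conversion above leans on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which generates the single_open() boilerplate that the removed file_operations blocks spelled out by hand. Roughly, DEFINE_SHOW_ATTRIBUTE(name) expands to something like:

static int name_open(struct inode *inode, struct file *file)
{
	return single_open(file, name_show, inode->i_private);
}

static const struct file_operations name_fops = {
	.owner		= THIS_MODULE,
	.open		= name_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};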
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 1e7d20468a34..08f5fdbd8e41 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -116,13 +116,13 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
/* Allocate and initialise the vNIC */
- nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
+ nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+ n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
nn->app = pf->app;
nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
- nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index c09b893c30dd..69d7aebda09b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -11,6 +11,7 @@
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
+#include "nfp_net.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_repr.h"
#include "nfp_net_sriov.h"
@@ -231,6 +232,27 @@ err_port_disable:
return err;
}
+static netdev_features_t
+nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ netdev_features_t old_features = features;
+ netdev_features_t lower_features;
+ struct net_device *lower_dev;
+
+ lower_dev = repr->dst->u.port_info.lower_dev;
+
+ lower_features = lower_dev->features;
+ if (lower_features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ lower_features |= NETIF_F_HW_CSUM;
+
+ features = netdev_intersect_features(features, lower_features);
+ features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);
+ features |= NETIF_F_LLTX;
+
+ return features;
+}
+
const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_init = nfp_app_ndo_init,
.ndo_uninit = nfp_app_ndo_uninit,
@@ -248,10 +270,25 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
+ .ndo_fix_features = nfp_repr_fix_features,
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
};
+void
+nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ if (repr->dst->u.port_info.lower_dev != lower)
+ return;
+
+ netdev->gso_max_size = lower->gso_max_size;
+ netdev->gso_max_segs = lower->gso_max_segs;
+
+ netdev_update_features(netdev);
+}
+
static void nfp_repr_clean(struct nfp_repr *repr)
{
unregister_netdev(repr->netdev);
@@ -281,6 +318,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
struct net_device *pf_netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_net *nn = netdev_priv(pf_netdev);
+ u32 repr_cap = nn->tlv_caps.repr_cap;
int err;
nfp_repr_set_lockdep_class(netdev);
@@ -299,6 +338,55 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
+ /* Set features the lower device can support with representors */
+ if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+ netdev->hw_features = NETIF_F_HIGHDMA;
+ if (repr_cap & NFP_NET_CFG_CTRL_RXCSUM_ANY)
+ netdev->hw_features |= NETIF_F_RXCSUM;
+ if (repr_cap & NFP_NET_CFG_CTRL_TXCSUM)
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ if (repr_cap & NFP_NET_CFG_CTRL_GATHER)
+ netdev->hw_features |= NETIF_F_SG;
+ if ((repr_cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
+ repr_cap & NFP_NET_CFG_CTRL_LSO2)
+ netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (repr_cap & NFP_NET_CFG_CTRL_RSS_ANY)
+ netdev->hw_features |= NETIF_F_RXHASH;
+ if (repr_cap & NFP_NET_CFG_CTRL_VXLAN) {
+ if (repr_cap & NFP_NET_CFG_CTRL_LSO)
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+ if (repr_cap & NFP_NET_CFG_CTRL_NVGRE) {
+ if (repr_cap & NFP_NET_CFG_CTRL_LSO)
+ netdev->hw_features |= NETIF_F_GSO_GRE;
+ }
+ if (repr_cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
+ netdev->hw_enc_features = netdev->hw_features;
+
+ netdev->vlan_features = netdev->hw_features;
+
+ if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
+ netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
+ else
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+ if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features = netdev->hw_features;
+
+ /* Advertise but disable TSO by default. */
+ netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
+
+ netdev->priv_flags |= IFF_NO_QUEUE;
+ netdev->features |= NETIF_F_LLTX;
+
if (nfp_app_has_tc(app)) {
netdev->features |= NETIF_F_HW_TC;
netdev->hw_features |= NETIF_F_HW_TC;
@@ -442,7 +530,9 @@ int nfp_reprs_resync_phys_ports(struct nfp_app *app)
continue;
nfp_app_repr_preclean(app, netdev);
+ rtnl_lock();
rcu_assign_pointer(reprs->reprs[i], NULL);
+ rtnl_unlock();
synchronize_rcu();
nfp_repr_clean(repr);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index c412b94bfb97..e0f13dfe1f39 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -92,6 +92,8 @@ nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set,
unsigned int id);
void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
+void
+nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index d2c1e9ea5668..1145849ca7ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -172,7 +172,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
@@ -180,7 +180,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
vf->nn = nn;
nn->fw_ver = fw_ver;
- nn->dp.ctrl_bar = ctrl_bar;
nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 25382f8fbb70..89d17399fb5a 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -280,7 +280,7 @@
#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
/*
- * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared
+ * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
* register definitions
*/
#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
@@ -291,7 +291,7 @@
#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
/*
- * rxfliterctrl register definitions
+ * rxfilterctrl register definitions
*/
#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
@@ -783,8 +783,6 @@ static int lpc_mii_probe(struct net_device *ndev)
phy_set_max_speed(phydev, SPEED_100);
- phydev->advertising = phydev->supported;
-
pldat->link = 0;
pldat->speed = 0;
pldat->duplex = -1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index d9a03aba0e02..24a90163775e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -296,6 +296,12 @@ enum qed_wol_support {
QED_WOL_SUPPORT_PME,
};
+enum qed_db_rec_exec {
+ DB_REC_DRY_RUN,
+ DB_REC_REAL_DEAL,
+ DB_REC_ONCE,
+};
+
struct qed_hw_info {
/* PCI personality */
enum qed_pci_personality personality;
@@ -425,6 +431,14 @@ struct qed_qm_info {
u8 num_pf_rls;
};
+struct qed_db_recovery_info {
+ struct list_head list;
+
+ /* Lock to protect the doorbell recovery mechanism list */
+ spinlock_t lock;
+ u32 db_recovery_counter;
+};
+
struct storm_stats {
u32 address;
u32 len;
@@ -522,6 +536,7 @@ struct qed_simd_fp_handler {
enum qed_slowpath_wq_flag {
QED_SLOWPATH_MFW_TLV_REQ,
+ QED_SLOWPATH_PERIODIC_DB_REC,
};
struct qed_hwfn {
@@ -640,6 +655,9 @@ struct qed_hwfn {
/* L2-related */
struct qed_l2_info *p_l2_info;
+ /* Mechanism for recovering from doorbell drop */
+ struct qed_db_recovery_info db_recovery_info;
+
/* Nvm images number and attributes */
struct qed_nvm_image_info nvm_info;
@@ -652,11 +670,12 @@ struct qed_hwfn {
struct delayed_work iov_task;
unsigned long iov_task_flags;
#endif
-
- struct z_stream_s *stream;
+ struct z_stream_s *stream;
+ bool slowpath_wq_active;
struct workqueue_struct *slowpath_wq;
struct delayed_work slowpath_task;
unsigned long slowpath_task_flags;
+ u32 periodic_db_rec_count;
};
struct pci_params {
@@ -897,6 +916,12 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+/* doorbell recovery mechanism */
+void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
+ enum qed_db_rec_exec db_exec);
+bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
+
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)
@@ -931,4 +956,6 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
union qed_mfw_tlv_data *tlv_data);
void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
+
+void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 88a8576ca9ce..8f6551421945 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -66,6 +66,318 @@
static DEFINE_SPINLOCK(qm_lock);
+/******************** Doorbell Recovery *******************/
+/* The doorbell recovery mechanism consists of a list of entries which represent
+ * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
+ * entity needs to register with the mechanism and provide the parameters
+ * describing its doorbell, including a location where the last used doorbell data
+ * can be found. The doorbell execute function will traverse the list and
+ * doorbell all of the registered entries.
+ */
+struct qed_db_recovery_entry {
+ struct list_head list_entry;
+ void __iomem *db_addr;
+ void *db_data;
+ enum qed_db_rec_width db_width;
+ enum qed_db_rec_space db_space;
+ u8 hwfn_idx;
+};
+
+/* Display a single doorbell recovery entry */
+static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
+ struct qed_db_recovery_entry *db_entry,
+ char *action)
+{
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
+ action,
+ db_entry,
+ db_entry->db_addr,
+ db_entry->db_data,
+ db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
+ db_entry->db_space == DB_REC_USER ? "user" : "kernel",
+ db_entry->hwfn_idx);
+}
+
+/* Doorbell address sanity (address within doorbell bar range) */
+static bool qed_db_rec_sanity(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data)
+{
+ /* Make sure doorbell address is within the doorbell bar */
+ if (db_addr < cdev->doorbells ||
+ (u8 __iomem *)db_addr >
+ (u8 __iomem *)cdev->doorbells + cdev->db_size) {
+ WARN(true,
+ "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
+ db_addr,
+ cdev->doorbells,
+ (u8 __iomem *)cdev->doorbells + cdev->db_size);
+ return false;
+ }
+
+ /* Make sure doorbell data pointer is not null */
+ if (!db_data) {
+ WARN(true, "Illegal doorbell data pointer: %p", db_data);
+ return false;
+ }
+
+ return true;
+}
+
+/* Find hwfn according to the doorbell address */
+static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev,
+ void __iomem *db_addr)
+{
+ struct qed_hwfn *p_hwfn;
+
+ /* In CMT the doorbell bar is split down the middle between engine 0 and engine 1 */
+ if (cdev->num_hwfns > 1)
+ p_hwfn = db_addr < cdev->hwfns[1].doorbells ?
+ &cdev->hwfns[0] : &cdev->hwfns[1];
+ else
+ p_hwfn = QED_LEADING_HWFN(cdev);
+
+ return p_hwfn;
+}
+
+/* Add a new entry to the doorbell recovery mechanism */
+int qed_db_recovery_add(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space)
+{
+ struct qed_db_recovery_entry *db_entry;
+ struct qed_hwfn *p_hwfn;
+
+ /* Short-circuit VFs, for now */
+ if (IS_VF(cdev)) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return 0;
+ }
+
+ /* Sanitize doorbell address */
+ if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+ return -EINVAL;
+
+ /* Obtain hwfn from doorbell address */
+ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
+
+ /* Create entry */
+ db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL);
+ if (!db_entry) {
+ DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n");
+ return -ENOMEM;
+ }
+
+ /* Populate entry */
+ db_entry->db_addr = db_addr;
+ db_entry->db_data = db_data;
+ db_entry->db_width = db_width;
+ db_entry->db_space = db_space;
+ db_entry->hwfn_idx = p_hwfn->my_id;
+
+ /* Display */
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list);
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+
+ return 0;
+}
+
+/* Remove an entry from the doorbell recovery mechanism */
+int qed_db_recovery_del(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+ struct qed_hwfn *p_hwfn;
+ int rc = -EINVAL;
+
+ /* Short-circuit VFs, for now */
+ if (IS_VF(cdev)) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return 0;
+ }
+
+ /* Sanitize doorbell address */
+ if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+ return -EINVAL;
+
+ /* Obtain hwfn from doorbell address */
+ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_for_each_entry(db_entry,
+ &p_hwfn->db_recovery_info.list, list_entry) {
+ /* search according to db_data addr since db_addr is not unique (roce) */
+ if (db_entry->db_data == db_data) {
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
+ list_del(&db_entry->list_entry);
+ rc = 0;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+
+ if (rc == -EINVAL)
+ DP_NOTICE(p_hwfn,
+ "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
+ db_data, db_addr);
+ else
+ kfree(db_entry);
+
+ return rc;
+}
+
+/* Initialize the doorbell recovery mechanism */
+static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n");
+
+ /* Make sure db_size was set in cdev */
+ if (!p_hwfn->cdev->db_size) {
+ DP_ERR(p_hwfn->cdev, "db_size not set\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list);
+ spin_lock_init(&p_hwfn->db_recovery_info.lock);
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+
+ return 0;
+}
+
+/* Destroy the doorbell recovery mechanism */
+static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n");
+ if (!list_empty(&p_hwfn->db_recovery_info.list)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
+ while (!list_empty(&p_hwfn->db_recovery_info.list)) {
+ db_entry =
+ list_first_entry(&p_hwfn->db_recovery_info.list,
+ struct qed_db_recovery_entry,
+ list_entry);
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
+ list_del(&db_entry->list_entry);
+ kfree(db_entry);
+ }
+ }
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+}
+
+/* Print the content of the doorbell recovery mechanism */
+void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+
+ DP_NOTICE(p_hwfn,
+ "Displaying doorbell recovery database. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_for_each_entry(db_entry,
+ &p_hwfn->db_recovery_info.list, list_entry) {
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
+ }
+
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+}
+
+/* Ring the doorbell of a single doorbell recovery entry */
+static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
+ struct qed_db_recovery_entry *db_entry,
+ enum qed_db_rec_exec db_exec)
+{
+ if (db_exec != DB_REC_ONCE) {
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "%s doorbell address %p data %x\n",
+ db_exec == DB_REC_DRY_RUN ?
+ "would have rung" : "ringing",
+ db_entry->db_addr,
+ *(u32 *)db_entry->db_data);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "%s doorbell address %p data %llx\n",
+ db_exec == DB_REC_DRY_RUN ?
+ "would have rung" : "ringing",
+ db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+ }
+ }
+
+ /* Sanity */
+ if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
+ db_entry->db_data))
+ return;
+
+ /* Flush the write combined buffer. Since there are multiple doorbelling
+ * entities using the same address, if we don't flush, a transaction
+ * could be lost.
+ */
+ wmb();
+
+ /* Ring the doorbell */
+ if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+ }
+
+ /* Flush the write combined buffer. Next doorbell may come from a
+ * different entity to the same address...
+ */
+ wmb();
+}
+
+/* Traverse the doorbell recovery entry list and ring all the doorbells */
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
+ enum qed_db_rec_exec db_exec)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+
+ if (db_exec != DB_REC_ONCE) {
+ DP_NOTICE(p_hwfn,
+ "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* Track amount of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
+ }
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_for_each_entry(db_entry,
+ &p_hwfn->db_recovery_info.list, list_entry) {
+ qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
+ if (db_exec == DB_REC_ONCE)
+ break;
+ }
+
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+}
+
+/******************** Doorbell Recovery end ****************/
+
#define QED_MIN_DPIS (4)
#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
@@ -194,6 +506,9 @@ void qed_resc_free(struct qed_dev *cdev)
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
+
+ /* Destroy doorbell recovery mechanism */
+ qed_db_recovery_teardown(p_hwfn);
}
}
@@ -969,6 +1284,11 @@ int qed_resc_alloc(struct qed_dev *cdev)
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u32 n_eqes, num_cons;
+ /* Initialize the doorbell recovery mechanism */
+ rc = qed_db_recovery_setup(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* First allocate the context manager structure */
rc = qed_cxt_mngr_alloc(p_hwfn);
if (rc)
@@ -1468,6 +1788,14 @@ enum QED_ROCE_EDPM_MODE {
QED_ROCE_EDPM_MODE_DISABLE = 2,
};
+bool qed_edpm_enabled(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
+ return false;
+
+ return true;
+}
+
static int
qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
@@ -1537,13 +1865,13 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->wid_count = (u16) n_cpus;
DP_INFO(p_hwfn,
- "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
norm_regsize,
pwm_regsize,
p_hwfn->dpi_size,
p_hwfn->dpi_count,
- ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
- "disabled" : "enabled");
+ (!qed_edpm_enabled(p_hwfn)) ?
+ "disabled" : "enabled", PAGE_SIZE);
if (rc) {
DP_ERR(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index defdda1ffaa2..acccd85170aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -472,6 +472,34 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
int
qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
+/**
+ * @brief db_recovery_add - add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address of where db_data is stored
+ * @param db_width - doorbell is 32b or 64b
+ * @param db_space - doorbell recovery addresses are user or kernel space
+ */
+int qed_db_recovery_add(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space);
+
+/**
+ * @brief db_recovery_del - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address where db_data is stored. Serves as key for the
+ * entry to delete.
+ */
+int qed_db_recovery_del(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data);
+
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
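
A doorbelling queue is expected to register with the mechanism once its doorbell address and shadow data are known, and to deregister before the queue is torn down, which is exactly what the LL2 TX queue changes later in this patch do. A minimal sketch of the pattern; everything except qed_db_recovery_add/del and the DB_REC_* constants is illustrative:

struct example_queue {
	void __iomem *doorbell_addr;
	u32 db_data;			/* last value written to the doorbell */
};

static int example_queue_start(struct qed_dev *cdev, struct example_queue *q)
{
	/* ... ramrod / queue setup ... */

	/* 32-bit kernel-space doorbell; db_data is where the last
	 * doorbell value is kept so recovery can replay it.
	 */
	return qed_db_recovery_add(cdev, q->doorbell_addr, &q->db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
}

static void example_queue_stop(struct qed_dev *cdev, struct example_queue *q)
{
	/* db_data is the lookup key; db_addr alone is not unique */
	qed_db_recovery_del(cdev, q->doorbell_addr, &q->db_data);

	/* ... queue teardown ... */
}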
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index b38e12c9de9d..b13cfb449d8f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12655,6 +12655,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
@@ -12814,6 +12815,11 @@ struct public_drv_mb {
union drv_union_data union_data;
};
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
+
enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LINK_CHANGE,
MFW_DRV_MSG_FLR_FW_ACK_FAILED,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index b22f464ea3fa..92340919d852 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -361,29 +361,147 @@ static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
return 0;
}
-#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
-#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
-#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f)
-#define QED_DORQ_ATTENTION_SIZE_SHIFT (16)
+#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
+#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define QED_DORQ_ATTENTION_SIZE_SHIFT (16)
+
+#define QED_DB_REC_COUNT 1000
+#define QED_DB_REC_INTERVAL 100
+
+static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 count = QED_DB_REC_COUNT;
+ u32 usage = 1;
+
+ /* Wait for usage to reach zero or for the count to run out. This is
+ * necessary since EDPM doorbell transactions can take multiple 64b
+ * cycles, and as such can be split over the PCI bus. Possibly, a
+ * doorbell drop can happen with half an EDPM in the queue and the
+ * other half dropped. Another EDPM doorbell to the same address (from
+ * the doorbell recovery mechanism or from the doorbelling entity)
+ * could then have its first half dropped and its second half
+ * interpreted as a continuation of the first. To prevent such
+ * malformed doorbells from reaching the device, flush the queue before
+ * releasing the overflow sticky indication.
+ */
+ while (count-- && usage) {
+ usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
+ udelay(QED_DB_REC_INTERVAL);
+ }
+
+ /* should have been depleted by now */
+ if (usage) {
+ DP_NOTICE(p_hwfn->cdev,
+ "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
+ QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 overflow;
+ int rc;
+
+ overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
+ if (!overflow) {
+ qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+ return 0;
+ }
+
+ if (qed_edpm_enabled(p_hwfn)) {
+ rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
+ /* Flush any pending (e)dpm as they may never arrive */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
+ /* Release overflow sticky indication (stop silently dropping everything) */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+
+ /* Repeat all last doorbells (doorbell drop recovery) */
+ qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+
+ return 0;
+}
+
static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
- u32 reason;
+ u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+ struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ int rc;
- reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
- QED_DORQ_ATTENTION_REASON_MASK;
- if (reason) {
- u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- DORQ_REG_DB_DROP_DETAILS);
+ int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+ DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
- DP_INFO(p_hwfn->cdev,
- "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
- qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- DORQ_REG_DB_DROP_DETAILS_ADDRESS),
- (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
- GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
- reason);
+ /* int_sts may be zero since all PFs were interrupted for doorbell
+ * overflow but another one already handled it. Can abort here. If
+ * this PF also requires overflow recovery, we will be interrupted again.
+ * The masked almost full indication may also be set. Ignoring.
+ */
+ if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
+ return 0;
+
+ /* check if db_drop or overflow happened */
+ if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
+ /* Obtain data about db drop/overflow */
+ first_drop_reason = qed_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_REASON) &
+ QED_DORQ_ATTENTION_REASON_MASK;
+ details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
+ address = qed_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS);
+ all_drops_reason = qed_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_REASON);
+
+ /* Log info */
+ DP_NOTICE(p_hwfn->cdev,
+ "Doorbell drop occurred\n"
+ "Address\t\t0x%08x\t(second BAR address)\n"
+ "FID\t\t0x%04x\t\t(Opaque FID)\n"
+ "Size\t\t0x%04x\t\t(in bytes)\n"
+ "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
+ "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
+ address,
+ GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
+ GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
+ first_drop_reason, all_drops_reason);
+
+ rc = qed_db_rec_handler(p_hwfn, p_ptt);
+ qed_periodic_db_rec_start(p_hwfn);
+ if (rc)
+ return rc;
+
+ /* Clear the doorbell drop details and prepare for next drop */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
+
+ /* Mark interrupt as handled (note: even if drop was due to a different
+ * reason than overflow we mark as handled)
+ */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_INT_STS_WR,
+ DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
+
+ /* If there are no indications other than drop indications, success */
+ if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
+ DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
+ return 0;
}
+ /* Some other indication was present - non recoverable */
+ DP_INFO(p_hwfn, "DORQ fatal attention\n");
+
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 54b4ee0acfd7..d81a62ebd524 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -190,6 +190,16 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
*/
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
+/**
+ * @brief - Doorbell Recovery handler.
+ * Run DB_REAL_DEAL doorbell recovery in case of PF overflow
+ * (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
#define QED_CAU_DEF_RX_TIMER_RES 0
#define QED_CAU_DEF_TX_TIMER_RES 0
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c6f4bab67a5f..90afd514ffe1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1085,7 +1085,14 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
- return qed_spq_post(p_hwfn, p_ent, NULL);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
+ &p_tx->db_msg, DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ return rc;
}
static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
@@ -1119,9 +1126,11 @@ static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1542,6 +1551,13 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(p_ll2_conn->cid,
DQ_DEMS_LEGACY);
+ /* prepare db data */
+ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_TX_BD_PROD_CMD);
+ p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
@@ -1780,7 +1796,6 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct qed_ll2_tx_packet *p_pkt = NULL;
- struct core_db_data db_msg = { 0, 0, 0 };
u16 bd_prod;
/* If there are missing BDs, don't do anything now */
@@ -1809,24 +1824,19 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
}
- SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
- SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_CORE_TX_BD_PROD_CMD);
- db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
- db_msg.spq_prod = cpu_to_le16(bd_prod);
+ p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);
/* Make sure the BDs data is updated before ringing the doorbell */
wmb();
- DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
+ DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
p_ll2_conn->queue_id,
p_ll2_conn->cid,
- p_ll2_conn->input.conn_type, db_msg.spq_prod);
+ p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
}
int qed_ll2_prepare_tx_packet(void *cxt,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 1a5c1ae01474..5f01fbd3c073 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -103,6 +103,7 @@ struct qed_ll2_tx_queue {
struct qed_ll2_tx_packet cur_completing_packet;
u16 cur_completing_bd_idx;
void __iomem *doorbell_addr;
+ struct core_db_data db_msg;
u16 bds_idx;
u16 cur_send_frag_num;
u16 cur_completing_frag_num;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index fff7f04d4525..6adf5bda9811 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -966,9 +966,47 @@ static void qed_update_pf_params(struct qed_dev *cdev,
}
}
+#define QED_PERIODIC_DB_REC_COUNT 100
+#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
+#define QED_PERIODIC_DB_REC_INTERVAL \
+ msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
+#define QED_PERIODIC_DB_REC_WAIT_COUNT 10
+#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
+ (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
+
+static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
+ enum qed_slowpath_wq_flag wq_flag,
+ unsigned long delay)
+{
+ if (!hwfn->slowpath_wq_active)
+ return -EINVAL;
+
+ /* Memory barrier for setting atomic bit */
+ smp_mb__before_atomic();
+ set_bit(wq_flag, &hwfn->slowpath_task_flags);
+ smp_mb__after_atomic();
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
+
+ return 0;
+}
+
+void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
+{
+ /* Reset periodic Doorbell Recovery counter */
+ p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;
+
+ /* Don't schedule periodic Doorbell Recovery if already scheduled */
+ if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
+ &p_hwfn->slowpath_task_flags))
+ return;
+
+ qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
+ QED_PERIODIC_DB_REC_INTERVAL);
+}
+
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
- int i;
+ int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;
if (IS_VF(cdev))
return;
@@ -977,6 +1015,15 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev)
if (!cdev->hwfns[i].slowpath_wq)
continue;
+ /* Stop queuing new delayed works */
+ cdev->hwfns[i].slowpath_wq_active = false;
+
+ /* Wait until the last periodic doorbell recovery is executed */
+ while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
+ &cdev->hwfns[i].slowpath_task_flags) &&
+ sleep_count--)
+ msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);
+
flush_workqueue(cdev->hwfns[i].slowpath_wq);
destroy_workqueue(cdev->hwfns[i].slowpath_wq);
}
@@ -989,7 +1036,10 @@ static void qed_slowpath_task(struct work_struct *work)
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
if (!ptt) {
- queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+ if (hwfn->slowpath_wq_active)
+ queue_delayed_work(hwfn->slowpath_wq,
+ &hwfn->slowpath_task, 0);
+
return;
}
@@ -997,6 +1047,15 @@ static void qed_slowpath_task(struct work_struct *work)
&hwfn->slowpath_task_flags))
qed_mfw_process_tlv_req(hwfn, ptt);
+ if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
+ &hwfn->slowpath_task_flags)) {
+ qed_db_rec_handler(hwfn, ptt);
+ if (hwfn->periodic_db_rec_count--)
+ qed_slowpath_delayed_work(hwfn,
+ QED_SLOWPATH_PERIODIC_DB_REC,
+ QED_PERIODIC_DB_REC_INTERVAL);
+ }
+
qed_ptt_release(hwfn, ptt);
}
@@ -1023,6 +1082,7 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
}
INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
+ hwfn->slowpath_wq_active = true;
}
return 0;
@@ -1939,21 +1999,30 @@ exit:
* 0B | 0x3 [command index] |
* 4B | b'0: check_response? | b'1-31 reserved |
* 8B | File-type | reserved |
+ * 12B | Image length in bytes |
* \----------------------------------------------------------------------/
* Start a new file of the provided type
*/
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
const u8 **data, bool *check_resp)
{
+ u32 file_type, file_size = 0;
int rc;
*data += 4;
*check_resp = !!(**data & BIT(0));
*data += 4;
+ file_type = **data;
DP_VERBOSE(cdev, NETIF_MSG_DRV,
- "About to start a new file of type %02x\n", **data);
- rc = qed_mcp_nvm_put_file_begin(cdev, **data);
+ "About to start a new file of type %02x\n", file_type);
+ if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
+ *data += 4;
+ file_size = *((u32 *)(*data));
+ }
+
+ rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
+ (u8 *)(&file_size), 4);
*data += 4;
return rc;
@@ -2315,6 +2384,8 @@ const struct qed_common_ops qed_common_ops_pass = {
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
.update_wol = &qed_update_wol,
+ .db_recovery_add = &qed_db_recovery_add,
+ .db_recovery_del = &qed_db_recovery_del,
.read_module_eeprom = &qed_read_module_eeprom,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index a96364df4320..e7f18e34ff0d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1619,7 +1619,7 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_sp_pf_update_stag(p_hwfn);
}
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
/* Acknowledge the MFW */
@@ -1641,7 +1641,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
OEM_CFG_CHANNEL_TYPE_OFFSET;
if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
- DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Incorrect UFP Channel type %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
if (val == OEM_CFG_SCHED_TYPE_ETS) {
@@ -1650,7 +1652,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
} else {
p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
- DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Unknown UFP scheduling mode %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
}
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
@@ -1665,13 +1669,15 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
} else {
p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
- DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Unknown Host priority control %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
}
DP_NOTICE(p_hwfn,
- "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
- p_hwfn->ufp_info.mode,
- p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
+ "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
+ p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+ p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
}
static int
@@ -2739,24 +2745,6 @@ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
return 0;
}
-int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt;
- u32 resp, param;
- int rc;
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return -EBUSY;
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
- &resp, &param);
- cdev->mcp_nvm_resp = resp;
- qed_ptt_release(p_hwfn, p_ptt);
-
- return rc;
-}
-
int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len)
{
@@ -2770,6 +2758,9 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
return -EBUSY;
switch (cmd) {
+ case QED_PUT_FILE_BEGIN:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+ break;
case QED_PUT_FILE_DATA:
nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
break;
@@ -2782,10 +2773,14 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
goto out;
}
+ buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
while (buf_idx < len) {
- buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
- nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
- addr) + buf_idx;
+ if (cmd == QED_PUT_FILE_BEGIN)
+ nvm_offset = addr;
+ else
+ nvm_offset = ((buf_size <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
+ buf_idx;
rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
&resp, &param, buf_size,
(u32 *)&p_buf[buf_idx]);
@@ -2810,7 +2805,19 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
usleep_range(1000, 2000);
- buf_idx += buf_size;
+ /* For MBI upgrade, MFW response includes the next buffer offset
+ * to be delivered to MFW.
+ */
+ if (param && cmd == QED_PUT_FILE_DATA) {
+ buf_idx = QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+ buf_size = QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+ } else {
+ buf_idx += buf_size;
+ buf_size = min_t(u32, (len - buf_idx),
+ MCP_DRV_NVM_BUF_LEN);
+ }
}
cdev->mcp_nvm_resp = resp;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 1adfe52b3905..eddf67798d6f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -543,16 +543,6 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Put file begin
- *
- * @param cdev
- * @param addr - nvm offset
- *
- * @return int - 0 - operation was successful.
- */
-int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr);
-
-/**
* @brief Check latest response
*
* @param cdev
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 2440970882c4..8939ed6e08b7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -1243,6 +1243,56 @@
0x1701534UL
#define TSEM_REG_DBG_FORCE_FRAME \
0x1701538UL
+#define DORQ_REG_PF_USAGE_CNT \
+ 0x1009c0UL
+#define DORQ_REG_PF_OVFL_STICKY \
+ 0x1009d0UL
+#define DORQ_REG_DPM_FORCE_ABORT \
+ 0x1009d8UL
+#define DORQ_REG_INT_STS \
+ 0x100180UL
+#define DORQ_REG_INT_STS_ADDRESS_ERROR \
+ (0x1UL << 0)
+#define DORQ_REG_INT_STS_WR \
+ 0x100188UL
+#define DORQ_REG_DB_DROP_DETAILS_REL \
+ 0x100a28UL
+#define DORQ_REG_INT_STS_ADDRESS_ERROR_SHIFT \
+ 0
+#define DORQ_REG_INT_STS_DB_DROP \
+ (0x1UL << 1)
+#define DORQ_REG_INT_STS_DB_DROP_SHIFT \
+ 1
+#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR \
+ (0x1UL << 2)
+#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR_SHIFT \
+ 2
+#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL\
+ (0x1UL << 3)
+#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL_SHIFT \
+ 3
+#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR \
+ (0x1UL << 4)
+#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR_SHIFT \
+ 4
+#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR \
+ (0x1UL << 5)
+#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR_SHIFT \
+ 5
+#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR \
+ (0x1UL << 6)
+#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR_SHIFT \
+ 6
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR \
+ (0x1UL << 7)
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR_SHIFT \
+ 7
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR \
+ (0x1UL << 8)
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR_SHIFT \
+ 8
+#define DORQ_REG_DB_DROP_DETAILS_REASON \
+ 0x100a20UL
#define MSEM_REG_DBG_SELECT \
0x1801528UL
#define MSEM_REG_DBG_DWORD_ENABLE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 3157c0d99441..4179c9013fc6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -227,7 +227,9 @@ struct qed_spq {
u32 comp_count;
u32 cid;
- qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
+ u32 db_addr_offset;
+ struct core_db_data db_data;
+ qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
};
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 0a9c5bb0fa48..eb88bbc6b193 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -252,9 +252,9 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct core_db_data *p_db_data = &p_spq->db_data;
u16 echo = qed_chain_get_prod_idx(p_chain);
struct slow_path_element *elem;
- struct core_db_data db;
p_ent->elem.hdr.echo = cpu_to_le16(echo);
elem = qed_chain_produce(p_chain);
@@ -266,27 +266,22 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
*elem = p_ent->elem; /* struct assignment */
/* send a doorbell on the slow hwfn session */
- memset(&db, 0, sizeof(db));
- SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
- SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_CORE_SPQ_PROD_CMD);
- db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
- db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+ p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
/* make sure the SPQE is updated before the doorbell */
wmb();
- DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+ DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
/* make sure doorbell is rang */
wmb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
- qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
- p_spq->cid, db.params, db.agg_flags,
- qed_chain_get_prod_idx(p_chain));
+ p_spq->db_addr_offset,
+ p_spq->cid,
+ p_db_data->params,
+ p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));
return 0;
}
@@ -490,8 +485,11 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_virt = NULL;
+ struct core_db_data *p_db_data;
+ void __iomem *db_addr;
dma_addr_t p_phys = 0;
u32 i, capacity;
+ int rc;
INIT_LIST_HEAD(&p_spq->pending);
INIT_LIST_HEAD(&p_spq->completion_pending);
@@ -528,6 +526,25 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
/* reset the chain itself */
qed_chain_reset(&p_spq->chain);
+
+ /* Initialize the address/data of the SPQ doorbell */
+ p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
+ p_db_data = &p_spq->db_data;
+ memset(p_db_data, 0, sizeof(*p_db_data));
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* Register the SPQ doorbell with the doorbell recovery mechanism */
+ db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
+ p_spq->db_addr_offset);
+ rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
@@ -575,11 +592,17 @@ spq_allocate_fail:
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
+ void __iomem *db_addr;
u32 capacity;
if (!p_spq)
return;
+ /* Delete the SPQ doorbell from the doorbell recovery mechanism */
+ db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
+ p_spq->db_addr_offset);
+ qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);
+
if (p_spq->p_virt) {
capacity = qed_chain_get_capacity(&p_spq->chain);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index de98a974673b..613249d1e967 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -168,6 +168,13 @@ struct qede_ptp;
#define QEDE_RFS_MAX_FLTR 256
+enum qede_flags_bit {
+ QEDE_FLAGS_IS_VF = 0,
+ QEDE_FLAGS_LINK_REQUESTED,
+ QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ QEDE_FLAGS_TX_TIMESTAMPING_EN
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -177,10 +184,7 @@ struct qede_dev {
u8 dp_level;
unsigned long flags;
-#define QEDE_FLAG_IS_VF BIT(0)
-#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
-#define QEDE_TX_TIMESTAMPING_EN BIT(1)
-#define QEDE_FLAGS_PTP_TX_IN_PRORGESS BIT(2)
+#define IS_VF(edev) (test_bit(QEDE_FLAGS_IS_VF, &(edev)->flags))
const struct qed_eth_ops *ops;
struct qede_ptp *ptp;
@@ -377,6 +381,7 @@ struct qede_tx_queue {
u64 xmit_pkts;
u64 stopped_cnt;
+ u64 tx_mem_alloc_err;
__le16 *hw_cons_ptr;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8cbbd628fd73..16331c6c6fa7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -73,6 +73,7 @@ static const struct {
} qede_tqstats_arr[] = {
QEDE_TQSTAT(xmit_pkts),
QEDE_TQSTAT(stopped_cnt),
+ QEDE_TQSTAT(tx_mem_alloc_err),
};
#define QEDE_STAT_OFFSET(stat_name, type, base) \
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1a78027de071..bdf816fe5a16 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1466,8 +1466,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
if (qede_pkt_req_lin(skb, xmit_type)) {
if (skb_linearize(skb)) {
- DP_NOTICE(edev,
- "SKB linearization failed - silently dropping this SKB\n");
+ txq->tx_mem_alloc_err++;
+
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 46d0f2eaa0c0..5a74fcbdbc2b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1086,7 +1086,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
}
if (is_vf)
- edev->flags |= QEDE_FLAG_IS_VF;
+ set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
qede_init_ndev(edev);
@@ -1774,6 +1774,10 @@ static int qede_drain_txq(struct qede_dev *edev,
static int qede_stop_txq(struct qede_dev *edev,
struct qede_tx_queue *txq, int rss_id)
{
+ /* delete doorbell from doorbell recovery mechanism */
+ edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
+ &txq->tx_db);
+
return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}
@@ -1910,6 +1914,11 @@ static int qede_start_txq(struct qede_dev *edev,
DQ_XCM_ETH_TX_BD_PROD_CMD);
txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ /* register doorbell with doorbell recovery mechanism */
+ rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
+ &txq->tx_db, DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+
return rc;
}
@@ -2057,6 +2066,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
if (!is_locked)
__qede_lock(edev);
+ clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
+
edev->state = QEDE_STATE_CLOSED;
qede_rdma_dev_event_close(edev);
@@ -2163,6 +2174,8 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
+ set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
+
/* Ask for link-up using current configuration */
memset(&link_params, 0, sizeof(link_params));
link_params.link_up = true;
@@ -2258,8 +2271,8 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
{
struct qede_dev *edev = dev;
- if (!netif_running(edev->ndev)) {
- DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+ if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
+ DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
return;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 013ff567283c..5f3f42a25361 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -223,12 +223,12 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
switch (ptp->tx_type) {
case HWTSTAMP_TX_ON:
- edev->flags |= QEDE_TX_TIMESTAMPING_EN;
+ set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_ON;
break;
case HWTSTAMP_TX_OFF:
- edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
+ clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
@@ -518,7 +518,7 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
return;
- if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
+ if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
DP_NOTICE(edev,
"Tx timestamping was not enabled, this packet will not be timestamped\n");
} else if (unlikely(ptp->tx_skb)) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d42ba2293d8c..16d0479f6891 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2993,10 +2993,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
{
int i;
- struct cmd_desc_type0 *tx_desc_info;
for (i = 0; i < tx_ring->num_desc; i++) {
- tx_desc_info = &tx_ring->desc_head[i];
pr_info("TX Desc: %d\n", i);
print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
&tx_ring->desc_head[i],
@@ -4008,19 +4006,12 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
int queue_type)
{
struct net_device *netdev = adapter->netdev;
- u8 max_hw_rings = 0;
char buf[8];
- int cur_rings;
- if (queue_type == QLCNIC_RX_QUEUE) {
- max_hw_rings = adapter->max_sds_rings;
- cur_rings = adapter->drv_sds_rings;
+ if (queue_type == QLCNIC_RX_QUEUE)
strcpy(buf, "SDS");
- } else if (queue_type == QLCNIC_TX_QUEUE) {
- max_hw_rings = adapter->max_tx_rings;
- cur_rings = adapter->drv_tx_rings;
+ else
strcpy(buf, "Tx");
- }
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 50eaafa3eaba..af3b037fa442 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1067,9 +1067,6 @@ static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err = -EIO;
- u8 op;
-
- op = cmd->req.arg[1] & 0xff;
cmd->req.arg[1] |= vf->vp->handle << 16;
cmd->req.arg[1] |= BIT_31;
@@ -1339,14 +1336,13 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
- u8 cmd_op, mode = vp->vlan_mode;
+ u8 mode = vp->vlan_mode;
struct qlcnic_adapter *adapter;
struct qlcnic_sriov *sriov;
adapter = vf->adapter;
sriov = adapter->ahw->sriov;
- cmd_op = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] |= 1 << 25;
/* For 84xx adapter in case of PVID , PFD should send vlan mode as
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index a9f1bc013364..bcb890b18a94 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -61,6 +61,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = {
"Transmit ring full",
"SPI errors",
"Write verify errors",
+ "Buffer available errors",
};
#ifdef CONFIG_DEBUG_FS
@@ -125,19 +126,7 @@ qcaspi_info_show(struct seq_file *s, void *what)
return 0;
}
-
-static int
-qcaspi_info_open(struct inode *inode, struct file *file)
-{
- return single_open(file, qcaspi_info_show, inode->i_private);
-}
-
-static const struct file_operations qcaspi_info_ops = {
- .open = qcaspi_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qcaspi_info);
void
qcaspi_init_device_debugfs(struct qcaspi *qca)
@@ -153,7 +142,7 @@ qcaspi_init_device_debugfs(struct qcaspi *qca)
return;
}
debugfs_create_file("info", S_IFREG | 0444, device_root, qca,
- &qcaspi_info_ops);
+ &qcaspi_info_fops);
}
void
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index d5310504f436..97f92953bdb9 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -289,6 +289,14 @@ qcaspi_transmit(struct qcaspi *qca)
qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);
+ if (available > QCASPI_HW_BUF_LEN) {
+ /* This can only happen due to interference on the SPI line,
+ * so retry later ...
+ */
+ qca->stats.buf_avail_err++;
+ return -1;
+ }
+
while (qca->txr.skb[qca->txr.head]) {
pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;
@@ -355,7 +363,13 @@ qcaspi_receive(struct qcaspi *qca)
netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
available);
- if (available == 0) {
+ if (available > QCASPI_HW_BUF_LEN) {
+ /* This can only happen due to interference on the SPI line,
+ * so retry later ...
+ */
+ qca->stats.buf_avail_err++;
+ return -1;
+ } else if (available == 0) {
netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
return -1;
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 2d2c49726492..eb9af45fcc5e 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -74,6 +74,7 @@ struct qcaspi_stats {
u64 ring_full;
u64 spi_err;
u64 write_verify_failed;
+ u64 buf_avail_err;
};
struct qcaspi {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 5f4e447c5dce..b8bbee645f51 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -301,10 +301,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
struct rmnet_port *port;
u16 mux_id;
+ if (!dev)
+ return -ENODEV;
+
real_dev = __dev_get_by_index(dev_net(dev),
nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
+ if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
return -ENODEV;
port = rmnet_get_port_rtnl(real_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 3ee8ae9b6838..f6cf59aee212 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -20,17 +20,12 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
struct rmnet_port *port,
int enable)
{
- struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
- u16 ip_family;
- u16 fc_seq;
- u32 qos_id;
u8 mux_id;
int r;
mux_id = RMNET_MAP_GET_MUX_ID(skb);
- cmd = RMNET_MAP_GET_CMD_START(skb);
if (mux_id >= RMNET_MAX_LOGICAL_EP) {
kfree_skb(skb);
@@ -45,10 +40,6 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
vnd = ep->egress_dev;
- ip_family = cmd->flow_control.ip_family;
- fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
- qos_id = ntohl(cmd->flow_control.qos_id);
-
/* Ignore the ip family and pass the sequence number for both v4 and v6
* sequence. User space does not support creating dedicated flows for
* the 2 protocols
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fe2d754c6c8e..99bc3de906e2 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -56,13 +56,6 @@
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
-#define TX_SLOTS_AVAIL(tp) \
- (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
-
-/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
-#define TX_FRAGS_READY_FOR(tp,nr_frags) \
- (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
@@ -212,24 +205,24 @@ enum cfg_version {
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
- { PCI_VENDOR_ID_DLINK, 0x4300,
- PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 },
+ { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x8167), RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x8168), RTL_CFG_1 },
+ { PCI_VDEVICE(NCUBE, 0x8168), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x8169), RTL_CFG_0 },
+ { PCI_VENDOR_ID_DLINK, 0x4300,
+ PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
+ { PCI_VDEVICE(DLINK, 0x4300), RTL_CFG_0 },
+ { PCI_VDEVICE(DLINK, 0x4302), RTL_CFG_0 },
+ { PCI_VDEVICE(AT, 0xc107), RTL_CFG_0 },
+ { PCI_VDEVICE(USR, 0x0116), RTL_CFG_0 },
{ PCI_VENDOR_ID_LINKSYS, 0x1032,
PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
{ 0x0001, 0x8168,
PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
- {0,},
+ {}
};
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
@@ -603,7 +596,6 @@ struct RxDesc {
struct ring_info {
struct sk_buff *skb;
u32 len;
- u8 __pad[sizeof(void *) - sizeof(u32)];
};
struct rtl8169_counters {
@@ -661,7 +653,7 @@ struct rtl8169_private {
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
- u16 event_slow;
+ u16 irq_mask;
const struct rtl_coalesce_info *coalesce_info;
struct clk *clk;
@@ -1102,23 +1094,6 @@ static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
return rtl_eri_read(tp, reg, ERIAR_OOB);
}
-static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- return r8168dp_ocp_read(tp, mask, reg);
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- return r8168ep_ocp_read(tp, mask, reg);
- default:
- BUG();
- return ~0;
- }
-}
-
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
u32 data)
{
@@ -1134,30 +1109,11 @@ static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
data, ERIAR_OOB);
}
-static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- r8168dp_ocp_write(tp, mask, reg, data);
- break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- r8168ep_ocp_write(tp, mask, reg, data);
- break;
- default:
- BUG();
- break;
- }
-}
-
-static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
+static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC);
- ocp_write(tp, 0x1, 0x30, 0x00000001);
+ r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
}
#define OOB_CMD_RESET 0x00
@@ -1169,18 +1125,18 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
}
-DECLARE_RTL_COND(rtl_ocp_read_cond)
+DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
{
u16 reg;
reg = rtl8168_get_ocp_reg(tp);
- return ocp_read(tp, 0x0f, reg) & 0x00000800;
+ return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
}
DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
{
- return ocp_read(tp, 0x0f, 0x124) & 0x00000001;
+ return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
}
DECLARE_RTL_COND(rtl_ocp_tx_cond)
@@ -1198,14 +1154,15 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
+ r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
+ rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
- ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
- ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
+ r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x30,
+ r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
@@ -1230,15 +1187,16 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
+ r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+ rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
- ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
- ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
+ r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x30,
+ r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
@@ -1265,12 +1223,12 @@ static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return !!(ocp_read(tp, 0x0f, reg) & 0x00008000);
+ return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
}
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001);
+ return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
}
static bool r8168_check_dash(struct rtl8169_private *tp)
@@ -1325,27 +1283,20 @@ static u16 rtl_get_events(struct rtl8169_private *tp)
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
RTL_W16(tp, IntrStatus, bits);
- mmiowb();
}
static void rtl_irq_disable(struct rtl8169_private *tp)
{
RTL_W16(tp, IntrMask, 0);
- mmiowb();
-}
-
-static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
-{
- RTL_W16(tp, IntrMask, bits);
}
#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
#define RTL_EVENT_NAPI_TX (TxOK | TxErr)
#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
-static void rtl_irq_enable_all(struct rtl8169_private *tp)
+static void rtl_irq_enable(struct rtl8169_private *tp)
{
- rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
+ RTL_W16(tp, IntrMask, tp->irq_mask);
}
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
@@ -2051,8 +2002,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static void rtl8169_get_mac_version(struct rtl8169_private *tp,
- u8 default_version)
+static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{
/*
* The driver currently handles the 8168Bf and the 8168Be identically
@@ -2066,120 +2016,107 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
* (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
*/
static const struct rtl_mac_info {
- u32 mask;
- u32 val;
- int mac_version;
+ u16 mask;
+ u16 val;
+ u16 mac_version;
} mac_info[] = {
/* 8168EP family. */
- { 0x7cf00000, 0x50200000, RTL_GIGA_MAC_VER_51 },
- { 0x7cf00000, 0x50100000, RTL_GIGA_MAC_VER_50 },
- { 0x7cf00000, 0x50000000, RTL_GIGA_MAC_VER_49 },
+ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
+ { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
+ { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
/* 8168H family. */
- { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 },
- { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 },
+ { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
+ { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
/* 8168G family. */
- { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 },
- { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
- { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
- { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
+ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
+ { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
+ { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
+ { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
/* 8168F family. */
- { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
- { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
- { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
+ { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
+ { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
+ { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
/* 8168E family. */
- { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
- { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
- { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
+ { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 },
+ { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 },
+ { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 },
/* 8168D family. */
- { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
- { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
+ { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 },
+ { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
/* 8168DP family. */
- { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
- { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
- { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
+ { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
+ { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
/* 8168C family. */
- { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
- { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
- { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
- { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
- { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
- { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
- { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
+ { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 },
+ { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 },
+ { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 },
+ { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 },
+ { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 },
+ { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 },
+ { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
/* 8168B family. */
- { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
- { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
- { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
+ { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 },
+ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
+ { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
- { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
- { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
- { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
- { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
- { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
- { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
- { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
- { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
- { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
- { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
- { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
- { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
- { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
- { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
+ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
+ { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 },
+ { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 },
+ { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
+ { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
+ { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
+ { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
+ { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
/* FIXME: where did these entries come from ? -- FR */
- { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
- { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
+ { 0xfc8, 0x388, RTL_GIGA_MAC_VER_15 },
+ { 0xfc8, 0x308, RTL_GIGA_MAC_VER_14 },
/* 8110 family. */
- { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
- { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
- { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
- { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
- { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
- { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
+ { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
+ { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 },
+ { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
+ { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
+ { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
+ { 0xfc8, 0x000, RTL_GIGA_MAC_VER_01 },
/* Catch-all */
- { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
+ { 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
const struct rtl_mac_info *p = mac_info;
- u32 reg;
+ u16 reg = RTL_R32(tp, TxConfig) >> 20;
- reg = RTL_R32(tp, TxConfig);
while ((reg & p->mask) != p->val)
p++;
tp->mac_version = p->mac_version;
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- dev_notice(tp_to_dev(tp),
- "unknown MAC, using family default\n");
- tp->mac_version = default_version;
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
- tp->mac_version = tp->supports_gmii ?
- RTL_GIGA_MAC_VER_42 :
- RTL_GIGA_MAC_VER_43;
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
- tp->mac_version = tp->supports_gmii ?
- RTL_GIGA_MAC_VER_45 :
- RTL_GIGA_MAC_VER_47;
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
- tp->mac_version = tp->supports_gmii ?
- RTL_GIGA_MAC_VER_46 :
- RTL_GIGA_MAC_VER_48;
+ dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf);
+ } else if (!tp->supports_gmii) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_42)
+ tp->mac_version = RTL_GIGA_MAC_VER_43;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_45)
+ tp->mac_version = RTL_GIGA_MAC_VER_47;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_46)
+ tp->mac_version = RTL_GIGA_MAC_VER_48;
}
}
-static void rtl8169_print_mac_version(struct rtl8169_private *tp)
-{
- netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version);
-}
-
struct phy_reg {
u16 reg;
u16 val;
@@ -3902,8 +3839,6 @@ static void rtl_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_print_mac_version(tp);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_01:
break;
@@ -4643,7 +4578,7 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_mode(tp->dev);
/* no early-rx interrupts */
RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
- rtl_irq_enable_all(tp);
+ rtl_irq_enable(tp);
}
static void rtl_hw_start_8169(struct rtl8169_private *tp)
@@ -5394,8 +5329,8 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
- tp->event_slow |= RxFIFOOver | PCSTimeout;
- tp->event_slow &= ~RxOverflow;
+ tp->irq_mask |= RxFIFOOver;
+ tp->irq_mask &= ~RxOverflow;
}
switch (tp->mac_version) {
@@ -5632,7 +5567,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
static void rtl_hw_start_8101(struct rtl8169_private *tp)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
- tp->event_slow &= ~RxFIFOOver;
+ tp->irq_mask &= ~RxFIFOOver;
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16)
@@ -5888,6 +5823,16 @@ static void rtl8169_tx_timeout(struct net_device *dev)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
+static __le32 rtl8169_get_txd_opts1(u32 opts0, u32 len, unsigned int entry)
+{
+ u32 status = opts0 | len;
+
+ if (entry == NUM_TX_DESC - 1)
+ status |= RingEnd;
+
+ return cpu_to_le32(status);
+}
+
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
u32 *opts)
{
@@ -5900,7 +5845,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
const skb_frag_t *frag = info->frags + cur_frag;
dma_addr_t mapping;
- u32 status, len;
+ u32 len;
void *addr;
entry = (entry + 1) % NUM_TX_DESC;
@@ -5916,11 +5861,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
goto err_out;
}
- /* Anti gcc 2.95.3 bugware (sic) */
- status = opts[0] | len |
- (RingEnd * !((entry + 1) % NUM_TX_DESC));
-
- txd->opts1 = cpu_to_le32(status);
+ txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
txd->opts2 = cpu_to_le32(opts[1]);
txd->addr = cpu_to_le64(mapping);
@@ -6108,6 +6049,15 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return true;
}
+static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
+ unsigned int nr_frags)
+{
+ unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
+
+ /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+ return slots_avail > nr_frags;
+}
+
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -6116,11 +6066,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct TxDesc *txd = tp->TxDescArray + entry;
struct device *d = tp_to_dev(tp);
dma_addr_t mapping;
- u32 status, len;
- u32 opts[2];
+ u32 opts[2], len;
+ bool stop_queue;
int frags;
- if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
+ if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
@@ -6159,32 +6109,26 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
- /* Anti gcc 2.95.3 bugware (sic) */
- status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
- txd->opts1 = cpu_to_le32(status);
+ txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
/* Force all memory writes to complete before notifying device */
wmb();
tp->cur_tx += frags + 1;
- RTL_W8(tp, TxPoll, NPQ);
+ stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+ if (unlikely(stop_queue))
+ netif_stop_queue(dev);
- mmiowb();
+ if (__netdev_sent_queue(dev, skb->len, skb->xmit_more))
+ RTL_W8(tp, TxPoll, NPQ);
- if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
- /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
- * not miss a ring update when it notices a stopped queue.
- */
- smp_wmb();
- netif_stop_queue(dev);
+ if (unlikely(stop_queue)) {
/* Sync with rtl_tx:
* - publish queue status and cur_tx ring index (write barrier)
* - refresh dirty_tx ring index (read barrier).
@@ -6193,7 +6137,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
* can't.
*/
smp_mb();
- if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
+ if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
netif_wake_queue(dev);
}
@@ -6257,7 +6201,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
-static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
+ int budget)
{
unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
@@ -6285,7 +6230,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
if (status & LastFrag) {
pkts_compl++;
bytes_compl += tx_skb->skb->len;
- dev_consume_skb_any(tx_skb->skb);
+ napi_consume_skb(tx_skb->skb, budget);
tx_skb->skb = NULL;
}
dirty_tx++;
@@ -6310,7 +6255,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
*/
smp_mb();
if (netif_queue_stopped(dev) &&
- TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+ rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
/*
@@ -6460,8 +6405,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
u16 status = rtl_get_events(tp);
+ u16 irq_mask = RTL_R16(tp, IntrMask);
- if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow)))
+ if (status == 0xffff || !(status & irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -6528,13 +6474,11 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
work_done = rtl_rx(dev, tp, (u32) budget);
- rtl_tx(dev, tp);
+ rtl_tx(dev, tp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
-
- rtl_irq_enable_all(tp);
- mmiowb();
+ rtl_irq_enable(tp);
}
return work_done;
@@ -6584,7 +6528,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
phy_set_max_speed(phydev, SPEED_100);
/* Ensure to advertise everything, incl. pause */
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
phy_attached_info(phydev);
@@ -6824,8 +6768,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
static int rtl8169_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
rtl8169_net_suspend(dev);
@@ -6855,8 +6798,7 @@ static void __rtl8169_resume(struct net_device *dev)
static int rtl8169_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
clk_prepare_enable(tp->clk);
@@ -6869,8 +6811,7 @@ static int rtl8169_resume(struct device *device)
static int rtl8169_runtime_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
if (!tp->TxDescArray)
@@ -6891,8 +6832,7 @@ static int rtl8169_runtime_suspend(struct device *device)
static int rtl8169_runtime_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
rtl_rar_set(tp, dev->dev_addr);
@@ -6910,8 +6850,7 @@ static int rtl8169_runtime_resume(struct device *device)
static int rtl8169_runtime_idle(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
if (!netif_running(dev) || !netif_carrier_ok(dev))
pm_schedule_suspend(device, 10000);
@@ -7023,31 +6962,26 @@ static const struct net_device_ops rtl_netdev_ops = {
static const struct rtl_cfg_info {
void (*hw_start)(struct rtl8169_private *tp);
- u16 event_slow;
+ u16 irq_mask;
unsigned int has_gmii:1;
const struct rtl_coalesce_info *coalesce_info;
- u8 default_ver;
} rtl_cfg_infos [] = {
[RTL_CFG_0] = {
.hw_start = rtl_hw_start_8169,
- .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
+ .irq_mask = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
.has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8169,
- .default_ver = RTL_GIGA_MAC_VER_01,
},
[RTL_CFG_1] = {
.hw_start = rtl_hw_start_8168,
- .event_slow = SYSErr | LinkChg | RxOverflow,
+ .irq_mask = LinkChg | RxOverflow,
.has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8168_8136,
- .default_ver = RTL_GIGA_MAC_VER_11,
},
[RTL_CFG_2] = {
.hw_start = rtl_hw_start_8101,
- .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
- PCSTimeout,
+ .irq_mask = LinkChg | RxOverflow | RxFIFOOver,
.coalesce_info = rtl_coalesce_info_8168_8136,
- .default_ver = RTL_GIGA_MAC_VER_13,
}
};
@@ -7309,11 +7243,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mmio_addr = pcim_iomap_table(pdev)[region];
- if (!pci_is_pcie(pdev))
- dev_info(&pdev->dev, "not PCI Express\n");
-
/* Identify chip attached to board */
- rtl8169_get_mac_version(tp, cfg->default_ver);
+ rtl8169_get_mac_version(tp);
+ if (tp->mac_version == RTL_GIGA_MAC_NONE)
+ return -ENODEV;
if (rtl_tbi_enabled(tp)) {
dev_err(&pdev->dev, "TBI fiber mode not supported\n");
@@ -7351,8 +7284,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_init_mdio_ops(tp);
rtl_init_jumbo_ops(tp);
- rtl8169_print_mac_version(tp);
-
chipset = tp->mac_version;
rc = rtl_alloc_irq(tp);
@@ -7426,7 +7357,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->max_mtu = jumbo_max;
tp->hw_start = cfg->hw_start;
- tp->event_slow = cfg->event_slow;
+ tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask;
tp->coalesce_info = cfg->coalesce_info;
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
@@ -7450,9 +7381,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_mdio_unregister;
- netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
+ netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr,
- (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
+ (RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
pci_irq_vector(pdev, 0));
if (jumbo_max > JUMBO_1K)
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 1c6e4df94f01..ac9195add811 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1032,7 +1032,6 @@ struct ravb_private {
phy_interface_t phy_interface;
int msg_enable;
int speed;
- int duplex;
int emac_irq;
enum ravb_chip_id chip_id;
int rx_irqs[NUM_RX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index defed0d0c51d..ffc1ada4e6da 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -82,13 +82,6 @@ static int ravb_config(struct net_device *ndev)
return error;
}
-static void ravb_set_duplex(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
-
- ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
-}
-
static void ravb_set_rate(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -406,13 +399,11 @@ error:
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
- struct ravb_private *priv = netdev_priv(ndev);
-
/* Receive frame limit set register */
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
- ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
+ ravb_write(ndev, ECMR_ZPF | ECMR_DM |
(ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
@@ -995,12 +986,6 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_rcv_snd_disable(ndev);
if (phydev->link) {
- if (phydev->duplex != priv->duplex) {
- new_state = true;
- priv->duplex = phydev->duplex;
- ravb_set_duplex(ndev);
- }
-
if (phydev->speed != priv->speed) {
new_state = true;
priv->speed = phydev->speed;
@@ -1015,7 +1000,6 @@ static void ravb_adjust_link(struct net_device *ndev)
new_state = true;
priv->link = 0;
priv->speed = 0;
- priv->duplex = -1;
}
/* Enable TX and RX right over here, if E-MAC change is ignored */
@@ -1045,7 +1029,6 @@ static int ravb_phy_init(struct net_device *ndev)
priv->link = 0;
priv->speed = 0;
- priv->duplex = -1;
/* Try connecting to PHY */
pn = of_parse_phandle(np, "phy-handle", 0);
@@ -1088,6 +1071,10 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+ /* Half Duplex is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index beb06628f22d..6213827e3956 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1632,9 +1632,6 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
- if (netif_is_bridge_master(vlan->obj.orig_dev))
- return -EOPNOTSUPP;
-
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
@@ -2145,8 +2142,6 @@ static int rocker_port_obj_del(struct net_device *dev,
static const struct switchdev_ops rocker_port_switchdev_ops = {
.switchdev_port_attr_get = rocker_port_attr_get,
.switchdev_port_attr_set = rocker_port_attr_set,
- .switchdev_port_obj_add = rocker_port_obj_add,
- .switchdev_port_obj_del = rocker_port_obj_del,
};
struct rocker_fib_event_work {
@@ -2812,12 +2807,54 @@ static int rocker_switchdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
+static int
+rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+ int err = -EOPNOTSUPP;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = rocker_port_obj_add(netdev, port_obj_info->obj,
+ port_obj_info->trans);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = rocker_port_obj_del(netdev, port_obj_info->obj);
+ break;
+ }
+
+ port_obj_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+static int rocker_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return rocker_switchdev_port_obj_event(event, dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
static struct notifier_block rocker_switchdev_notifier = {
.notifier_call = rocker_switchdev_event,
};
+static struct notifier_block rocker_switchdev_blocking_notifier = {
+ .notifier_call = rocker_switchdev_blocking_event,
+};
+
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct notifier_block *nb;
struct rocker *rocker;
int err;
@@ -2933,6 +2970,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_register_switchdev_notifier;
}
+ nb = &rocker_switchdev_blocking_notifier;
+ err = register_switchdev_blocking_notifier(nb);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n");
+ goto err_register_switchdev_blocking_notifier;
+ }
+
rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
@@ -2940,6 +2984,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_register_switchdev_blocking_notifier:
+ unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
unregister_fib_notifier(&rocker->fib_nb);
err_register_fib_notifier:
@@ -2971,6 +3017,10 @@ err_pci_enable_device:
static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
+ struct notifier_block *nb;
+
+ nb = &rocker_switchdev_blocking_notifier;
+ unregister_switchdev_blocking_notifier(nb);
unregister_switchdev_notifier(&rocker_switchdev_notifier);
unregister_fib_notifier(&rocker->fib_nb);
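
The rocker hunks register a second notifier on the blocking switchdev chain and route SWITCHDEV_PORT_OBJ_ADD/DEL through it instead of the switchdev_ops object callbacks. A hedged sketch of such a handler; the port check is a stand-in for the driver's own ownership test:

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

static bool example_port_dev_check(const struct net_device *dev)
{
	return true;	/* a real driver checks netdev_ops ownership here */
}

/* Sketch: blocking switchdev notifier dispatching port object events. */
static int example_switchdev_blocking_event(struct notifier_block *unused,
					    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_obj_info *info = ptr;
	int err = -EOPNOTSUPP;

	if (!example_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
		/* program or remove info->obj in hardware, set err */
		info->handled = true;
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}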
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7eeac3d6cfe8..b6a50058bb8d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6041,6 +6041,10 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
+ /* MUM and SUC firmware share the same partition type */
+ { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" },
+ { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
+ { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }
};
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
@@ -6091,6 +6095,9 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
part->common.mtd.flags = MTD_CAP_NORFLASH;
part->common.mtd.size = size;
part->common.mtd.erasesize = erase_size;
+ /* sfc_status is read-only */
+ if (!erase_size)
+ part->common.mtd.flags |= MTD_NO_ERASE;
return 0;
}
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 3143588ffd77..600d7b895cf2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -539,7 +539,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
/* We need rx buffers and interrupts. */
already_up = (efx->net_dev->flags & IFF_UP);
if (!already_up) {
- rc = dev_open(efx->net_dev);
+ rc = dev_open(efx->net_dev, NULL);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 1ccdb7a82e2a..72cedec945c1 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -517,7 +517,7 @@ static void ef4_ethtool_self_test(struct net_device *net_dev,
/* We need rx buffers and interrupts. */
already_up = (efx->net_dev->flags & IFF_UP);
if (!already_up) {
- rc = dev_open(efx->net_dev);
+ rc = dev_open(efx->net_dev, NULL);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c3ad564ac4c0..22eb059086f7 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -553,13 +553,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
goto err;
- /* Update BQL */
- netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
-
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
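
The sfc hunk folds the BQL accounting and the xmit_more decision into one call: __netdev_tx_sent_queue() updates the byte queue limits and returns true when the frame must be pushed to hardware immediately. A hedged sketch of the calling pattern in a generic transmit path:

#include <linux/netdevice.h>

/* Sketch: defer the doorbell while BQL still allows batching. */
static void example_tx_tail(struct netdev_queue *txq, unsigned int bytes,
			    bool xmit_more)
{
	/* true when xmit_more is false or BQL has stopped the queue */
	if (__netdev_tx_sent_queue(txq, bytes, xmit_more)) {
		/* ring the hardware doorbell / push descriptors here */
	}
}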
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 358820282ef0..79612060d0ba 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -27,7 +27,7 @@ config SMC9194
option if you have a DELL laptop with the docking station, or
another SMC9192/9194 based chipset. Say Y if you want it compiled
into the kernel, and read the file
- <file:Documentation/networking/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
To compile this driver as a module, choose M here. The module
will be called smc9194.
@@ -43,7 +43,7 @@ config SMC91X
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
compiled into the kernel, and read the file
- <file:Documentation/networking/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index d9d0d03e4ce7..05a0948ad929 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -234,6 +234,9 @@
#define DESC_NUM 256
+#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define NETSEC_RX_BUF_SZ 1536
+
#define DESC_SZ sizeof(struct netsec_de)
#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
@@ -254,7 +257,6 @@ struct netsec_desc_ring {
dma_addr_t desc_dma;
struct netsec_desc *desc;
void *vaddr;
- u16 pkt_cnt;
u16 head, tail;
};
@@ -571,34 +573,10 @@ static const struct ethtool_ops netsec_ethtool_ops = {
/************* NETDEV_OPS FOLLOW *************/
-static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
- struct netsec_desc *desc)
-{
- struct sk_buff *skb;
-
- if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
- skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
- } else {
- desc->len = L1_CACHE_ALIGN(desc->len);
- skb = netdev_alloc_skb(priv->ndev, desc->len);
- }
- if (!skb)
- return NULL;
-
- desc->addr = skb->data;
- desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->dev, desc->dma_addr)) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
- return skb;
-}
static void netsec_set_rx_de(struct netsec_priv *priv,
struct netsec_desc_ring *dring, u16 idx,
- const struct netsec_desc *desc,
- struct sk_buff *skb)
+ const struct netsec_desc *desc)
{
struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
@@ -617,88 +595,28 @@ static void netsec_set_rx_de(struct netsec_priv *priv,
dring->desc[idx].dma_addr = desc->dma_addr;
dring->desc[idx].addr = desc->addr;
dring->desc[idx].len = desc->len;
- dring->desc[idx].skb = skb;
-}
-
-static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
- struct netsec_desc_ring *dring,
- u16 idx,
- struct netsec_rx_pkt_info *rxpi,
- struct netsec_desc *desc, u16 *len)
-{
- struct netsec_de de = {};
-
- memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
-
- *len = de.buf_len_info >> 16;
-
- rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
- rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
- rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
- NETSEC_RX_PKT_ERR_MASK;
- *desc = dring->desc[idx];
- return desc->skb;
-}
-
-static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
- struct netsec_rx_pkt_info *rxpi,
- struct netsec_desc *desc,
- u16 *len)
-{
- struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- struct sk_buff *tmp_skb, *skb = NULL;
- struct netsec_desc td;
- int tail;
-
- *rxpi = (struct netsec_rx_pkt_info){};
-
- td.len = priv->ndev->mtu + 22;
-
- tmp_skb = netsec_alloc_skb(priv, &td);
-
- tail = dring->tail;
-
- if (!tmp_skb) {
- netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
- dring->desc[tail].skb);
- } else {
- skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
- netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
- }
-
- /* move tail ahead */
- dring->tail = (dring->tail + 1) % DESC_NUM;
-
- return skb;
}
-static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
unsigned int pkts, bytes;
-
- dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
-
- if (dring->pkt_cnt < budget)
- budget = dring->pkt_cnt;
+ struct netsec_de *entry;
+ int tail = dring->tail;
+ int cnt = 0;
pkts = 0;
bytes = 0;
+ entry = dring->vaddr + DESC_SZ * tail;
- while (pkts < budget) {
+ while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
+ cnt < DESC_NUM) {
struct netsec_desc *desc;
- struct netsec_de *entry;
- int tail, eop;
-
- tail = dring->tail;
-
- /* move tail ahead */
- dring->tail = (tail + 1) % DESC_NUM;
+ int eop;
desc = &dring->desc[tail];
- entry = dring->vaddr + DESC_SZ * tail;
-
eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+ dma_rmb();
dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
DMA_TO_DEVICE);
@@ -707,33 +625,94 @@ static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
bytes += desc->skb->len;
dev_kfree_skb(desc->skb);
}
+ /* clean up so netsec_uninit_pkt_dring() won't free the skb
+ * again
+ */
*desc = (struct netsec_desc){};
+
+ /* entry->attr is not going to be accessed by the NIC until
+ * netsec_set_tx_de() is called. No need for a dma_wmb() here
+ */
+ entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
+ /* move tail ahead */
+ dring->tail = (tail + 1) % DESC_NUM;
+
+ tail = dring->tail;
+ entry = dring->vaddr + DESC_SZ * tail;
+ cnt++;
}
- dring->pkt_cnt -= budget;
- priv->ndev->stats.tx_packets += budget;
+ if (!cnt)
+ return false;
+
+ /* reading the register clears the irq */
+ netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+ priv->ndev->stats.tx_packets += cnt;
priv->ndev->stats.tx_bytes += bytes;
- netdev_completed_queue(priv->ndev, budget, bytes);
+ netdev_completed_queue(priv->ndev, cnt, bytes);
- return budget;
+ return true;
}
-static int netsec_process_tx(struct netsec_priv *priv, int budget)
+static void netsec_process_tx(struct netsec_priv *priv)
{
struct net_device *ndev = priv->ndev;
- int new, done = 0;
+ bool cleaned;
- do {
- new = netsec_clean_tx_dring(priv, budget);
- done += new;
- budget -= new;
- } while (new);
+ cleaned = netsec_clean_tx_dring(priv);
- if (done && netif_queue_stopped(ndev))
+ if (cleaned && netif_queue_stopped(ndev)) {
+ /* Make sure we update the value, anyone stopping the queue
+ * after this will read the proper consumer idx
+ */
+ smp_wmb();
netif_wake_queue(ndev);
+ }
+}
- return done;
+static void *netsec_alloc_rx_data(struct netsec_priv *priv,
+ dma_addr_t *dma_handle, u16 *desc_len)
+{
+ size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size_t payload_len = NETSEC_RX_BUF_SZ;
+ dma_addr_t mapping;
+ void *buf;
+
+ total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
+
+ buf = napi_alloc_frag(total_len);
+ if (!buf)
+ return NULL;
+
+ mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping)))
+ goto err_out;
+
+ *dma_handle = mapping;
+ *desc_len = payload_len;
+
+ return buf;
+
+err_out:
+ skb_free_frag(buf);
+ return NULL;
+}
+
+static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ u16 idx = from;
+
+ while (num) {
+ netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
+ idx++;
+ if (idx >= DESC_NUM)
+ idx = 0;
+ num--;
+ }
}
static int netsec_process_rx(struct netsec_priv *priv, int budget)
@@ -741,14 +720,17 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct net_device *ndev = priv->ndev;
struct netsec_rx_pkt_info rx_info;
- int done = 0;
- struct netsec_desc desc;
struct sk_buff *skb;
- u16 len;
+ int done = 0;
while (done < budget) {
u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
+ struct netsec_desc *desc = &dring->desc[idx];
+ u16 pkt_len, desc_len;
+ dma_addr_t dma_handle;
+ void *buf_addr;
+ u32 truesize;
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
/* reading the register clears the irq */
@@ -762,18 +744,59 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
*/
dma_rmb();
done++;
- skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
- if (unlikely(!skb) || rx_info.err_flag) {
+
+ pkt_len = de->buf_len_info >> 16;
+ rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
+ NETSEC_RX_PKT_ERR_MASK;
+ rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+ if (rx_info.err_flag) {
netif_err(priv, drv, priv->ndev,
- "%s: rx fail err(%d)\n",
- __func__, rx_info.err_code);
+ "%s: rx fail err(%d)\n", __func__,
+ rx_info.err_code);
ndev->stats.rx_dropped++;
+ dring->tail = (dring->tail + 1) % DESC_NUM;
+ /* reuse buffer page frag */
+ netsec_rx_fill(priv, idx, 1);
continue;
}
+ rx_info.rx_cksum_result =
+ (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
- dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
- DMA_FROM_DEVICE);
- skb_put(skb, len);
+ /* allocate a fresh buffer and map it to the hardware.
+ * This will eventually replace the old buffer in the hardware
+ */
+ buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+ if (unlikely(!buf_addr))
+ break;
+
+ dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(desc->addr);
+
+ truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb = build_skb(desc->addr, truesize);
+ if (unlikely(!skb)) {
+ /* free the newly allocated buffer, we are not going to
+ * use it
+ */
+ dma_unmap_single(priv->dev, dma_handle, desc_len,
+ DMA_FROM_DEVICE);
+ skb_free_frag(buf_addr);
+ netif_err(priv, drv, priv->ndev,
+ "rx failed to build skb\n");
+ break;
+ }
+ dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+
+ /* Update the descriptor with the new buffer we allocated */
+ desc->len = desc_len;
+ desc->dma_addr = dma_handle;
+ desc->addr = buf_addr;
+
+ skb_reserve(skb, NETSEC_SKB_PAD);
+ skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
@@ -782,8 +805,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += len;
+ ndev->stats.rx_bytes += pkt_len;
}
+
+ netsec_rx_fill(priv, idx, 1);
+ dring->tail = (dring->tail + 1) % DESC_NUM;
}
return done;
@@ -792,24 +818,17 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
struct netsec_priv *priv;
- int tx, rx, done, todo;
+ int rx, done, todo;
priv = container_of(napi, struct netsec_priv, napi);
+ netsec_process_tx(priv);
+
todo = budget;
do {
- if (!todo)
- break;
-
- tx = netsec_process_tx(priv, todo);
- todo -= tx;
-
- if (!todo)
- break;
-
rx = netsec_process_rx(priv, todo);
todo -= rx;
- } while (rx || tx);
+ } while (rx);
done = budget - todo;
@@ -861,6 +880,41 @@ static void netsec_set_tx_de(struct netsec_priv *priv,
dring->head = (dring->head + 1) % DESC_NUM;
}
+static int netsec_desc_used(struct netsec_desc_ring *dring)
+{
+ int used;
+
+ if (dring->head >= dring->tail)
+ used = dring->head - dring->tail;
+ else
+ used = dring->head + DESC_NUM - dring->tail;
+
+ return used;
+}
+
+static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+
+ /* keep tail from touching the queue */
+ if (DESC_NUM - used < 2) {
+ netif_stop_queue(priv->ndev);
+
+ /* Make sure we read the updated value in case
+ * descriptors got freed
+ */
+ smp_rmb();
+
+ used = netsec_desc_used(dring);
+ if (DESC_NUM - used < 2)
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(priv->ndev);
+ }
+
+ return 0;
+}
+
static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -871,16 +925,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
u16 tso_seg_len = 0;
int filled;
- /* differentiate between full/emtpy ring */
- if (dring->head >= dring->tail)
- filled = dring->head - dring->tail;
- else
- filled = dring->head + DESC_NUM - dring->tail;
-
- if (DESC_NUM - filled < 2) { /* if less than 2 available */
- netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
- netif_stop_queue(priv->ndev);
- dma_wmb();
+ filled = netsec_desc_used(dring);
+ if (netsec_check_stop_tx(priv, filled)) {
+ net_warn_ratelimited("%s %s Tx queue full\n",
+ dev_name(priv->dev), ndev->name);
return NETDEV_TX_BUSY;
}
@@ -946,7 +994,10 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
DMA_TO_DEVICE);
- dev_kfree_skb(desc->skb);
+ if (id == NETSEC_RING_RX)
+ skb_free_frag(desc->addr);
+ else if (id == NETSEC_RING_TX)
+ dev_kfree_skb(desc->skb);
}
memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
@@ -954,7 +1005,6 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
dring->head = 0;
dring->tail = 0;
- dring->pkt_cnt = 0;
if (id == NETSEC_RING_TX)
netdev_reset_queue(priv->ndev);
@@ -977,47 +1027,64 @@ static void netsec_free_dring(struct netsec_priv *priv, int id)
static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
struct netsec_desc_ring *dring = &priv->desc_ring[id];
- int ret = 0;
+ int i;
dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
&dring->desc_dma, GFP_KERNEL);
- if (!dring->vaddr) {
- ret = -ENOMEM;
+ if (!dring->vaddr)
goto err;
- }
dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
- if (!dring->desc) {
- ret = -ENOMEM;
+ if (!dring->desc)
goto err;
+
+ if (id == NETSEC_RING_TX) {
+ for (i = 0; i < DESC_NUM; i++) {
+ struct netsec_de *de;
+
+ de = dring->vaddr + (DESC_SZ * i);
+ /* de->attr is not going to be accessed by the NIC
+ * until netsec_set_tx_de() is called.
+ * No need for a dma_wmb() here
+ */
+ de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
+ }
}
return 0;
err:
netsec_free_dring(priv, id);
- return ret;
+ return -ENOMEM;
}
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- struct netsec_desc desc;
- struct sk_buff *skb;
- int n;
+ int i;
- desc.len = priv->ndev->mtu + 22;
+ for (i = 0; i < DESC_NUM; i++) {
+ struct netsec_desc *desc = &dring->desc[i];
+ dma_addr_t dma_handle;
+ void *buf;
+ u16 len;
- for (n = 0; n < DESC_NUM; n++) {
- skb = netsec_alloc_skb(priv, &desc);
- if (!skb) {
+ buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+ if (!buf) {
netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
- return -ENOMEM;
+ goto err_out;
}
- netsec_set_rx_de(priv, dring, n, &desc, skb);
+ desc->dma_addr = dma_handle;
+ desc->addr = buf;
+ desc->len = len;
}
+ netsec_rx_fill(priv, 0, DESC_NUM);
+
return 0;
+
+err_out:
+ return -ENOMEM;
}
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
@@ -1377,6 +1444,8 @@ static int netsec_netdev_init(struct net_device *ndev)
int ret;
u16 data;
+ BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
+
ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
if (ret)
return ret;
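
The netsec rework above replaces per-descriptor skb allocation with page fragments that are mapped for DMA and turned into skbs with build_skb() only once a frame completes. A minimal sketch of the buffer allocation, using the sizes from the patch; error handling and ring bookkeeping are omitted:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define EX_RX_BUF_SZ	1536
#define EX_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)

/* Sketch: allocate a page fragment sized so build_skb() fits later. */
static void *example_alloc_rx_buf(struct device *dev, dma_addr_t *dma)
{
	size_t len = SKB_DATA_ALIGN(EX_RX_BUF_SZ + EX_SKB_PAD) +
		     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	void *buf = napi_alloc_frag(len);

	if (!buf)
		return NULL;

	/* only the payload area is mapped; the headroom and the
	 * skb_shared_info tail stay CPU-owned for build_skb()
	 */
	*dma = dma_map_single(dev, buf + EX_SKB_PAD, EX_RX_BUF_SZ,
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		skb_free_frag(buf);
		return NULL;
	}

	return buf;
}

On completion the driver would call build_skb() on the fragment, skb_reserve() the pad and skb_put() the packet length, as the hunks above do.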
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 7c7cd9d94bcc..bb6d5fb73035 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -262,6 +262,7 @@ struct ave_private {
struct regmap *regmap;
unsigned int pinmode_mask;
unsigned int pinmode_val;
+ u32 wolopts;
/* stats */
struct ave_stats stats_rx;
@@ -1119,7 +1120,7 @@ static void ave_phy_adjust_link(struct net_device *ndev)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (cap & FLOW_CTRL_TX)
txcr |= AVE_TXCR_FLOCTR;
@@ -1210,9 +1211,13 @@ static int ave_init(struct net_device *ndev)
priv->phydev = phydev;
- phy_ethtool_get_wol(phydev, &wol);
+ ave_ethtool_get_wol(ndev, &wol);
device_set_wakeup_capable(&ndev->dev, !!wol.supported);
+ /* set the initial WoL state to disabled */
+ wol.wolopts = 0;
+ ave_ethtool_set_wol(ndev, &wol);
+
if (!phy_interface_is_rgmii(phydev))
phy_set_max_speed(phydev, SPEED_100);
@@ -1737,6 +1742,58 @@ static int ave_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int ave_suspend(struct device *dev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ave_private *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ if (netif_running(ndev)) {
+ ret = ave_stop(ndev);
+ netif_device_detach(ndev);
+ }
+
+ ave_ethtool_get_wol(ndev, &wol);
+ priv->wolopts = wol.wolopts;
+
+ return ret;
+}
+
+static int ave_resume(struct device *dev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ave_private *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ ave_global_reset(ndev);
+
+ ave_ethtool_get_wol(ndev, &wol);
+ wol.wolopts = priv->wolopts;
+ ave_ethtool_set_wol(ndev, &wol);
+
+ if (ndev->phydev) {
+ ret = phy_resume(ndev->phydev);
+ if (ret)
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ ret = ave_open(ndev);
+ netif_device_attach(ndev);
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume);
+#define AVE_PM_OPS (&ave_pm_ops)
+#else
+#define AVE_PM_OPS NULL
+#endif
+
static int ave_pro4_get_pinmode(struct ave_private *priv,
phy_interface_t phy_mode, u32 arg)
{
@@ -1911,6 +1968,7 @@ static struct platform_driver ave_driver = {
.remove = ave_remove,
.driver = {
.name = "ave",
+ .pm = AVE_PM_OPS,
.of_match_table = of_ave_match,
},
};
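
The sni_ave hunks save the WoL configuration in ave_suspend() and reapply it after the global reset in ave_resume(), with the callbacks wired up through SIMPLE_DEV_PM_OPS. A hedged sketch of that wiring; the names and empty bodies are illustrative:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	/* stop the interface if running, then cache the WoL options */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	/* reset the MAC, restore the cached WoL options, reopen */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.pm	= &example_pm_ops,
	},
};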
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 324049eebb9b..6209cc1fb305 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -75,6 +75,14 @@ config DWMAC_LPC18XX
---help---
Support for NXP LPC18xx/43xx DWMAC Ethernet.
+config DWMAC_MEDIATEK
+ tristate "MediaTek MT27xx GMAC support"
+ depends on OF && (ARCH_MEDIATEK || COMPILE_TEST)
+ help
+ Support for MediaTek GMAC Ethernet controller.
+
+ This selects the MT2712 SoC support for the stmmac driver.
+
config DWMAC_MESON
tristate "Amlogic Meson dwmac support"
default ARCH_MESON
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 99967a80a8c8..bf09701d2623 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
+obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
new file mode 100644
index 000000000000..bf2562995fc8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+/* Peri Configuration register for mt2712 */
+#define PERI_ETH_PHY_INTF_SEL 0x418
+#define PHY_INTF_MII 0
+#define PHY_INTF_RGMII 1
+#define PHY_INTF_RMII 4
+#define RMII_CLK_SRC_RXC BIT(4)
+#define RMII_CLK_SRC_INTERNAL BIT(5)
+
+#define PERI_ETH_DLY 0x428
+#define ETH_DLY_GTXC_INV BIT(6)
+#define ETH_DLY_GTXC_ENABLE BIT(5)
+#define ETH_DLY_GTXC_STAGES GENMASK(4, 0)
+#define ETH_DLY_TXC_INV BIT(20)
+#define ETH_DLY_TXC_ENABLE BIT(19)
+#define ETH_DLY_TXC_STAGES GENMASK(18, 14)
+#define ETH_DLY_RXC_INV BIT(13)
+#define ETH_DLY_RXC_ENABLE BIT(12)
+#define ETH_DLY_RXC_STAGES GENMASK(11, 7)
+
+#define PERI_ETH_DLY_FINE 0x800
+#define ETH_RMII_DLY_TX_INV BIT(2)
+#define ETH_FINE_DLY_GTXC BIT(1)
+#define ETH_FINE_DLY_RXC BIT(0)
+
+struct mac_delay_struct {
+ u32 tx_delay;
+ u32 rx_delay;
+ bool tx_inv;
+ bool rx_inv;
+};
+
+struct mediatek_dwmac_plat_data {
+ const struct mediatek_dwmac_variant *variant;
+ struct mac_delay_struct mac_delay;
+ struct clk_bulk_data *clks;
+ struct device_node *np;
+ struct regmap *peri_regmap;
+ struct device *dev;
+ int phy_mode;
+ bool rmii_rxc;
+};
+
+struct mediatek_dwmac_variant {
+ int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
+ int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+
+ u32 dma_bit_mask;
+ u32 rx_delay_max;
+ u32 tx_delay_max;
+};
+
+/* list of clocks required for mac */
+static const char * const mt2712_dwmac_clk_l[] = {
+ "axi", "apb", "mac_main", "ptp_ref"
+};
+
+static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
+{
+ int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
+ u32 intf_val = 0;
+
+ /* select phy interface in top control domain */
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val |= PHY_INTF_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val |= (PHY_INTF_RMII | rmii_rxc);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ intf_val |= PHY_INTF_RGMII;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ regmap_write(plat->peri_regmap, PERI_ETH_PHY_INTF_SEL, intf_val);
+
+ return 0;
+}
+
+static void mt2712_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ /* 550ps per stage for MII/RMII */
+ mac_delay->tx_delay /= 550;
+ mac_delay->rx_delay /= 550;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* 170ps per stage for RGMII */
+ mac_delay->tx_delay /= 170;
+ mac_delay->rx_delay /= 170;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ break;
+ }
+}
+
+static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 delay_val = 0, fine_val = 0;
+
+ mt2712_delay_ps2stage(plat);
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ /* the rmii reference clock is from external phy,
+ * and the property "rmii_rxc" indicates which pin (TXC/RXC)
+ * the reference clk is connected to. The reference clock is a
+ * received signal, so rx_delay/rx_inv are used to indicate
+ * the reference clock timing adjustment
+ */
+ if (plat->rmii_rxc) {
+ /* the rmii reference clock from outside is connected
+ * to RXC pin, the reference clock will be adjusted
+ * by RXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ } else {
+ /* the rmii reference clock from outside is connected
+ * to TXC pin, the reference clock will be adjusted
+ * by TXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+ }
+ /* tx_inv will invert the tx clock inside the mac relative to the
+ * reference clock from the external phy, and this bit is located
+ * in the same register as the fine-tune settings
+ */
+ if (mac_delay->tx_inv)
+ fine_val = ETH_RMII_DLY_TX_INV;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ fine_val = ETH_FINE_DLY_GTXC | ETH_FINE_DLY_RXC;
+
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+ regmap_write(plat->peri_regmap, PERI_ETH_DLY, delay_val);
+ regmap_write(plat->peri_regmap, PERI_ETH_DLY_FINE, fine_val);
+
+ return 0;
+}
+
+static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
+ .dwmac_set_phy_interface = mt2712_set_interface,
+ .dwmac_set_delay = mt2712_set_delay,
+ .clk_list = mt2712_dwmac_clk_l,
+ .num_clks = ARRAY_SIZE(mt2712_dwmac_clk_l),
+ .dma_bit_mask = 33,
+ .rx_delay_max = 17600,
+ .tx_delay_max = 17600,
+};
+
+static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 tx_delay_ps, rx_delay_ps;
+
+ plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
+ if (IS_ERR(plat->peri_regmap)) {
+ dev_err(plat->dev, "Failed to get pericfg syscon\n");
+ return PTR_ERR(plat->peri_regmap);
+ }
+
+ plat->phy_mode = of_get_phy_mode(plat->np);
+ if (plat->phy_mode < 0) {
+ dev_err(plat->dev, "failed to get phy-mode\n");
+ return -EINVAL;
+ }
+
+ if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
+ if (tx_delay_ps < plat->variant->tx_delay_max) {
+ mac_delay->tx_delay = tx_delay_ps;
+ } else {
+ dev_err(plat->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(plat->np, "mediatek,rx-delay-ps", &rx_delay_ps)) {
+ if (rx_delay_ps < plat->variant->rx_delay_max) {
+ mac_delay->rx_delay = rx_delay_ps;
+ } else {
+ dev_err(plat->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
+ return -EINVAL;
+ }
+ }
+
+ mac_delay->tx_inv = of_property_read_bool(plat->np, "mediatek,txc-inverse");
+ mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
+ plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
+
+ return 0;
+}
+
+static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
+{
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int i, num = variant->num_clks;
+
+ plat->clks = devm_kcalloc(plat->dev, num, sizeof(*plat->clks), GFP_KERNEL);
+ if (!plat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ plat->clks[i].id = variant->clk_list[i];
+
+ return devm_clk_bulk_get(plat->dev, num, plat->clks);
+}
+
+static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(plat->dev, DMA_BIT_MASK(variant->dma_bit_mask));
+ if (ret) {
+ dev_err(plat->dev, "No suitable DMA available, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = variant->dwmac_set_phy_interface(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = variant->dwmac_set_delay(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+}
+
+static int mediatek_dwmac_probe(struct platform_device *pdev)
+{
+ struct mediatek_dwmac_plat_data *priv_plat;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ int ret;
+
+ priv_plat = devm_kzalloc(&pdev->dev, sizeof(*priv_plat), GFP_KERNEL);
+ if (!priv_plat)
+ return -ENOMEM;
+
+ priv_plat->variant = of_device_get_match_data(&pdev->dev);
+ if (!priv_plat->variant) {
+ dev_err(&pdev->dev, "Missing dwmac-mediatek variant\n");
+ return -EINVAL;
+ }
+
+ priv_plat->dev = &pdev->dev;
+ priv_plat->np = pdev->dev.of_node;
+
+ ret = mediatek_dwmac_config_dt(priv_plat);
+ if (ret)
+ return ret;
+
+ ret = mediatek_dwmac_clk_init(priv_plat);
+ if (ret)
+ return ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->interface = priv_plat->phy_mode;
+ /* clk_csr_i = 250-300MHz & MDC = clk_csr_i/124 */
+ plat_dat->clk_csr = 5;
+ plat_dat->has_gmac4 = 1;
+ plat_dat->has_gmac = 0;
+ plat_dat->pmt = 0;
+ plat_dat->maxmtu = ETH_DATA_LEN;
+ plat_dat->bsp_priv = priv_plat;
+ plat_dat->init = mediatek_dwmac_init;
+ plat_dat->exit = mediatek_dwmac_exit;
+ mediatek_dwmac_init(pdev, priv_plat);
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret) {
+ stmmac_remove_config_dt(pdev, plat_dat);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mediatek_dwmac_match[] = {
+ { .compatible = "mediatek,mt2712-gmac",
+ .data = &mt2712_gmac_variant },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
+
+static struct platform_driver mediatek_dwmac_driver = {
+ .probe = mediatek_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "dwmac-mediatek",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = mediatek_dwmac_match,
+ },
+};
+module_platform_driver(mediatek_dwmac_driver);
+
+MODULE_AUTHOR("Biao Huang <biao.huang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek DWMAC specific glue layer");
+MODULE_LICENSE("GPL v2");
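
mt2712_set_delay() above first converts the DT delays from picoseconds to delay-macro stages (one stage is 550 ps for MII/RMII and 170 ps for RGMII) and then packs enable/stage/invert bits with FIELD_PREP(). A self-contained sketch of the RGMII packing; the EX_* field definitions mirror the PERI_ETH_DLY layout from the file above:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_DLY_GTXC_INV		BIT(6)
#define EX_DLY_GTXC_ENABLE	BIT(5)
#define EX_DLY_GTXC_STAGES	GENMASK(4, 0)
#define EX_DLY_RXC_INV		BIT(13)
#define EX_DLY_RXC_ENABLE	BIT(12)
#define EX_DLY_RXC_STAGES	GENMASK(11, 7)

/* Sketch: convert picosecond delays to stages and pack the RGMII fields. */
static u32 example_rgmii_delay_val(u32 tx_delay_ps, u32 rx_delay_ps,
				   bool tx_inv, bool rx_inv)
{
	u32 tx_stages = tx_delay_ps / 170;	/* 170 ps per RGMII stage */
	u32 rx_stages = rx_delay_ps / 170;
	u32 val = 0;

	val |= FIELD_PREP(EX_DLY_GTXC_ENABLE, !!tx_stages);
	val |= FIELD_PREP(EX_DLY_GTXC_STAGES, tx_stages);
	val |= FIELD_PREP(EX_DLY_GTXC_INV, tx_inv);

	val |= FIELD_PREP(EX_DLY_RXC_ENABLE, !!rx_stages);
	val |= FIELD_PREP(EX_DLY_RXC_STAGES, rx_stages);
	val |= FIELD_PREP(EX_DLY_RXC_INV, rx_inv);

	return val;
}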
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5710864fa809..d1f61c25d82b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -458,8 +458,10 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (!adv_lp.pause)
return;
} else {
- if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
- !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ netdev->phydev->supported) ||
+ !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ netdev->phydev->supported))
return;
}
@@ -487,8 +489,10 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (!adv_lp.pause)
return -EOPNOTSUPP;
} else {
- if (!(phy->supported & SUPPORTED_Pause) ||
- !(phy->supported & SUPPORTED_Asym_Pause))
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phy->supported) ||
+ !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phy->supported))
return -EOPNOTSUPP;
}
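
These stmmac ethtool hunks follow the conversion of phydev->supported from a u32 flag word to a linkmode bitmap, so individual modes are now tested with linkmode_test_bit(). A small hedged illustration of the equivalent pause check:

#include <linux/linkmode.h>
#include <linux/phy.h>

/* Sketch: pause settings only apply if both pause modes are supported. */
static bool example_phy_supports_pause(struct phy_device *phydev)
{
	return linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				 phydev->supported) &&
	       linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				 phydev->supported);
}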
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c4a35e932f05..0e0a0789c2ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3881,7 +3881,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
}
}
-static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -3926,23 +3926,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
-static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
-{
- return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
-}
-
-/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
-
-static const struct file_operations stmmac_rings_status_fops = {
- .owner = THIS_MODULE,
- .open = stmmac_sysfs_ring_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
+static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -4005,19 +3991,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
return 0;
}
-
-static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
-}
-
-static const struct file_operations stmmac_dma_cap_fops = {
- .owner = THIS_MODULE,
- .open = stmmac_sysfs_dma_cap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
static int stmmac_init_fs(struct net_device *dev)
{
@@ -4101,7 +4075,7 @@ static void stmmac_reset_subtask(struct stmmac_priv *priv)
set_bit(STMMAC_DOWN, &priv->state);
dev_close(priv->dev);
- dev_open(priv->dev);
+ dev_open(priv->dev, NULL);
clear_bit(STMMAC_DOWN, &priv->state);
clear_bit(STMMAC_RESETING, &priv->state);
rtnl_unlock();
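
The stmmac debugfs hunks drop the hand-written single_open()/file_operations boilerplate in favour of DEFINE_SHOW_ATTRIBUTE(), which generates <name>_fops from a <name>_show() function. A minimal sketch of the pattern:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_status_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "example status\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_status);	/* defines example_status_fops */

static void example_init_debugfs(struct dentry *dir, void *priv)
{
	debugfs_create_file("status", 0444, dir, priv, &example_status_fops);
}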
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 863fd602fd33..ff641cf30a4e 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2691,7 +2691,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
sbus_dp = op->dev.parent->of_node;
/* We can match PCI devices too, do not accept those here. */
- if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
+ if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi"))
return err;
if (is_qfe) {
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index f932923f7d56..bb126be1eb72 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -121,7 +121,8 @@ config TLAN
Devices currently supported by this driver are Compaq Netelligent,
Compaq NetFlex and Olicom cards. Please read the file
- <file:Documentation/networking/tlan.txt> for more details.
+ <file:Documentation/networking/device_drivers/ti/tlan.txt>
+ for more details.
To compile this driver as a module, choose M here. The module
will be called tlan.
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 9b8a30bf939b..810dfc7de1f9 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -991,7 +991,6 @@ static int cpmac_open(struct net_device *dev)
cpmac_hw_start(dev);
napi_enable(&priv->napi);
- dev->phydev->state = PHY_CHANGELINK;
phy_start(dev->phydev);
return 0;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 500f7ed8c58c..0e8f61a29479 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -283,7 +283,7 @@ struct cpsw_ss_regs {
#define CTRL_V2_TS_BITS \
(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
- TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
@@ -293,7 +293,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \
(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
- TS_LTYPE1_EN)
+ TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
@@ -466,6 +466,8 @@ struct cpsw_priv {
bool mqprio_hw;
int fifo_bw[CPSW_TC_NUM];
int shp_cfg_speed;
+ int tx_ts_enabled;
+ int rx_ts_enabled;
u32 emac_port;
struct cpsw_common *cpsw;
};
@@ -565,26 +567,14 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
(func)(slave++, ##arg); \
} while (0)
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid);
+
static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
-static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
-{
- struct cpsw_common *cpsw = priv->cpsw;
-
- if (cpsw->data.dual_emac) {
- struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
-
- cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST,
- ALE_VLAN, slave->port_vlan, 0);
- return;
- }
-
- cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
-}
-
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -640,7 +630,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
- __dev_mc_unsync(ndev, NULL);
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -661,29 +651,148 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
}
}
-static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
+struct addr_sync_ctx {
+ struct net_device *ndev;
+ const u8 *addr; /* address to be synched */
+ int consumed; /* number of address instances */
+ int flush; /* flush flag */
+};
+
+/**
+ * cpsw_set_mc - add the multicast entry to the table if it is not already
+ * present, or delete it if it is
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id, if vid < 0 set/unset address for real device
+ * @add: add address if the flag is set or remove otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+ int vid, int add)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int mask, flags, ret;
+
+ if (vid < 0) {
+ if (cpsw->data.dual_emac)
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
+ else
+ vid = 0;
+ }
+
+ mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
+ flags = vid ? ALE_VLAN : 0;
+
+ if (add)
+ ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+ else
+ ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+ return ret;
+}
+
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0, ret = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (found)
+ sync_ctx->consumed++;
+
+ if (sync_ctx->flush) {
+ if (!found)
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+ }
+
+ if (found)
+ ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+ return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+ int ret;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 0;
+
+ ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num && !ret)
+ ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+ return ret;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 1;
+
+ vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed == num)
+ cpsw_set_mc(ndev, addr, -1, 0);
- cpsw_add_mcast(priv, addr);
return 0;
}
-static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int vid, flags;
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0;
- if (cpsw->data.dual_emac) {
- vid = cpsw->slaves[priv->emac_port].port_vlan;
- flags = ALE_VLAN;
- } else {
- vid = 0;
- flags = 0;
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
}
- cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+ if (!found)
+ return 0;
+
+ sync_ctx->consumed++;
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.consumed = 0;
+
+ vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
return 0;
}
@@ -704,7 +813,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* Restore allmulti on vlans if necessary */
cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
- __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+ cpsw_del_mc_addr);
}
static void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -796,6 +907,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct net_device *ndev = skb->dev;
int ret = 0, port;
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv;
if (cpsw->data.dual_emac) {
port = CPDMA_RX_SOURCE_PORT(status);
@@ -830,7 +942,9 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_put(skb, len);
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
- cpts_rx_timestamp(cpsw->cpts, skb);
+ priv = netdev_priv(ndev);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -1845,9 +1959,23 @@ static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_write(slave, tx_prio_map, tx_prio_rg);
}
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct cpsw_priv *priv = arg;
+
+ if (!vdev)
+ return 0;
+
+ cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+ return 0;
+}
+
/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
+ /* restore vlan configurations */
+ vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
/* restore MQPRIO offload */
for_each_slave(priv, cpsw_mqprio_resume, priv);
@@ -1964,7 +2092,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
- __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
@@ -2003,7 +2131,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
q_idx = skb_get_queue_mapping(skb);
@@ -2047,13 +2175,13 @@ fail:
#if IS_ENABLED(CONFIG_TI_CPTS)
-static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
+ struct cpsw_common *cpsw = priv->cpsw;
struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
u32 ts_en, seq_id;
- if (!cpts_is_tx_enabled(cpsw->cpts) &&
- !cpts_is_rx_enabled(cpsw->cpts)) {
+ if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -2061,10 +2189,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ts_en |= CPSW_V1_TS_TX_EN;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -2084,20 +2212,20 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
case CPSW_VERSION_2:
ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V2_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V2_RX_TS_BITS;
break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V3_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V3_RX_TS_BITS;
break;
}
@@ -2107,6 +2235,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2114,7 +2243,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
struct cpsw_common *cpsw = priv->cpsw;
- struct cpts *cpts = cpsw->cpts;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
@@ -2133,7 +2261,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ priv->rx_ts_enabled = 0;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -2141,7 +2269,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2153,18 +2281,18 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
+ priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(cpsw);
+ cpsw_hwtstamp_v1(priv);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
@@ -2180,7 +2308,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
- struct cpts *cpts = cpsw->cpts;
+ struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
@@ -2189,10 +2317,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = priv->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2415,6 +2541,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
HOST_PORT_NUM, ALE_VLAN, vid);
ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
+ ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
err:
pm_runtime_put(cpsw->dev);
return ret;
@@ -3144,7 +3271,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
const __be32 *parp;
/* This is no slave child node, continue */
- if (strcmp(slave_node->name, "slave"))
+ if (!of_node_name_eq(slave_node, "slave"))
continue;
slave_data->phy_node = of_parse_phandle(slave_node,
@@ -3240,7 +3367,7 @@ static void cpsw_remove_dt(struct platform_device *pdev)
for_each_available_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = &data->slave_data[i];
- if (strcmp(slave_node->name, "slave"))
+ if (!of_node_name_eq(slave_node, "slave"))
continue;
if (of_phy_is_fixed_link(slave_node))
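
The cpsw multicast rework switches from __dev_mc_sync() to __hw_addr_ref_sync_dev(), whose callbacks also receive the address reference count so the driver can decide, via vlan_for_each(), whether an entry belongs to a VLAN upper or to the real device. A hedged sketch of the callback shape; the bodies are placeholders for the ALE programming the patch does:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static int example_add_mc_addr(struct net_device *ndev, const u8 *addr,
			       int num)
{
	/* 'num' is the reference count; walk VLAN uppers with
	 * vlan_for_each() and program the entry per interested VLAN,
	 * falling back to the real device for leftover references
	 */
	return 0;
}

static int example_del_mc_addr(struct net_device *ndev, const u8 *addr,
			       int num)
{
	return 0;
}

/* Sketch: ndo_set_rx_mode using the reference-counted sync helper. */
static void example_set_rx_mode(struct net_device *ndev)
{
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, example_add_mc_addr,
			       example_del_mc_addr);
}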
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index b96b93c686bf..054f78295d1d 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -86,6 +86,25 @@ static int cpts_purge_events(struct cpts *cpts)
return removed ? 0 : -1;
}
+static void cpts_purge_txq(struct cpts *cpts)
+{
+ struct cpts_skb_cb_data *skb_cb;
+ struct sk_buff *skb, *tmp;
+ int removed = 0;
+
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+ skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ if (time_after(jiffies, skb_cb->tmo)) {
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
+}
+
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
struct sk_buff *skb, *tmp;
@@ -119,9 +138,7 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
- dev_dbg(cpts->dev,
- "expiring tx timestamp mtype %u seqid %04x\n",
- mtype, seqid);
+ dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
__skb_unlink(skb, &cpts->txq);
dev_consume_skb_any(skb);
}
@@ -294,8 +311,11 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
spin_lock_irqsave(&cpts->lock, flags);
ts = ns_to_timespec64(timecounter_read(&cpts->tc));
- if (!skb_queue_empty(&cpts->txq))
- delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ if (!skb_queue_empty(&cpts->txq)) {
+ cpts_purge_txq(cpts);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ }
spin_unlock_irqrestore(&cpts->lock, flags);
pr_debug("cpts overflow check at %lld.%09ld\n",
@@ -410,8 +430,6 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
u64 ns;
struct skb_shared_hwtstamps *ssh;
- if (!cpts->rx_enable)
- return;
ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
if (!ns)
return;
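Editor's note: cpts_purge_txq() above uses the safe skb-queue walker so expired entries can be unlinked while iterating. A minimal, self-contained sketch of that pattern follows; struct demo_cb and its tmo field are invented for the example and are not the driver's types.

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct demo_cb {
	unsigned long tmo;	/* jiffies deadline stashed in skb->cb */
};

/* Illustrative sketch only: drop timed-out skbs from a queue. The
 * _safe walker permits __skb_unlink() on the current entry.
 */
static int demo_purge_expired(struct sk_buff_head *q)
{
	struct sk_buff *skb, *tmp;
	int removed = 0;

	skb_queue_walk_safe(q, skb, tmp) {
		struct demo_cb *cb = (struct demo_cb *)skb->cb;

		if (time_after(jiffies, cb->tmo)) {
			__skb_unlink(skb, q);
			dev_consume_skb_any(skb);
			removed++;
		}
	}
	return removed;
}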
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 73d73faf0f38..d2c7decd59b6 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -136,26 +136,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
void cpts_release(struct cpts *cpts);
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
- cpts->rx_enable = enable;
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return !!cpts->rx_enable;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
- cpts->tx_enable = enable;
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return !!cpts->tx_enable;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
unsigned int class = ptp_classify_raw(skb);
@@ -197,24 +177,6 @@ static inline void cpts_unregister(struct cpts *cpts)
{
}
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
return false;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 9153db120352..840820402cd0 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1912,11 +1912,15 @@ static int davinci_emac_probe(struct platform_device *pdev)
ether_addr_copy(ndev->dev_addr, priv->mac_addr);
if (!is_valid_ether_addr(priv->mac_addr)) {
- /* Use random MAC if none passed */
- eth_hw_addr_random(ndev);
- memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
- dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
- priv->mac_addr);
+ /* Try nvmem if MAC wasn't passed over pdata or DT. */
+ rc = nvmem_get_mac_address(&pdev->dev, priv->mac_addr);
+ if (rc) {
+ /* Use random MAC if still none obtained. */
+ eth_hw_addr_random(ndev);
+ memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
+ }
}
ndev->netdev_ops = &emac_netdev_ops;
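Editor's note: the davinci_emac hunk adds an nvmem lookup between the pdata/DT address and the random-MAC fallback. The following is a hedged sketch of that ordering, assuming nvmem_get_mac_address() as used in the hunk; the helper name demo_pick_mac and its arguments are placeholders.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Illustrative sketch only: prefer a MAC supplied by pdata/DT, then
 * nvmem, then fall back to a random address.
 */
static void demo_pick_mac(struct device *dev, struct net_device *ndev,
			  u8 *mac_addr)
{
	if (is_valid_ether_addr(mac_addr)) {
		ether_addr_copy(ndev->dev_addr, mac_addr);
		return;
	}
	if (!nvmem_get_mac_address(dev, mac_addr)) {
		ether_addr_copy(ndev->dev_addr, mac_addr);
		return;
	}
	eth_hw_addr_random(ndev);		/* last resort */
	memcpy(mac_addr, ndev->dev_addr, ndev->addr_len);
	dev_warn(dev, "using random MAC addr: %pM\n", mac_addr);
}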
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 0397ccb6597e..5174d318901e 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -763,6 +763,8 @@ struct gbe_priv {
int cpts_registered;
struct cpts *cpts;
+ int rx_ts_enabled;
+ int tx_ts_enabled;
};
struct gbe_intf {
@@ -2564,7 +2566,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
- !cpts_is_tx_enabled(gbe_dev->cpts))
+ !gbe_dev->tx_ts_enabled)
return 0;
/* If phy has the txtstamp api, assume it will do it.
@@ -2598,7 +2600,9 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
return 0;
}
- cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+ if (gbe_dev->rx_ts_enabled)
+ cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+
p_info->rxtstamp_complete = true;
return 0;
@@ -2614,10 +2618,8 @@ static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = gbe_dev->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2628,8 +2630,8 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
struct gbe_slave *slave = gbe_intf->slave;
u32 ts_en, seq_id, ctl;
- if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
- !cpts_is_tx_enabled(gbe_dev->cpts)) {
+ if (!gbe_dev->rx_ts_enabled &&
+ !gbe_dev->tx_ts_enabled) {
writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
return;
}
@@ -2641,10 +2643,10 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
(slave->ts_ctl.uni ? TS_UNI_EN :
slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
- if (cpts_is_tx_enabled(gbe_dev->cpts))
+ if (gbe_dev->tx_ts_enabled)
ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
- if (cpts_is_rx_enabled(gbe_dev->cpts))
+ if (gbe_dev->rx_ts_enabled)
ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
@@ -2670,10 +2672,10 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
- cpts_tx_enable(cpts, 0);
+ gbe_dev->tx_ts_enabled = 0;
break;
case HWTSTAMP_TX_ON:
- cpts_tx_enable(cpts, 1);
+ gbe_dev->tx_ts_enabled = 1;
break;
default:
return -ERANGE;
@@ -2681,12 +2683,12 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2698,7 +2700,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
@@ -3621,7 +3623,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
return -EINVAL;
}
- if (!strcmp(node->name, "gbe")) {
+ if (of_node_name_eq(node, "gbe")) {
ret = get_gbe_resource_version(gbe_dev, node);
if (ret)
return ret;
@@ -3635,7 +3637,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
else
ret = -ENODEV;
- } else if (!strcmp(node->name, "xgbe")) {
+ } else if (of_node_name_eq(node, "xgbe")) {
ret = set_xgbe_ethss10_priv(gbe_dev, node);
if (ret)
return ret;
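Editor's note: both the cpsw and netcp_ethss hunks stop reaching into struct cpts for the timestamp enable state and instead keep it in the MAC driver's private data. A minimal sketch of the resulting hwtstamp_get-style handler is shown below; struct demo_priv and its fields are invented for the example, not the drivers' real types.

#include <linux/if.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_priv {
	int rx_ts_enabled;	/* holds a HWTSTAMP_FILTER_* value */
	int tx_ts_enabled;	/* nonzero when TX timestamping is on */
};

/* Illustrative sketch only: report timestamping state from
 * driver-private flags instead of the shared cpts object.
 */
static int demo_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct demo_priv *priv = netdev_priv(ndev);
	struct hwtstamp_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = priv->rx_ts_enabled;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}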
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 93d142867c2a..b4ab1a5f6cd0 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -69,7 +69,9 @@ MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");
-/* Turn on debugging. See Documentation/networking/tlan.txt for details */
+/* Turn on debugging.
+ * See Documentation/networking/device_drivers/ti/tlan.txt for details
+ */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 6a71c2c0f17d..c50a9772f4af 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -607,9 +607,9 @@ static void tc_handle_link_change(struct net_device *dev)
static int tc_mii_probe(struct net_device *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct tc35815_local *lp = netdev_priv(dev);
struct phy_device *phydev;
- u32 dropmask;
phydev = phy_find_first(lp->mii_bus);
if (!phydev) {
@@ -630,17 +630,22 @@ static int tc_mii_probe(struct net_device *dev)
/* mask with MAC supported features */
phy_set_max_speed(phydev, SPEED_100);
- dropmask = 0;
- if (options.speed == 10)
- dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
- else if (options.speed == 100)
- dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
- if (options.duplex == 1)
- dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
- else if (options.duplex == 2)
- dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
- phydev->supported &= ~dropmask;
- phydev->advertising = phydev->supported;
+ if (options.speed == 10) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ } else if (options.speed == 100) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+ }
+ if (options.duplex == 1) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ } else if (options.duplex == 2) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ }
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
lp->link = 0;
lp->speed = 0;